file_name stringlengths 3-137 | prefix stringlengths 0-918k | suffix stringlengths 0-962k | middle stringlengths 0-812k
---|---|---|---|
CheckList.js | // All material copyright ESRI, All Rights Reserved, unless otherwise specified.
// See http://js.arcgis.com/3.12/esri/copyright.txt for details.
//>>built
define("esri/dijit/geoenrichment/CheckList","../../declare dojo/dom-construct dojo/dom-class dojo/_base/lang dojo/query dojo/store/Memory dgrid/List dgrid/Selection dijit/layout/ContentPane".split(" "),function(d,e,f,g,k,l,m,n){function | (a,b){var c=e.create("div",{"class":"TrimWithEllipses"});b&&e.create("div",{"class":"dijit dijitInline dijitCheckBox VarCheck"},c);e.create("span",{"class":"VarLabel",innerHTML:a},c);return c}d=d("esri.dijit.geoenrichment.CheckList",[m,n],{selectionMode:"toggle",
store:null,selectedItems:null,useTouchScroll:!1,_setStore:function(a){this.store=a;this.refresh();this.renderArray(a.data)},_setItems:function(a){a=new l({data:a});this.set("store",a);this.refresh();this.renderArray(a.data)},buildRendering:function(){this.inherited(arguments);var a=g.hitch(this,this._onSelect);this.on("dgrid-select",a);this.on("dgrid-deselect",g.hitch(this,this._onDeselect))},select:function(a){this.inherited(arguments);var b=this.row(a).element;if(b){var c=k(".dijitCheckBox",b)[0];
c&&(f.contains(b,"dgrid-selected")?f.add(c,"dijitCheckBoxChecked"):f.remove(c,"dijitCheckBoxChecked"))}},renderRow:function(a,b){return h(a.description?a.description:a.alias,"single"!=this.selectionMode)},_setSelection:function(){this.selection=this.get("selection");this.selectedItems=[];if(this.selection&&this.store.data)for(var a=this.store.data,b=0;b<a.length;b++)this.selection[a[b].id]&&this.selectedItems.push(a[b])},_onSelect:function(a){this._setSelection();this.onSelect(a)},_onDeselect:function(a){this._setSelection();
this.onDeselect(a)},onDeselect:function(a){},onSelect:function(a){}});d.renderCheckBox=h;return d}); | h |
abi.rs | use llvm::{self, AttributePlace};
use rustc_codegen_ssa::MemFlags;
use builder::Builder;
use context::CodegenCx;
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::mir::operand::OperandValue;
use type_::Type;
use type_of::{LayoutLlvmExt, PointerKind};
use value::Value;
use rustc_target::abi::call::ArgType;
use rustc_codegen_ssa::traits::*;
use rustc_target::abi::{HasDataLayout, LayoutOf, Size, TyLayout, Abi as LayoutAbi};
use rustc::ty::{self, Ty, Instance};
use rustc::ty::layout;
use libc::c_uint;
pub use rustc_target::spec::abi::Abi;
pub use rustc::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA};
pub use rustc_target::abi::call::*;
macro_rules! for_each_kind {
($flags: ident, $f: ident, $($kind: ident),+) => ({
$(if $flags.contains(ArgAttribute::$kind) { $f(llvm::Attribute::$kind) })+
})
}
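// Illustrative expansion (not in the original source): `for_each_kind!(flags, f, ByVal, NoAlias)`
// becomes `if flags.contains(ArgAttribute::ByVal) { f(llvm::Attribute::ByVal) }`
// followed by the same check for `NoAlias`.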
trait ArgAttributeExt {
fn for_each_kind<F>(&self, f: F) where F: FnMut(llvm::Attribute);
}
impl ArgAttributeExt for ArgAttribute {
fn for_each_kind<F>(&self, mut f: F) where F: FnMut(llvm::Attribute) {
for_each_kind!(self, f,
ByVal, NoAlias, NoCapture, NonNull, ReadOnly, SExt, StructRet, ZExt, InReg)
}
}
pub trait ArgAttributesExt {
fn apply_llfn(&self, idx: AttributePlace, llfn: &Value);
fn apply_callsite(&self, idx: AttributePlace, callsite: &Value);
}
impl ArgAttributesExt for ArgAttributes {
fn apply_llfn(&self, idx: AttributePlace, llfn: &Value) {
let mut regular = self.regular;
unsafe {
let deref = self.pointee_size.bytes();
if deref != 0 {
if regular.contains(ArgAttribute::NonNull) {
llvm::LLVMRustAddDereferenceableAttr(llfn,
idx.as_uint(),
deref);
} else {
llvm::LLVMRustAddDereferenceableOrNullAttr(llfn,
idx.as_uint(),
deref);
}
regular -= ArgAttribute::NonNull;
}
if let Some(align) = self.pointee_align {
llvm::LLVMRustAddAlignmentAttr(llfn,
idx.as_uint(),
align.bytes() as u32);
}
regular.for_each_kind(|attr| attr.apply_llfn(idx, llfn));
}
}
fn apply_callsite(&self, idx: AttributePlace, callsite: &Value) {
let mut regular = self.regular;
unsafe {
let deref = self.pointee_size.bytes();
if deref != 0 {
if regular.contains(ArgAttribute::NonNull) {
llvm::LLVMRustAddDereferenceableCallSiteAttr(callsite,
idx.as_uint(),
deref);
} else {
llvm::LLVMRustAddDereferenceableOrNullCallSiteAttr(callsite,
idx.as_uint(),
deref);
}
regular -= ArgAttribute::NonNull;
}
if let Some(align) = self.pointee_align {
llvm::LLVMRustAddAlignmentCallSiteAttr(callsite,
idx.as_uint(),
align.bytes() as u32);
}
regular.for_each_kind(|attr| attr.apply_callsite(idx, callsite));
}
}
}
pub trait LlvmType {
fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type;
}
impl LlvmType for Reg {
fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
match self.kind {
RegKind::Integer => cx.type_ix(self.size.bits()),
RegKind::Float => {
match self.size.bits() {
32 => cx.type_f32(),
64 => cx.type_f64(),
_ => bug!("unsupported float: {:?}", self)
}
}
RegKind::Vector => {
cx.type_vector(cx.type_i8(), self.size.bytes())
}
}
}
}
impl LlvmType for CastTarget {
fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
let rest_ll_unit = self.rest.unit.llvm_type(cx);
let (rest_count, rem_bytes) = if self.rest.unit.size.bytes() == 0 {
(0, 0)
} else {
(self.rest.total.bytes() / self.rest.unit.size.bytes(),
self.rest.total.bytes() % self.rest.unit.size.bytes())
};
if self.prefix.iter().all(|x| x.is_none()) {
// Simplify to a single unit when there is no prefix and size <= unit size
if self.rest.total <= self.rest.unit.size {
return rest_ll_unit;
}
// Simplify to array when all chunks are the same size and type
if rem_bytes == 0 {
return cx.type_array(rest_ll_unit, rest_count);
}
}
// Create list of fields in the main structure
let mut args: Vec<_> =
self.prefix.iter().flat_map(|option_kind| option_kind.map(
|kind| Reg { kind: kind, size: self.prefix_chunk }.llvm_type(cx)))
.chain((0..rest_count).map(|_| rest_ll_unit))
.collect();
// Append final integer
if rem_bytes != 0 {
// Only integers can be really split further.
assert_eq!(self.rest.unit.kind, RegKind::Integer);
args.push(cx.type_ix(rem_bytes * 8));
}
cx.type_struct(&args, false)
}
}
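// Worked example for the cast lowering above: with no prefix and
// `rest = { unit: i64 (8 bytes), total: 20 bytes }`, the code computes
// `rest_count = 2` and `rem_bytes = 4`, so the resulting LLVM type is `{ i64, i64, i32 }`.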
pub trait ArgTypeExt<'ll, 'tcx> {
fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
fn store(
&self,
bx: &mut Builder<'_, 'll, 'tcx>,
val: &'ll Value,
dst: PlaceRef<'tcx, &'ll Value>,
);
fn store_fn_arg(
&self,
bx: &mut Builder<'_, 'll, 'tcx>,
idx: &mut usize,
dst: PlaceRef<'tcx, &'ll Value>,
);
}
impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
/// Gets the LLVM type for a place of the original Rust type of
/// this argument/return, i.e., the result of `type_of::type_of`.
fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
self.layout.llvm_type(cx)
}
/// Stores a direct/indirect value described by this ArgType into a
/// place for the original Rust type of this argument/return.
/// Can be used for both storing formal arguments into Rust variables
/// or results of call/invoke instructions into their destinations.
fn store(
&self,
bx: &mut Builder<'_, 'll, 'tcx>,
val: &'ll Value,
dst: PlaceRef<'tcx, &'ll Value>,
) {
if self.is_ignore() {
return;
}
if self.is_sized_indirect() {
OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst)
} else if self.is_unsized_indirect() {
bug!("unsized ArgType must be handled through store_fn_arg");
} else if let PassMode::Cast(cast) = self.mode {
// FIXME(eddyb): Figure out when the simpler Store is safe, clang
// uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
let can_store_through_cast_ptr = false;
if can_store_through_cast_ptr {
let cast_ptr_llty = bx.type_ptr_to(cast.llvm_type(bx));
let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty);
bx.store(val, cast_dst, self.layout.align.abi);
} else {
// The actual return type is a struct, but the ABI
// adaptation code has cast it into some scalar type. The
// code that follows is the only reliable way I have
// found to do a transform like i64 -> {i32,i32}.
// Basically we dump the data onto the stack then memcpy it.
//
// Other approaches I tried:
// - Casting rust ret pointer to the foreign type and using Store
// is (a) unsafe if size of foreign type > size of rust type and
// (b) runs afoul of strict aliasing rules, yielding invalid
// assembly under -O (specifically, the store gets removed).
// - Truncating foreign type to correct integral type and then
// bitcasting to the struct type yields invalid cast errors.
// We instead thus allocate some scratch space...
let scratch_size = cast.size(bx);
let scratch_align = cast.align(bx);
let llscratch = bx.alloca(cast.llvm_type(bx), "abi_cast", scratch_align);
bx.lifetime_start(llscratch, scratch_size);
// ...where we first store the value...
bx.store(val, llscratch, scratch_align);
// ...and then memcpy it to the intended destination.
bx.memcpy(
dst.llval,
self.layout.align.abi,
llscratch,
scratch_align,
bx.const_usize(self.layout.size.bytes()),
MemFlags::empty()
);
bx.lifetime_end(llscratch, scratch_size);
}
} else {
OperandValue::Immediate(val).store(bx, dst);
}
}
fn store_fn_arg(
&self,
bx: &mut Builder<'a, 'll, 'tcx>,
idx: &mut usize,
dst: PlaceRef<'tcx, &'ll Value>,
) {
let mut next = || {
let val = llvm::get_param(bx.llfn(), *idx as c_uint);
*idx += 1;
val
};
match self.mode {
PassMode::Ignore => {},
PassMode::Pair(..) => {
OperandValue::Pair(next(), next()).store(bx, dst);
}
PassMode::Indirect(_, Some(_)) => {
OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
}
PassMode::Direct(_) | PassMode::Indirect(_, None) | PassMode::Cast(_) => {
self.store(bx, next(), dst);
}
}
}
}
impl ArgTypeMethods<'tcx> for Builder<'a, 'll, 'tcx> {
fn store_fn_arg(
&mut self,
ty: &ArgType<'tcx, Ty<'tcx>>,
idx: &mut usize, dst: PlaceRef<'tcx, Self::Value>
) {
ty.store_fn_arg(self, idx, dst)
}
fn store_arg_ty(
&mut self,
ty: &ArgType<'tcx, Ty<'tcx>>,
val: &'ll Value,
dst: PlaceRef<'tcx, &'ll Value>
) {
ty.store(self, val, dst)
}
fn memory_ty(&self, ty: &ArgType<'tcx, Ty<'tcx>>) -> &'ll Type {
ty.memory_ty(self)
}
}
pub trait FnTypeExt<'tcx> {
fn of_instance(cx: &CodegenCx<'ll, 'tcx>, instance: &ty::Instance<'tcx>) -> Self;
fn new(cx: &CodegenCx<'ll, 'tcx>,
sig: ty::FnSig<'tcx>,
extra_args: &[Ty<'tcx>]) -> Self;
fn new_vtable(cx: &CodegenCx<'ll, 'tcx>,
sig: ty::FnSig<'tcx>,
extra_args: &[Ty<'tcx>]) -> Self;
fn new_internal(
cx: &CodegenCx<'ll, 'tcx>,
sig: ty::FnSig<'tcx>,
extra_args: &[Ty<'tcx>],
mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgType<'tcx, Ty<'tcx>>,
) -> Self;
fn adjust_for_abi(&mut self,
cx: &CodegenCx<'ll, 'tcx>,
abi: Abi);
fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
fn llvm_cconv(&self) -> llvm::CallConv;
fn apply_attrs_llfn(&self, llfn: &'ll Value);
fn apply_attrs_callsite(&self, bx: &mut Builder<'a, 'll, 'tcx>, callsite: &'ll Value);
}
impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> {
fn of_instance(cx: &CodegenCx<'ll, 'tcx>, instance: &ty::Instance<'tcx>) -> Self {
let sig = instance.fn_sig(cx.tcx);
let sig = cx.tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
FnType::new(cx, sig, &[])
}
fn new(cx: &CodegenCx<'ll, 'tcx>,
sig: ty::FnSig<'tcx>,
extra_args: &[Ty<'tcx>]) -> Self {
FnType::new_internal(cx, sig, extra_args, |ty, _| {
ArgType::new(cx.layout_of(ty))
})
}
fn new_vtable(cx: &CodegenCx<'ll, 'tcx>,
sig: ty::FnSig<'tcx>,
extra_args: &[Ty<'tcx>]) -> Self {
FnType::new_internal(cx, sig, extra_args, |ty, arg_idx| {
let mut layout = cx.layout_of(ty);
// Don't pass the vtable, it's not an argument of the virtual fn.
// Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
// or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
if arg_idx == Some(0) {
let fat_pointer_ty = if layout.is_unsized() {
// unsized `self` is passed as a pointer to `self`
// FIXME (mikeyhew) change this to use &own if it is ever added to the language
cx.tcx.mk_mut_ptr(layout.ty)
} else {
match layout.abi {
LayoutAbi::ScalarPair(..) => (),
_ => bug!("receiver type has unsupported layout: {:?}", layout)
}
// In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
// with a Scalar (not ScalarPair) ABI. This is a hack that is understood
// elsewhere in the compiler as a method on a `dyn Trait`.
// To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
// get a built-in pointer type
let mut fat_pointer_layout = layout;
'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
&& !fat_pointer_layout.ty.is_region_ptr()
{
'iter_fields: for i in 0..fat_pointer_layout.fields.count() {
let field_layout = fat_pointer_layout.field(cx, i);
if !field_layout.is_zst() {
fat_pointer_layout = field_layout;
continue 'descend_newtypes
}
}
bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
}
fat_pointer_layout.ty
};
// we now have a type like `*mut RcBox<dyn Trait>`
// change its layout to that of `*mut ()`, a thin pointer, but keep the same type
// this is understood as a special case elsewhere in the compiler
let unit_pointer_ty = cx.tcx.mk_mut_ptr(cx.tcx.mk_unit());
layout = cx.layout_of(unit_pointer_ty);
layout.ty = fat_pointer_ty;
}
ArgType::new(layout)
})
}
fn new_internal(
cx: &CodegenCx<'ll, 'tcx>,
sig: ty::FnSig<'tcx>,
extra_args: &[Ty<'tcx>],
mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgType<'tcx, Ty<'tcx>>,
) -> Self {
debug!("FnType::new_internal({:?}, {:?})", sig, extra_args);
use self::Abi::*;
let conv = match cx.sess().target.target.adjust_abi(sig.abi) {
RustIntrinsic | PlatformIntrinsic |
Rust | RustCall => Conv::C,
// It's the ABI's job to select this, not ours.
System => bug!("system abi should be selected elsewhere"),
Stdcall => Conv::X86Stdcall,
Fastcall => Conv::X86Fastcall,
Vectorcall => Conv::X86VectorCall,
Thiscall => Conv::X86ThisCall,
C => Conv::C,
Unadjusted => Conv::C,
Win64 => Conv::X86_64Win64,
SysV64 => Conv::X86_64SysV,
Aapcs => Conv::ArmAapcs,
PtxKernel => Conv::PtxKernel,
Msp430Interrupt => Conv::Msp430Intr,
X86Interrupt => Conv::X86Intr,
AmdGpuKernel => Conv::AmdGpuKernel,
// These API constants ought to be more specific...
Cdecl => Conv::C,
};
let mut inputs = sig.inputs();
let extra_args = if sig.abi == RustCall {
assert!(!sig.variadic && extra_args.is_empty());
match sig.inputs().last().unwrap().sty {
ty::Tuple(ref tupled_arguments) => {
inputs = &sig.inputs()[0..sig.inputs().len() - 1];
tupled_arguments
}
_ => {
bug!("argument to function with \"rust-call\" ABI \
is not a tuple");
}
}
} else {
assert!(sig.variadic || extra_args.is_empty());
extra_args
};
let target = &cx.sess().target.target;
let win_x64_gnu = target.target_os == "windows"
&& target.arch == "x86_64"
&& target.target_env == "gnu";
let linux_s390x = target.target_os == "linux"
&& target.arch == "s390x"
&& target.target_env == "gnu";
let linux_sparc64 = target.target_os == "linux"
&& target.arch == "sparc64"
&& target.target_env == "gnu";
let rust_abi = match sig.abi {
RustIntrinsic | PlatformIntrinsic | Rust | RustCall => true,
_ => false
};
// Handle safe Rust thin and fat pointers.
let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
scalar: &layout::Scalar,
layout: TyLayout<'tcx, Ty<'tcx>>,
offset: Size,
is_return: bool| {
// Booleans are always an i1 that needs to be zero-extended.
if scalar.is_bool() {
attrs.set(ArgAttribute::ZExt);
return;
}
// Only pointer types handled below.
if scalar.value != layout::Pointer {
return;
}
if scalar.valid_range.start() < scalar.valid_range.end() {
if *scalar.valid_range.start() > 0 {
attrs.set(ArgAttribute::NonNull);
}
}
if let Some(pointee) = layout.pointee_info_at(cx, offset) {
if let Some(kind) = pointee.safe {
attrs.pointee_size = pointee.size;
attrs.pointee_align = Some(pointee.align);
// `Box` pointer parameters never alias because ownership is transferred
// `&mut` pointer parameters never alias other parameters,
// or mutable global data
//
// `&T` where `T` contains no `UnsafeCell<U>` is immutable,
// and can be marked as both `readonly` and `noalias`, as
// LLVM's definition of `noalias` is based solely on memory
// dependencies rather than pointer equality
let no_alias = match kind {
PointerKind::Shared => false,
PointerKind::UniqueOwned => true,
PointerKind::Frozen |
PointerKind::UniqueBorrowed => !is_return
};
if no_alias {
attrs.set(ArgAttribute::NoAlias);
}
if kind == PointerKind::Frozen && !is_return {
attrs.set(ArgAttribute::ReadOnly);
}
}
}
};
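// In practice this mapping means: `Box<T>` arguments and returns get `noalias`;
// `&mut T` arguments get `noalias` (but not when returned); and `&T` arguments
// without interior mutability get both `noalias` and `readonly`.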
let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| {
let is_return = arg_idx.is_none();
let mut arg = mk_arg_type(ty, arg_idx);
if arg.layout.is_zst() {
// For some forsaken reason, x86_64-pc-windows-gnu
// doesn't ignore zero-sized struct arguments.
// The same is true for s390x-unknown-linux-gnu
// and sparc64-unknown-linux-gnu.
if is_return || rust_abi || (!win_x64_gnu && !linux_s390x && !linux_sparc64) {
arg.mode = PassMode::Ignore;
}
}
// FIXME(eddyb) other ABIs don't have logic for scalar pairs.
if !is_return && rust_abi {
if let layout::Abi::ScalarPair(ref a, ref b) = arg.layout.abi {
let mut a_attrs = ArgAttributes::new();
let mut b_attrs = ArgAttributes::new();
adjust_for_rust_scalar(&mut a_attrs,
a,
arg.layout,
Size::ZERO,
false);
adjust_for_rust_scalar(&mut b_attrs,
b,
arg.layout,
a.value.size(cx).align_to(b.value.align(cx).abi),
false);
arg.mode = PassMode::Pair(a_attrs, b_attrs);
return arg;
}
}
if let layout::Abi::Scalar(ref scalar) = arg.layout.abi {
if let PassMode::Direct(ref mut attrs) = arg.mode {
adjust_for_rust_scalar(attrs,
scalar,
arg.layout,
Size::ZERO, | is_return);
}
}
arg
};
let mut fn_ty = FnType {
ret: arg_of(sig.output(), None),
args: inputs.iter().chain(extra_args).enumerate().map(|(i, ty)| {
arg_of(ty, Some(i))
}).collect(),
variadic: sig.variadic,
conv,
};
fn_ty.adjust_for_abi(cx, sig.abi);
fn_ty
}
fn adjust_for_abi(&mut self,
cx: &CodegenCx<'ll, 'tcx>,
abi: Abi) {
if abi == Abi::Unadjusted { return }
if abi == Abi::Rust || abi == Abi::RustCall ||
abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic {
let fixup = |arg: &mut ArgType<'tcx, Ty<'tcx>>| {
if arg.is_ignore() { return; }
match arg.layout.abi {
layout::Abi::Aggregate { .. } => {}
// This is a fun case! The gist of what this is doing is
// that we want callers and callees to always agree on the
// ABI of how they pass SIMD arguments. If we were to *not*
// make these arguments indirect then they'd be immediates
// in LLVM, which means that they'd use whatever the
// appropriate ABI is for the callee and the caller. That
// means, for example, if the caller doesn't have AVX
// enabled but the callee does, then passing an AVX argument
// across this boundary would cause corrupt data to show up.
//
// This problem is fixed by unconditionally passing SIMD
// arguments through memory between callers and callees
// which should get them all to agree on ABI regardless of
// target feature sets. Some more information about this
// issue can be found in #44367.
//
// Note that the platform intrinsic ABI is exempt here as
// that's how we connect up to LLVM and it's unstable
// anyway; we control all calls to it in libstd.
layout::Abi::Vector { .. }
if abi != Abi::PlatformIntrinsic &&
cx.sess().target.target.options.simd_types_indirect =>
{
arg.make_indirect();
return
}
_ => return
}
let size = arg.layout.size;
if arg.layout.is_unsized() || size > layout::Pointer.size(cx) {
arg.make_indirect();
} else {
// We want to pass small aggregates as immediates, but using
// a LLVM aggregate type for this leads to bad optimizations,
// so we pick an appropriately sized integer type instead.
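// For example, a 3-byte aggregate such as `[u8; 3]` is cast to an `i24`.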
arg.cast_to(Reg {
kind: RegKind::Integer,
size
});
}
};
fixup(&mut self.ret);
for arg in &mut self.args {
fixup(arg);
}
if let PassMode::Indirect(ref mut attrs, _) = self.ret.mode {
attrs.set(ArgAttribute::StructRet);
}
return;
}
if let Err(msg) = self.adjust_for_cabi(cx, abi) {
cx.sess().fatal(&msg);
}
}
fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
let args_capacity: usize = self.args.iter().map(|arg|
if arg.pad.is_some() { 1 } else { 0 } +
if let PassMode::Pair(_, _) = arg.mode { 2 } else { 1 }
).sum();
let mut llargument_tys = Vec::with_capacity(
if let PassMode::Indirect(..) = self.ret.mode { 1 } else { 0 } + args_capacity
);
let llreturn_ty = match self.ret.mode {
PassMode::Ignore => cx.type_void(),
PassMode::Direct(_) | PassMode::Pair(..) => {
self.ret.layout.immediate_llvm_type(cx)
}
PassMode::Cast(cast) => cast.llvm_type(cx),
PassMode::Indirect(..) => {
llargument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx)));
cx.type_void()
}
};
for arg in &self.args {
// add padding
if let Some(ty) = arg.pad {
llargument_tys.push(ty.llvm_type(cx));
}
let llarg_ty = match arg.mode {
PassMode::Ignore => continue,
PassMode::Direct(_) => arg.layout.immediate_llvm_type(cx),
PassMode::Pair(..) => {
llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 0, true));
llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 1, true));
continue;
}
PassMode::Indirect(_, Some(_)) => {
let ptr_ty = cx.tcx.mk_mut_ptr(arg.layout.ty);
let ptr_layout = cx.layout_of(ptr_ty);
llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 0, true));
llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 1, true));
continue;
}
PassMode::Cast(cast) => cast.llvm_type(cx),
PassMode::Indirect(_, None) => cx.type_ptr_to(arg.memory_ty(cx)),
};
llargument_tys.push(llarg_ty);
}
if self.variadic {
cx.type_variadic_func(&llargument_tys, llreturn_ty)
} else {
cx.type_func(&llargument_tys, llreturn_ty)
}
}
fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
unsafe {
llvm::LLVMPointerType(self.llvm_type(cx),
cx.data_layout().instruction_address_space as c_uint)
}
}
fn llvm_cconv(&self) -> llvm::CallConv {
match self.conv {
Conv::C => llvm::CCallConv,
Conv::AmdGpuKernel => llvm::AmdGpuKernel,
Conv::ArmAapcs => llvm::ArmAapcsCallConv,
Conv::Msp430Intr => llvm::Msp430Intr,
Conv::PtxKernel => llvm::PtxKernel,
Conv::X86Fastcall => llvm::X86FastcallCallConv,
Conv::X86Intr => llvm::X86_Intr,
Conv::X86Stdcall => llvm::X86StdcallCallConv,
Conv::X86ThisCall => llvm::X86_ThisCall,
Conv::X86VectorCall => llvm::X86_VectorCall,
Conv::X86_64SysV => llvm::X86_64_SysV,
Conv::X86_64Win64 => llvm::X86_64_Win64,
}
}
fn apply_attrs_llfn(&self, llfn: &'ll Value) {
let mut i = 0;
let mut apply = |attrs: &ArgAttributes| {
attrs.apply_llfn(llvm::AttributePlace::Argument(i), llfn);
i += 1;
};
match self.ret.mode {
PassMode::Direct(ref attrs) => {
attrs.apply_llfn(llvm::AttributePlace::ReturnValue, llfn);
}
PassMode::Indirect(ref attrs, _) => apply(attrs),
_ => {}
}
for arg in &self.args {
if arg.pad.is_some() {
apply(&ArgAttributes::new());
}
match arg.mode {
PassMode::Ignore => {}
PassMode::Direct(ref attrs) |
PassMode::Indirect(ref attrs, None) => apply(attrs),
PassMode::Indirect(ref attrs, Some(ref extra_attrs)) => {
apply(attrs);
apply(extra_attrs);
}
PassMode::Pair(ref a, ref b) => {
apply(a);
apply(b);
}
PassMode::Cast(_) => apply(&ArgAttributes::new()),
}
}
}
fn apply_attrs_callsite(&self, bx: &mut Builder<'a, 'll, 'tcx>, callsite: &'ll Value) {
let mut i = 0;
let mut apply = |attrs: &ArgAttributes| {
attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite);
i += 1;
};
match self.ret.mode {
PassMode::Direct(ref attrs) => {
attrs.apply_callsite(llvm::AttributePlace::ReturnValue, callsite);
}
PassMode::Indirect(ref attrs, _) => apply(attrs),
_ => {}
}
if let layout::Abi::Scalar(ref scalar) = self.ret.layout.abi {
// If the value is a boolean, the range is 0..2 and that ultimately
// becomes 0..0 when the type becomes i1, which would be rejected
// by the LLVM verifier.
if let layout::Int(..) = scalar.value {
if !scalar.is_bool() {
let range = scalar.valid_range_exclusive(bx);
if range.start != range.end {
bx.range_metadata(callsite, range);
}
}
}
}
for arg in &self.args {
if arg.pad.is_some() {
apply(&ArgAttributes::new());
}
match arg.mode {
PassMode::Ignore => {}
PassMode::Direct(ref attrs) |
PassMode::Indirect(ref attrs, None) => apply(attrs),
PassMode::Indirect(ref attrs, Some(ref extra_attrs)) => {
apply(attrs);
apply(extra_attrs);
}
PassMode::Pair(ref a, ref b) => {
apply(a);
apply(b);
}
PassMode::Cast(_) => apply(&ArgAttributes::new()),
}
}
let cconv = self.llvm_cconv();
if cconv != llvm::CCallConv {
llvm::SetInstructionCallConv(callsite, cconv);
}
}
}
impl AbiMethods<'tcx> for CodegenCx<'ll, 'tcx> {
fn new_fn_type(&self, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> FnType<'tcx, Ty<'tcx>> {
FnType::new(&self, sig, extra_args)
}
fn new_vtable(
&self,
sig: ty::FnSig<'tcx>,
extra_args: &[Ty<'tcx>]
) -> FnType<'tcx, Ty<'tcx>> {
FnType::new_vtable(&self, sig, extra_args)
}
fn fn_type_of_instance(&self, instance: &Instance<'tcx>) -> FnType<'tcx, Ty<'tcx>> {
FnType::of_instance(&self, instance)
}
}
impl AbiBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
fn apply_attrs_callsite(
&mut self,
ty: &FnType<'tcx, Ty<'tcx>>,
callsite: Self::Value
) {
ty.apply_attrs_callsite(self, callsite)
}
} | |
test_auth_forms.py | from unittest.mock import patch
from django.contrib.auth.models import User
from django.test import TestCase
from login.forms import CFGOVPasswordChangeForm, UserCreationForm, UserEditForm
from login.tests.test_password_policy import TestWithUser
@patch("login.forms.send_password_reset_email")
class UserCreationFormTestCase(TestCase):
def setUp(self):
self.username = self.__class__.__name__
self.email = "[email protected]"
self.userdata = {
"email": self.email,
"username": self.username,
"first_name": "George",
"last_name": "Washington",
"password1": "cherrytree",
"password2": "cherrytree",
}
def tearDown(self):
User.objects.filter(username=self.username).delete()
def test_save_sends_email(self, send_email):
form = UserCreationForm(self.userdata)
self.assertTrue(form.is_valid())
form.save(commit=True)
send_email.assert_called_once_with(self.email)
def test_save_without_commit_doesnt_send_email(self, send_email):
form = UserCreationForm(self.userdata)
self.assertTrue(form.is_valid())
form.save(commit=False)
send_email.assert_not_called()
def test_duplicate_email_fails_validation(self, send_email):
User.objects.create(username="foo", email=self.email)
form = UserCreationForm(self.userdata)
self.assertFalse(form.is_valid())
self.assertTrue(form.errors["email"])
class UserEditFormTestCase(TestCase):
def setUp(self):
self.userdata = {
"username": "george",
"email": "[email protected]",
"first_name": "george",
"last_name": "washington",
}
def test_no_edits_valid(self):
|
def test_edit_first_name(self):
user = User.objects.create(**self.userdata)
userdata2 = dict(self.userdata)
userdata2["first_name"] = "joe"
form = UserEditForm(data=userdata2, instance=user)
self.assertTrue(form.is_valid())
user = form.save()
self.assertEqual(user.first_name, "joe")
self.assertEqual(user.username, "george")
def test_duplicate_email_fails_validation(self):
User.objects.create(**self.userdata)
userdata2 = dict(self.userdata)
userdata2["username"] = "patrick"
form = UserEditForm(data=userdata2)
self.assertFalse(form.is_valid())
self.assertTrue(form.errors["email"])
def test_duplicate_emails_allowed_on_user_model(self):
User.objects.create(**self.userdata)
userdata2 = dict(self.userdata)
userdata2["username"] = "patrick"
try:
User.objects.create(**userdata2)
except Exception:
self.fail(
"users with duplicate emails are allowed, "
"just not when creating or editing via for "
)
class PasswordValidationMixinTestCase(TestWithUser):
def test_edit_password(self):
user = self.get_user(last_password="testing")
form = CFGOVPasswordChangeForm(
data={
"old_password": "testing",
"new_password1": "Testing12345!",
"new_password2": "Testing12345!",
},
user=user,
)
form.is_valid()
self.assertTrue(form.is_valid())
| user = User.objects.create(**self.userdata)
form = UserEditForm(data=self.userdata, instance=user)
self.assertTrue(form.is_valid()) |
oci_opsi_resource_forecast_trend_facts.py | #!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_opsi_resource_forecast_trend_facts
short_description: Fetches details about a ResourceForecastTrend resource in Oracle Cloud Infrastructure
description:
- Fetches details about a ResourceForecastTrend resource in Oracle Cloud Infrastructure
- Get Forecast predictions for CPU and Storage resources since a time in the past.
If compartmentIdInSubtree is specified, aggregates resources in a compartment and in all sub-compartments.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
compartment_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment.
type: str
required: true
resource_metric:
description:
- Filter by resource metric.
Supported values are CPU , STORAGE, MEMORY and IO.
type: str
required: true
analysis_time_interval:
description:
- Specify time period in ISO 8601 format with respect to current time.
Default is last 30 days represented by P30D.
If timeInterval is specified, then timeIntervalStart and timeIntervalEnd will be ignored.
Examples P90D (last 90 days), P4W (last 4 weeks), P2M (last 2 months), P1Y (last 12 months). Maximum value allowed is 25 months prior to
current time (P25M).
type: str
time_interval_start:
description:
- Analysis start time in UTC in ISO 8601 format(inclusive).
Example 2019-10-30T00:00:00Z (yyyy-MM-ddThh:mm:ssZ).
The minimum allowed value is 2 years prior to the current day.
timeIntervalStart and timeIntervalEnd parameters are used together.
If analysisTimeInterval is specified, this parameter is ignored.
type: str
time_interval_end:
description:
- Analysis end time in UTC in ISO 8601 format(exclusive).
Example 2019-10-30T00:00:00Z (yyyy-MM-ddThh:mm:ssZ).
timeIntervalStart and timeIntervalEnd are used together.
If timeIntervalEnd is not specified, current time is used as timeIntervalEnd.
type: str
database_type:
description:
- Filter by one or more database type.
Possible values are ADW-S, ATP-S, ADW-D, ATP-D, EXTERNAL-PDB, EXTERNAL-NONCDB.
type: list
elements: str
choices:
- "ADW-S"
- "ATP-S"
- "ADW-D"
- "ATP-D"
- "EXTERNAL-PDB"
- "EXTERNAL-NONCDB"
database_id:
description:
- Optional list of database L(OCIDs,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the associated DBaaS entity.
type: list
elements: str
id:
description:
- Optional list of database insight resource L(OCIDs,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm).
type: list
elements: str
exadata_insight_id:
description:
- Optional list of exadata insight resource L(OCIDs,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm).
type: list
elements: str
cdb_name:
description:
- Filter by one or more cdb name.
type: list
elements: str
statistic:
description:
- Choose the type of statistic metric data to be used for forecasting.
type: str
choices:
- "AVG"
- "MAX"
forecast_days:
description:
- Number of days used for utilization forecast analysis.
type: int
forecast_model:
description:
- "Choose algorithm model for the forecasting.
Possible values:
- LINEAR: Uses linear regression algorithm for forecasting.
- ML_AUTO: Automatically detects best algorithm to use for forecasting.
- ML_NO_AUTO: Automatically detects seasonality of the data for forecasting using linear or seasonal algorithm."
type: str
choices:
- "LINEAR"
- "ML_AUTO"
- "ML_NO_AUTO"
utilization_level:
description:
- "Filter by utilization level by the following buckets:
- HIGH_UTILIZATION: DBs with utilization greater or equal than 75.
- LOW_UTILIZATION: DBs with utilization lower than 25.
- MEDIUM_HIGH_UTILIZATION: DBs with utilization greater or equal than 50 but lower than 75.
- MEDIUM_LOW_UTILIZATION: DBs with utilization greater or equal than 25 but lower than 50."
type: str
choices:
- "HIGH_UTILIZATION"
- "LOW_UTILIZATION"
- "MEDIUM_HIGH_UTILIZATION"
- "MEDIUM_LOW_UTILIZATION"
confidence:
description:
- This parameter is used to change the confidence level of the data that is ingested by the
forecast algorithm.
Confidence is the probability of an interval to contain the expected population parameter.
Manipulation of this value will lead to different results.
If not set, default confidence value is 95%.
type: int
host_name:
description:
- Filter by one or more hostname.
type: list
elements: str
tablespace_name:
description:
- Tablespace name for a database
type: str
is_database_instance_level_metrics:
description:
- Flag to indicate if database instance level metrics should be returned. The flag is ignored when a host name filter is not applied.
When a hostname filter is applied this flag will determine whether to return metrics for the instances located on the specified host or for the
whole database which contains an instance on this host.
type: bool
defined_tag_equals:
description:
- "A list of tag filters to apply. Only resources with a defined tag matching the value will be returned.
Each item in the list has the format \\"{namespace}.{tagName}.{value}\\". All inputs are case-insensitive.
Multiple values for the same key (i.e. same namespace and tag name) are interpreted as \\"OR\\".
Values for different keys (i.e. different namespaces, different tag names, or both) are interpreted as \\"AND\\"."
type: list
elements: str
freeform_tag_equals:
description:
- "A list of tag filters to apply. Only resources with a freeform tag matching the value will be returned.
The key for each tag is \\"{tagName}.{value}\\". All inputs are case-insensitive.
Multiple values for the same tag name are interpreted as \\"OR\\". Values for different tag names are interpreted as \\"AND\\"."
type: list
elements: str
defined_tag_exists:
description:
- "A list of tag existence filters to apply. Only resources for which the specified defined tags exist will be returned.
Each item in the list has the format \\"{namespace}.{tagName}.true\\" (for checking existence of a defined tag)
or \\"{namespace}.true\\". All inputs are case-insensitive.
Currently, only existence (\\"true\\" at the end) is supported. Absence (\\"false\\" at the end) is not supported.
Multiple values for the same key (i.e. same namespace and tag name) are interpreted as \\"OR\\".
Values for different keys (i.e. different namespaces, different tag names, or both) are interpreted as \\"AND\\"."
type: list
elements: str
freeform_tag_exists:
description:
- "A list of tag existence filters to apply. Only resources for which the specified freeform tags exist the value will be returned.
The key for each tag is \\"{tagName}.true\\". All inputs are case-insensitive.
Currently, only existence (\\"true\\" at the end) is supported. Absence (\\"false\\" at the end) is not supported.
Multiple values for different tag names are interpreted as \\"AND\\"."
type: list
elements: str
compartment_id_in_subtree:
description:
- A flag to search all resources within a given compartment and all sub-compartments.
type: bool
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: Get a specific resource_forecast_trend
oci_opsi_resource_forecast_trend_facts:
# required
compartment_id: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
resource_metric: resource_metric_example
# optional
analysis_time_interval: analysis_time_interval_example
time_interval_start: 2013-10-20T19:20:30+01:00
time_interval_end: 2013-10-20T19:20:30+01:00
database_type: [ "$p.getValue()" ]
database_id: [ "$p.getValue()" ]
id: [ "$p.getValue()" ]
exadata_insight_id: [ "$p.getValue()" ]
cdb_name: [ "$p.getValue()" ]
statistic: AVG
forecast_days: 56
forecast_model: LINEAR
utilization_level: HIGH_UTILIZATION
confidence: 56
host_name: [ "$p.getValue()" ]
tablespace_name: tablespace_name_example
is_database_instance_level_metrics: true
defined_tag_equals: [ "$p.getValue()" ]
freeform_tag_equals: [ "$p.getValue()" ]
defined_tag_exists: [ "$p.getValue()" ]
freeform_tag_exists: [ "$p.getValue()" ]
compartment_id_in_subtree: true
"""
RETURN = """
resource_forecast_trend:
description:
- ResourceForecastTrend resource
returned: on success
type: complex
contains:
time_interval_start:
description:
- The start timestamp that was passed into the request.
returned: on success
type: str
sample: "2020-12-06T00:00:00.000Z"
time_interval_end:
description:
- The end timestamp that was passed into the request.
returned: on success
type: str
sample: "2020-12-06T00:00:00.000Z"
resource_metric:
description:
- "Defines the type of resource metric (example: CPU, STORAGE)"
returned: on success
type: str
sample: STORAGE
usage_unit:
description:
- Displays usage unit ( CORES, GB)
returned: on success
type: str
sample: CORES
pattern:
description:
- Time series patterns used in the forecasting.
returned: on success
type: str
sample: LINEAR
tablespace_name:
description:
- The name of tablespace.
returned: on success
type: str
sample: tablespace_name_example
historical_data:
description:
- Time series data used for the forecast analysis.
returned: on success
type: complex
contains:
end_timestamp:
description:
- The timestamp in which the current sampling period ends in RFC 3339 format.
returned: on success
type: str
sample: "2020-05-01T00:00:00.000Z"
usage:
description:
- Total amount used of the resource metric type (CPU, STORAGE).
returned: on success
type: float
sample: 34.5
projected_data:
description:
- Time series data result of the forecasting analysis.
returned: on success
type: complex
contains:
end_timestamp:
description:
- The timestamp in which the current sampling period ends in RFC 3339 format.
returned: on success
type: str
sample: "2020-05-01T00:00:00.000Z"
usage:
description:
- Total amount used of the resource metric type (CPU, STORAGE).
returned: on success
type: float
sample: 34.5
high_value:
description:
- Upper uncertainty bound of the current usage value.
returned: on success
type: float
sample: 1.2
low_value:
description:
- Lower uncertainty bound of the current usage value.
returned: on success
type: float
sample: 1.2
sample: {
"time_interval_start": "2020-12-06T00:00:00.000Z",
"time_interval_end": "2020-12-06T00:00:00.000Z",
"resource_metric": "STORAGE",
"usage_unit": "CORES",
"pattern": "LINEAR",
"tablespace_name": "tablespace_name_example",
"historical_data": [{
"end_timestamp": "2020-05-01T00:00:00.000Z",
"usage": 34.5
}],
"projected_data": [{
"end_timestamp": "2020-05-01T00:00:00.000Z",
"usage": 34.5,
"high_value": 1.2,
"low_value": 1.2
}]
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.opsi import OperationsInsightsClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class ResourceForecastTrendFactsHelperGen(OCIResourceFactsHelperBase):
"""Supported operations: get"""
def get_required_params_for_get(self):
return [
"compartment_id",
"resource_metric",
]
def get_resource(self):
optional_get_method_params = [
"analysis_time_interval",
"time_interval_start",
"time_interval_end",
"database_type",
"database_id",
"id",
"exadata_insight_id",
"cdb_name",
"statistic",
"forecast_days",
"forecast_model",
"utilization_level",
"confidence",
"host_name",
"tablespace_name",
"is_database_instance_level_metrics",
"defined_tag_equals",
"freeform_tag_equals",
"defined_tag_exists",
"freeform_tag_exists",
"compartment_id_in_subtree",
]
optional_kwargs = dict(
(param, self.module.params[param])
for param in optional_get_method_params
if self.module.params.get(param) is not None
)
return oci_common_utils.call_with_backoff(
self.client.summarize_database_insight_resource_forecast_trend,
compartment_id=self.module.params.get("compartment_id"),
resource_metric=self.module.params.get("resource_metric"),
**optional_kwargs
)
ResourceForecastTrendFactsHelperCustom = get_custom_class(
"ResourceForecastTrendFactsHelperCustom"
)
class ResourceFactsHelper(
ResourceForecastTrendFactsHelperCustom, ResourceForecastTrendFactsHelperGen
):
|
def main():
module_args = oci_common_utils.get_common_arg_spec()
module_args.update(
dict(
compartment_id=dict(type="str", required=True),
resource_metric=dict(type="str", required=True),
analysis_time_interval=dict(type="str"),
time_interval_start=dict(type="str"),
time_interval_end=dict(type="str"),
database_type=dict(
type="list",
elements="str",
choices=[
"ADW-S",
"ATP-S",
"ADW-D",
"ATP-D",
"EXTERNAL-PDB",
"EXTERNAL-NONCDB",
],
),
database_id=dict(type="list", elements="str"),
id=dict(type="list", elements="str"),
exadata_insight_id=dict(type="list", elements="str"),
cdb_name=dict(type="list", elements="str"),
statistic=dict(type="str", choices=["AVG", "MAX"]),
forecast_days=dict(type="int"),
forecast_model=dict(
type="str", choices=["LINEAR", "ML_AUTO", "ML_NO_AUTO"]
),
utilization_level=dict(
type="str",
choices=[
"HIGH_UTILIZATION",
"LOW_UTILIZATION",
"MEDIUM_HIGH_UTILIZATION",
"MEDIUM_LOW_UTILIZATION",
],
),
confidence=dict(type="int"),
host_name=dict(type="list", elements="str"),
tablespace_name=dict(type="str"),
is_database_instance_level_metrics=dict(type="bool"),
defined_tag_equals=dict(type="list", elements="str"),
freeform_tag_equals=dict(type="list", elements="str"),
defined_tag_exists=dict(type="list", elements="str"),
freeform_tag_exists=dict(type="list", elements="str"),
compartment_id_in_subtree=dict(type="bool"),
)
)
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_facts_helper = ResourceFactsHelper(
module=module,
resource_type="resource_forecast_trend",
service_client_class=OperationsInsightsClient,
namespace="opsi",
)
result = []
if resource_facts_helper.is_get():
result = resource_facts_helper.get()
else:
resource_facts_helper.fail()
module.exit_json(resource_forecast_trend=result)
if __name__ == "__main__":
main()
| pass |
test_hashlib.py | # Test hashlib module
#
# $Id$
#
# Copyright (C) 2005-2010 Gregory P. Smith ([email protected])
# Licensed to PSF under a Contributor Agreement.
#
import array
from binascii import unhexlify
import hashlib
import itertools
import os
import sys
try:
import threading
except ImportError:
threading = None
import unittest
import warnings
from test import support
from test.support import _4G, bigmemtest, import_fresh_module
from http.client import HTTPException
# Were we compiled --with-pydebug or with #define Py_DEBUG?
COMPILED_WITH_PYDEBUG = hasattr(sys, 'gettotalrefcount')
c_hashlib = import_fresh_module('hashlib', fresh=['_hashlib'])
py_hashlib = import_fresh_module('hashlib', blocked=['_hashlib'])
try:
import _blake2
except ImportError:
_blake2 = None
requires_blake2 = unittest.skipUnless(_blake2, 'requires _blake2')
try:
import _sha3
except ImportError:
_sha3 = None
requires_sha3 = unittest.skipUnless(_sha3, 'requires _sha3')
def hexstr(s):
assert isinstance(s, bytes), repr(s)
h = "0123456789abcdef"
r = ''
for i in s:
r += h[(i >> 4) & 0xF] + h[i & 0xF]
return r
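# For example, hexstr(b'\xde\xad\xbe\xef') == 'deadbeef'.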
URL = "http://www.pythontest.net/hashlib/{}.txt"
def read_vectors(hash_name):
url = URL.format(hash_name)
try:
testdata = support.open_urlresource(url)
except (OSError, HTTPException):
raise unittest.SkipTest("Could not retrieve {}".format(url))
with testdata:
for line in testdata:
line = line.strip()
if line.startswith('#') or not line:
continue
parts = line.split(',')
parts[0] = bytes.fromhex(parts[0])
yield parts
class HashLibTestCase(unittest.TestCase):
supported_hash_names = ( 'md5', 'MD5', 'sha1', 'SHA1',
'sha224', 'SHA224', 'sha256', 'SHA256',
'sha384', 'SHA384', 'sha512', 'SHA512',
'blake2b', 'blake2s',
'sha3_224', 'sha3_256', 'sha3_384', 'sha3_512',
'shake_128', 'shake_256')
shakes = {'shake_128', 'shake_256'}
# Issue #14693: fallback modules are always compiled under POSIX
_warn_on_extension_import = os.name == 'posix' or COMPILED_WITH_PYDEBUG
def _conditional_import_module(self, module_name):
"""Import a module and return a reference to it or None on failure."""
try:
exec('import '+module_name)
except ImportError as error:
if self._warn_on_extension_import:
warnings.warn('Did a C extension fail to compile? %s' % error)
return locals().get(module_name)
def __init__(self, *args, **kwargs):
algorithms = set()
for algorithm in self.supported_hash_names:
algorithms.add(algorithm.lower())
_blake2 = self._conditional_import_module('_blake2')
if _blake2:
algorithms.update({'blake2b', 'blake2s'})
self.constructors_to_test = {}
for algorithm in algorithms:
self.constructors_to_test[algorithm] = set()
# For each algorithm, test the direct constructor and the use
# of hashlib.new given the algorithm name.
for algorithm, constructors in self.constructors_to_test.items():
constructors.add(getattr(hashlib, algorithm))
def _test_algorithm_via_hashlib_new(data=None, _alg=algorithm, **kwargs):
if data is None:
return hashlib.new(_alg, **kwargs)
return hashlib.new(_alg, data, **kwargs)
constructors.add(_test_algorithm_via_hashlib_new)
_hashlib = self._conditional_import_module('_hashlib')
if _hashlib:
# These two algorithms should always be present when this module
# is compiled. If not, something was compiled wrong.
self.assertTrue(hasattr(_hashlib, 'openssl_md5'))
self.assertTrue(hasattr(_hashlib, 'openssl_sha1'))
for algorithm, constructors in self.constructors_to_test.items():
constructor = getattr(_hashlib, 'openssl_'+algorithm, None)
if constructor:
constructors.add(constructor)
def add_builtin_constructor(name):
constructor = getattr(hashlib, "__get_builtin_constructor")(name)
self.constructors_to_test[name].add(constructor)
_md5 = self._conditional_import_module('_md5')
if _md5:
add_builtin_constructor('md5')
_sha1 = self._conditional_import_module('_sha1')
if _sha1:
add_builtin_constructor('sha1')
_sha256 = self._conditional_import_module('_sha256')
if _sha256:
add_builtin_constructor('sha224')
add_builtin_constructor('sha256')
_sha512 = self._conditional_import_module('_sha512')
if _sha512:
add_builtin_constructor('sha384')
add_builtin_constructor('sha512')
if _blake2:
add_builtin_constructor('blake2s')
add_builtin_constructor('blake2b')
_sha3 = self._conditional_import_module('_sha3')
if _sha3:
add_builtin_constructor('sha3_224')
add_builtin_constructor('sha3_256')
add_builtin_constructor('sha3_384')
add_builtin_constructor('sha3_512')
add_builtin_constructor('shake_128')
add_builtin_constructor('shake_256')
super(HashLibTestCase, self).__init__(*args, **kwargs)
@property
def hash_constructors(self):
constructors = self.constructors_to_test.values()
return itertools.chain.from_iterable(constructors)
def test_hash_array(self):
a = array.array("b", range(10))
for cons in self.hash_constructors:
c = cons(a)
if c.name in self.shakes:
c.hexdigest(16)
else:
c.hexdigest()
def test_algorithms_guaranteed(self):
self.assertEqual(hashlib.algorithms_guaranteed,
set(_algo for _algo in self.supported_hash_names
if _algo.islower()))
def test_algorithms_available(self):
self.assertTrue(set(hashlib.algorithms_guaranteed).
issubset(hashlib.algorithms_available))
def test_unknown_hash(self):
self.assertRaises(ValueError, hashlib.new, 'spam spam spam spam spam')
self.assertRaises(TypeError, hashlib.new, 1)
def test_get_builtin_constructor(self):
get_builtin_constructor = getattr(hashlib,
'__get_builtin_constructor')
builtin_constructor_cache = getattr(hashlib,
'__builtin_constructor_cache')
self.assertRaises(ValueError, get_builtin_constructor, 'test')
try:
import _md5
except ImportError:
pass
# This forces an ImportError for "import _md5" statements
sys.modules['_md5'] = None
# clear the cache
builtin_constructor_cache.clear()
try:
self.assertRaises(ValueError, get_builtin_constructor, 'md5')
finally:
if '_md5' in locals():
sys.modules['_md5'] = _md5
else:
del sys.modules['_md5']
self.assertRaises(TypeError, get_builtin_constructor, 3)
constructor = get_builtin_constructor('md5')
self.assertIs(constructor, _md5.md5)
self.assertEqual(sorted(builtin_constructor_cache), ['MD5', 'md5'])
def test_hexdigest(self):
for cons in self.hash_constructors:
h = cons()
if h.name in self.shakes:
self.assertIsInstance(h.digest(16), bytes)
self.assertEqual(hexstr(h.digest(16)), h.hexdigest(16))
else:
self.assertIsInstance(h.digest(), bytes)
self.assertEqual(hexstr(h.digest()), h.hexdigest())
def test_name_attribute(self):
for cons in self.hash_constructors:
h = cons()
self.assertIsInstance(h.name, str)
if h.name in self.supported_hash_names:
self.assertIn(h.name, self.supported_hash_names)
else:
self.assertNotIn(h.name, self.supported_hash_names)
self.assertEqual(h.name, hashlib.new(h.name).name)
def test_large_update(self):
aas = b'a' * 128
bees = b'b' * 127
cees = b'c' * 126
dees = b'd' * 2048 # HASHLIB_GIL_MINSIZE
for cons in self.hash_constructors:
m1 = cons()
m1.update(aas)
m1.update(bees)
m1.update(cees)
m1.update(dees)
if m1.name in self.shakes:
args = (16,)
else:
args = ()
m2 = cons()
m2.update(aas + bees + cees + dees)
self.assertEqual(m1.digest(*args), m2.digest(*args))
m3 = cons(aas + bees + cees + dees)
self.assertEqual(m1.digest(*args), m3.digest(*args))
# verify copy() doesn't touch original
m4 = cons(aas + bees + cees)
m4_digest = m4.digest(*args)
m4_copy = m4.copy()
m4_copy.update(dees)
self.assertEqual(m1.digest(*args), m4_copy.digest(*args))
self.assertEqual(m4.digest(*args), m4_digest)
def check(self, name, data, hexdigest, shake=False, **kwargs):
length = len(hexdigest)//2
hexdigest = hexdigest.lower()
constructors = self.constructors_to_test[name]
# 2 is for the direct constructor (e.g. hashlib.md5(...)) and hashlib.new(name, ...)
self.assertGreaterEqual(len(constructors), 2)
for hash_object_constructor in constructors:
m = hash_object_constructor(data, **kwargs)
computed = m.hexdigest() if not shake else m.hexdigest(length)
self.assertEqual(
computed, hexdigest,
"Hash algorithm %s constructed using %s returned hexdigest"
" %r for %d byte input data that should have hashed to %r."
% (name, hash_object_constructor,
computed, len(data), hexdigest))
computed = m.digest() if not shake else m.digest(length)
digest = bytes.fromhex(hexdigest)
self.assertEqual(computed, digest)
if not shake:
self.assertEqual(len(digest), m.digest_size)
def check_no_unicode(self, algorithm_name):
# Unicode objects are not allowed as input.
constructors = self.constructors_to_test[algorithm_name]
for hash_object_constructor in constructors:
self.assertRaises(TypeError, hash_object_constructor, 'spam')
def test_no_unicode(self):
self.check_no_unicode('md5')
self.check_no_unicode('sha1')
self.check_no_unicode('sha224')
self.check_no_unicode('sha256')
self.check_no_unicode('sha384')
self.check_no_unicode('sha512')
@requires_blake2
def test_no_unicode_blake2(self):
self.check_no_unicode('blake2b')
self.check_no_unicode('blake2s')
@requires_sha3
def test_no_unicode_sha3(self):
self.check_no_unicode('sha3_224')
self.check_no_unicode('sha3_256')
self.check_no_unicode('sha3_384')
self.check_no_unicode('sha3_512')
self.check_no_unicode('shake_128')
self.check_no_unicode('shake_256')
def check_blocksize_name(self, name, block_size=0, digest_size=0,
digest_length=None):
constructors = self.constructors_to_test[name]
for hash_object_constructor in constructors:
m = hash_object_constructor()
self.assertEqual(m.block_size, block_size)
self.assertEqual(m.digest_size, digest_size)
if digest_length:
self.assertEqual(len(m.digest(digest_length)),
digest_length)
self.assertEqual(len(m.hexdigest(digest_length)),
2*digest_length)
else:
self.assertEqual(len(m.digest()), digest_size)
self.assertEqual(len(m.hexdigest()), 2*digest_size)
self.assertEqual(m.name, name)
# split for sha3_512 / _sha3.sha3 object
self.assertIn(name.split("_")[0], repr(m))
def test_blocksize_name(self):
self.check_blocksize_name('md5', 64, 16)
self.check_blocksize_name('sha1', 64, 20)
self.check_blocksize_name('sha224', 64, 28)
self.check_blocksize_name('sha256', 64, 32)
self.check_blocksize_name('sha384', 128, 48)
self.check_blocksize_name('sha512', 128, 64)
@requires_sha3
def test_blocksize_name_sha3(self):
self.check_blocksize_name('sha3_224', 144, 28)
self.check_blocksize_name('sha3_256', 136, 32)
self.check_blocksize_name('sha3_384', 104, 48)
self.check_blocksize_name('sha3_512', 72, 64)
self.check_blocksize_name('shake_128', 168, 0, 32)
self.check_blocksize_name('shake_256', 136, 0, 64)
def check_sha3(self, name, capacity, rate, suffix):
constructors = self.constructors_to_test[name]
for hash_object_constructor in constructors:
m = hash_object_constructor()
self.assertEqual(capacity + rate, 1600)
self.assertEqual(m._capacity_bits, capacity)
self.assertEqual(m._rate_bits, rate)
self.assertEqual(m._suffix, suffix)
@requires_sha3
def test_extra_sha3(self):
self.check_sha3('sha3_224', 448, 1152, b'\x06')
self.check_sha3('sha3_256', 512, 1088, b'\x06')
self.check_sha3('sha3_384', 768, 832, b'\x06')
self.check_sha3('sha3_512', 1024, 576, b'\x06')
self.check_sha3('shake_128', 256, 1344, b'\x1f')
self.check_sha3('shake_256', 512, 1088, b'\x1f')
@requires_blake2
def test_blocksize_name_blake2(self):
self.check_blocksize_name('blake2b', 128, 64)
self.check_blocksize_name('blake2s', 64, 32)
def test_case_md5_0(self):
self.check('md5', b'', 'd41d8cd98f00b204e9800998ecf8427e')
def test_case_md5_1(self):
self.check('md5', b'abc', '900150983cd24fb0d6963f7d28e17f72')
def test_case_md5_2(self):
self.check('md5',
b'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789',
'd174ab98d277d9f5a5611c2c9f419d9f')
@unittest.skipIf(sys.maxsize < _4G + 5, 'test cannot run on 32-bit systems')
@bigmemtest(size=_4G + 5, memuse=1, dry_run=False)
def test_case_md5_huge(self, size):
self.check('md5', b'A'*size, 'c9af2dff37468ce5dfee8f2cfc0a9c6d')
@unittest.skipIf(sys.maxsize < _4G - 1, 'test cannot run on 32-bit systems')
@bigmemtest(size=_4G - 1, memuse=1, dry_run=False)
def test_case_md5_uintmax(self, size):
self.check('md5', b'A'*size, '28138d306ff1b8281f1a9067e1a1a2b3')
# use the three examples from Federal Information Processing Standards
# Publication 180-1, Secure Hash Standard, 1995 April 17
# http://www.itl.nist.gov/div897/pubs/fip180-1.htm
def test_case_sha1_0(self):
self.check('sha1', b"",
"da39a3ee5e6b4b0d3255bfef95601890afd80709")
def test_case_sha1_1(self):
self.check('sha1', b"abc",
"a9993e364706816aba3e25717850c26c9cd0d89d")
def test_case_sha1_2(self):
self.check('sha1',
b"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
"84983e441c3bd26ebaae4aa1f95129e5e54670f1")
def test_case_sha1_3(self):
self.check('sha1', b"a" * 1000000,
"34aa973cd4c4daa4f61eeb2bdbad27316534016f")
# use the examples from Federal Information Processing Standards
# Publication 180-2, Secure Hash Standard, 2002 August 1
# http://csrc.nist.gov/publications/fips/fips180-2/fips180-2.pdf
def test_case_sha224_0(self):
self.check('sha224', b"",
"d14a028c2a3a2bc9476102bb288234c415a2b01f828ea62ac5b3e42f")
def test_case_sha224_1(self):
self.check('sha224', b"abc",
"23097d223405d8228642a477bda255b32aadbce4bda0b3f7e36c9da7")
def test_case_sha224_2(self):
self.check('sha224',
b"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
"75388b16512776cc5dba5da1fd890150b0c6455cb4f58b1952522525")
def test_case_sha224_3(self):
self.check('sha224', b"a" * 1000000,
"20794655980c91d8bbb4c1ea97618a4bf03f42581948b2ee4ee7ad67")
def test_case_sha256_0(self):
self.check('sha256', b"",
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
def test_case_sha256_1(self):
self.check('sha256', b"abc",
"ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad")
def test_case_sha256_2(self):
self.check('sha256',
b"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
"248d6a61d20638b8e5c026930c3e6039a33ce45964ff2167f6ecedd419db06c1")
def test_case_sha256_3(self):
self.check('sha256', b"a" * 1000000,
"cdc76e5c9914fb9281a1c7e284d73e67f1809a48a497200e046d39ccc7112cd0")
def test_case_sha384_0(self):
self.check('sha384', b"",
"38b060a751ac96384cd9327eb1b1e36a21fdb71114be07434c0cc7bf63f6e1da"+
"274edebfe76f65fbd51ad2f14898b95b")
def test_case_sha384_1(self):
self.check('sha384', b"abc",
"cb00753f45a35e8bb5a03d699ac65007272c32ab0eded1631a8b605a43ff5bed"+
"8086072ba1e7cc2358baeca134c825a7")
def test_case_sha384_2(self):
self.check('sha384',
b"abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmn"+
b"hijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu",
"09330c33f71147e83d192fc782cd1b4753111b173b3b05d22fa08086e3b0f712"+
"fcc7c71a557e2db966c3e9fa91746039")
def test_case_sha384_3(self):
self.check('sha384', b"a" * 1000000,
"9d0e1809716474cb086e834e310a4a1ced149e9c00f248527972cec5704c2a5b"+
"07b8b3dc38ecc4ebae97ddd87f3d8985")
def test_case_sha512_0(self):
self.check('sha512', b"",
"cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce"+
"47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e")
def test_case_sha512_1(self):
self.check('sha512', b"abc",
"ddaf35a193617abacc417349ae20413112e6fa4e89a97ea20a9eeee64b55d39a"+
"2192992a274fc1a836ba3c23a3feebbd454d4423643ce80e2a9ac94fa54ca49f")
def test_case_sha512_2(self):
self.check('sha512',
b"abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmn"+
b"hijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu",
"8e959b75dae313da8cf4f72814fc143f8f7779c6eb9f7fa17299aeadb6889018"+
"501d289e4900f7e4331b99dec4b5433ac7d329eeb6dd26545e96e55b874be909")
def test_case_sha512_3(self):
self.check('sha512', b"a" * 1000000,
"e718483d0ce769644e2e42c7bc15b4638e1f98b13b2044285632a803afa973eb"+
"de0ff244877ea60a4cb0432ce577c31beb009c5c2c49aa2e4eadb217ad8cc09b")
def check_blake2(self, constructor, salt_size, person_size, key_size,
digest_size, max_offset):
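        # Check the documented limits of the BLAKE2 keyword arguments: salt,
        # person, key, digest_size, fanout, depth, node_depth, inner_size,
        # leaf_size and node_offset must accept in-range values and reject
        # out-of-range ones.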
self.assertEqual(constructor.SALT_SIZE, salt_size)
for i in range(salt_size + 1):
constructor(salt=b'a' * i)
salt = b'a' * (salt_size + 1)
self.assertRaises(ValueError, constructor, salt=salt)
self.assertEqual(constructor.PERSON_SIZE, person_size)
for i in range(person_size+1):
constructor(person=b'a' * i)
person = b'a' * (person_size + 1)
self.assertRaises(ValueError, constructor, person=person)
self.assertEqual(constructor.MAX_DIGEST_SIZE, digest_size)
for i in range(1, digest_size + 1):
constructor(digest_size=i)
self.assertRaises(ValueError, constructor, digest_size=-1)
self.assertRaises(ValueError, constructor, digest_size=0)
self.assertRaises(ValueError, constructor, digest_size=digest_size+1)
self.assertEqual(constructor.MAX_KEY_SIZE, key_size)
for i in range(key_size+1):
constructor(key=b'a' * i)
key = b'a' * (key_size + 1)
self.assertRaises(ValueError, constructor, key=key)
self.assertEqual(constructor().hexdigest(),
constructor(key=b'').hexdigest())
for i in range(0, 256):
constructor(fanout=i)
self.assertRaises(ValueError, constructor, fanout=-1)
self.assertRaises(ValueError, constructor, fanout=256)
for i in range(1, 256):
constructor(depth=i)
self.assertRaises(ValueError, constructor, depth=-1)
self.assertRaises(ValueError, constructor, depth=0)
self.assertRaises(ValueError, constructor, depth=256)
for i in range(0, 256):
constructor(node_depth=i)
self.assertRaises(ValueError, constructor, node_depth=-1)
self.assertRaises(ValueError, constructor, node_depth=256)
for i in range(0, digest_size + 1):
constructor(inner_size=i)
self.assertRaises(ValueError, constructor, inner_size=-1)
self.assertRaises(ValueError, constructor, inner_size=digest_size+1)
constructor(leaf_size=0)
constructor(leaf_size=(1<<32)-1)
self.assertRaises(OverflowError, constructor, leaf_size=-1)
self.assertRaises(OverflowError, constructor, leaf_size=1<<32)
constructor(node_offset=0)
constructor(node_offset=max_offset)
self.assertRaises(OverflowError, constructor, node_offset=-1)
self.assertRaises(OverflowError, constructor, node_offset=max_offset+1)
constructor(
string=b'',
key=b'',
salt=b'',
person=b'',
digest_size=17,
fanout=1,
depth=1,
leaf_size=256,
node_offset=512,
node_depth=1,
inner_size=7,
last_node=True
)
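    # Self-test in the style of RFC 7693, Appendix E: hash deterministically
    # generated inputs (unkeyed and keyed) at several digest/input lengths,
    # folding every digest into an outer hash whose hexdigest is returned.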
def blake2_rfc7693(self, constructor, md_len, in_len):
def selftest_seq(length, seed):
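            # Deterministic byte-sequence generator used by the self-test.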
mask = (1<<32)-1
a = (0xDEAD4BAD * seed) & mask
b = 1
out = bytearray(length)
for i in range(length):
t = (a + b) & mask
a, b = b, t
out[i] = (t >> 24) & 0xFF
return out
outer = constructor(digest_size=32)
for outlen in md_len:
for inlen in in_len:
indata = selftest_seq(inlen, inlen)
key = selftest_seq(outlen, outlen)
unkeyed = constructor(indata, digest_size=outlen)
outer.update(unkeyed.digest())
keyed = constructor(indata, key=key, digest_size=outlen)
outer.update(keyed.digest())
return outer.hexdigest()
@requires_blake2
    def test_blake2b(self):
self.check_blake2(hashlib.blake2b, 16, 16, 64, 64, (1<<64)-1)
b2b_md_len = [20, 32, 48, 64]
b2b_in_len = [0, 3, 128, 129, 255, 1024]
self.assertEqual(
self.blake2_rfc7693(hashlib.blake2b, b2b_md_len, b2b_in_len),
"c23a7800d98123bd10f506c61e29da5603d763b8bbad2e737f5e765a7bccd475")
@requires_blake2
def test_case_blake2b_0(self):
self.check('blake2b', b"",
"786a02f742015903c6c6fd852552d272912f4740e15847618a86e217f71f5419"+
"d25e1031afee585313896444934eb04b903a685b1448b755d56f701afe9be2ce")
@requires_blake2
def test_case_blake2b_1(self):
self.check('blake2b', b"abc",
"ba80a53f981c4d0d6a2797b69f12f6e94c212f14685ac4b74b12bb6fdbffa2d1"+
"7d87c5392aab792dc252d5de4533cc9518d38aa8dbf1925ab92386edd4009923")
@requires_blake2
def test_blake2b_vectors(self):
for msg, key, md in read_vectors('blake2b'):
key = bytes.fromhex(key)
self.check('blake2b', msg, md, key=key)
@requires_blake2
def test_blake2s(self):
self.check_blake2(hashlib.blake2s, 8, 8, 32, 32, (1<<48)-1)
b2s_md_len = [16, 20, 28, 32]
b2s_in_len = [0, 3, 64, 65, 255, 1024]
self.assertEqual(
self.blake2_rfc7693(hashlib.blake2s, b2s_md_len, b2s_in_len),
"6a411f08ce25adcdfb02aba641451cec53c598b24f4fc787fbdc88797f4c1dfe")
@requires_blake2
def test_case_blake2s_0(self):
self.check('blake2s', b"",
"69217a3079908094e11121d042354a7c1f55b6482ca1a51e1b250dfd1ed0eef9")
@requires_blake2
def test_case_blake2s_1(self):
self.check('blake2s', b"abc",
"508c5e8c327c14e2e1a72ba34eeb452f37458b209ed63a294d999b4c86675982")
@requires_blake2
def test_blake2s_vectors(self):
for msg, key, md in read_vectors('blake2s'):
key = bytes.fromhex(key)
self.check('blake2s', msg, md, key=key)
@requires_sha3
def test_case_sha3_224_0(self):
self.check('sha3_224', b"",
"6b4e03423667dbb73b6e15454f0eb1abd4597f9a1b078e3f5b5a6bc7")
@requires_sha3
def test_case_sha3_224_vector(self):
for msg, md in read_vectors('sha3_224'):
self.check('sha3_224', msg, md)
@requires_sha3
def test_case_sha3_256_0(self):
self.check('sha3_256', b"",
"a7ffc6f8bf1ed76651c14756a061d662f580ff4de43b49fa82d80a4b80f8434a")
@requires_sha3
def test_case_sha3_256_vector(self):
for msg, md in read_vectors('sha3_256'):
self.check('sha3_256', msg, md)
@requires_sha3
def test_case_sha3_384_0(self):
self.check('sha3_384', b"",
"0c63a75b845e4f7d01107d852e4c2485c51a50aaaa94fc61995e71bbee983a2a"+
"c3713831264adb47fb6bd1e058d5f004")
@requires_sha3
def test_case_sha3_384_vector(self):
for msg, md in read_vectors('sha3_384'):
self.check('sha3_384', msg, md)
@requires_sha3
def test_case_sha3_512_0(self):
self.check('sha3_512', b"",
"a69f73cca23a9ac5c8b567dc185a756e97c982164fe25859e0d1dcc1475c80a6"+
"15b2123af1f5f94c11e3e9402c3ac558f500199d95b6d3e301758586281dcd26")
@requires_sha3
def test_case_sha3_512_vector(self):
for msg, md in read_vectors('sha3_512'):
self.check('sha3_512', msg, md)
@requires_sha3
def test_case_shake_128_0(self):
self.check('shake_128', b"",
"7f9c2ba4e88f827d616045507605853ed73b8093f6efbc88eb1a6eacfa66ef26",
True)
self.check('shake_128', b"", "7f9c", True)
@requires_sha3
def test_case_shake128_vector(self):
for msg, md in read_vectors('shake_128'):
self.check('shake_128', msg, md, True)
@requires_sha3
def test_case_shake_256_0(self):
self.check('shake_256', b"",
"46b9dd2b0ba88d13233b3feb743eeb243fcd52ea62b81b82b50c27646ed5762f",
True)
self.check('shake_256', b"", "46b9", True)
@requires_sha3
def test_case_shake256_vector(self):
for msg, md in read_vectors('shake_256'):
self.check('shake_256', msg, md, True)
def test_gil(self):
# Check things work fine with an input larger than the size required
# for multithreaded operation (which is hardwired to 2048).
gil_minsize = 2048
for cons in self.hash_constructors:
m = cons()
m.update(b'1')
m.update(b'#' * gil_minsize)
m.update(b'1')
m = cons(b'x' * gil_minsize)
m.update(b'1')
m = hashlib.md5()
m.update(b'1')
m.update(b'#' * gil_minsize)
m.update(b'1')
self.assertEqual(m.hexdigest(), 'cb1e1a2cbc80be75e19935d621fb9b21')
m = hashlib.md5(b'x' * gil_minsize)
self.assertEqual(m.hexdigest(), 'cfb767f225d58469c5de3632a8803958')
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.reap_threads
def test_threaded_hashing(self):
# Updating the same hash object from several threads at once
# using data chunk sizes containing the same byte sequences.
#
# If the internal locks are working to prevent multiple
# updates on the same object from running at once, the resulting
# hash will be the same as doing it single threaded upfront.
hasher = hashlib.sha1()
num_threads = 5
smallest_data = b'swineflu'
data = smallest_data*200000
expected_hash = hashlib.sha1(data*num_threads).hexdigest()
def hash_in_chunks(chunk_size, event):
index = 0
while index < len(data):
hasher.update(data[index:index+chunk_size])
index += chunk_size
event.set()
events = []
for threadnum in range(num_threads):
chunk_size = len(data) // (10**threadnum)
self.assertGreater(chunk_size, 0)
self.assertEqual(chunk_size % len(smallest_data), 0)
event = threading.Event()
events.append(event)
threading.Thread(target=hash_in_chunks,
args=(chunk_size, event)).start()
for event in events:
event.wait()
self.assertEqual(expected_hash, hasher.hexdigest())
class KDFTests(unittest.TestCase):
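    # Tests for the key derivation functions: pbkdf2_hmac and (when OpenSSL
    # provides it) scrypt.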
pbkdf2_test_vectors = [
(b'password', b'salt', 1, None),
(b'password', b'salt', 2, None),
(b'password', b'salt', 4096, None),
# too slow, it takes over a minute on a fast CPU.
#(b'password', b'salt', 16777216, None),
(b'passwordPASSWORDpassword', b'saltSALTsaltSALTsaltSALTsaltSALTsalt',
4096, -1),
(b'pass\0word', b'sa\0lt', 4096, 16),
]
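    # scrypt test vectors from RFC 7914 (the most expensive vector is omitted)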
scrypt_test_vectors = [
(b'', b'', 16, 1, 1, unhexlify('77d6576238657b203b19ca42c18a0497f16b4844e3074ae8dfdffa3fede21442fcd0069ded0948f8326a753a0fc81f17e8d3e0fb2e0d3628cf35e20c38d18906')),
(b'password', b'NaCl', 1024, 8, 16, unhexlify('fdbabe1c9d3472007856e7190d01e9fe7c6ad7cbc8237830e77376634b3731622eaf30d92e22a3886ff109279d9830dac727afb94a83ee6d8360cbdfa2cc0640')),
(b'pleaseletmein', b'SodiumChloride', 16384, 8, 1, unhexlify('7023bdcb3afd7348461c06cd81fd38ebfda8fbba904f8e3ea9b543f6545da1f2d5432955613f0fcf62d49705242a9af9e61e85dc0d651e40dfcf017b45575887')),
]
pbkdf2_results = {
"sha1": [
# official test vectors from RFC 6070
(bytes.fromhex('0c60c80f961f0e71f3a9b524af6012062fe037a6'), None),
(bytes.fromhex('ea6c014dc72d6f8ccd1ed92ace1d41f0d8de8957'), None),
(bytes.fromhex('4b007901b765489abead49d926f721d065a429c1'), None),
#(bytes.fromhex('eefe3d61cd4da4e4e9945b3d6ba2158c2634e984'), None),
(bytes.fromhex('3d2eec4fe41c849b80c8d83662c0e44a8b291a964c'
'f2f07038'), 25),
(bytes.fromhex('56fa6aa75548099dcc37d7f03425e0c3'), None),],
"sha256": [
(bytes.fromhex('120fb6cffcf8b32c43e7225256c4f837'
'a86548c92ccc35480805987cb70be17b'), None),
(bytes.fromhex('ae4d0c95af6b46d32d0adff928f06dd0'
'2a303f8ef3c251dfd6e2d85a95474c43'), None),
(bytes.fromhex('c5e478d59288c841aa530db6845c4c8d'
'962893a001ce4e11a4963873aa98134a'), None),
#(bytes.fromhex('cf81c66fe8cfc04d1f31ecb65dab4089'
# 'f7f179e89b3b0bcb17ad10e3ac6eba46'), None),
(bytes.fromhex('348c89dbcbd32b2f32d814b8116e84cf2b17'
'347ebc1800181c4e2a1fb8dd53e1c635518c7dac47e9'), 40),
(bytes.fromhex('89b69d0516f829893c696226650a8687'), None),],
"sha512": [
(bytes.fromhex('867f70cf1ade02cff3752599a3a53dc4af34c7a669815ae5'
'd513554e1c8cf252c02d470a285a0501bad999bfe943c08f'
'050235d7d68b1da55e63f73b60a57fce'), None),
(bytes.fromhex('e1d9c16aa681708a45f5c7c4e215ceb66e011a2e9f004071'
'3f18aefdb866d53cf76cab2868a39b9f7840edce4fef5a82'
'be67335c77a6068e04112754f27ccf4e'), None),
(bytes.fromhex('d197b1b33db0143e018b12f3d1d1479e6cdebdcc97c5c0f8'
'7f6902e072f457b5143f30602641b3d55cd335988cb36b84'
'376060ecd532e039b742a239434af2d5'), None),
(bytes.fromhex('8c0511f4c6e597c6ac6315d8f0362e225f3c501495ba23b8'
'68c005174dc4ee71115b59f9e60cd9532fa33e0f75aefe30'
'225c583a186cd82bd4daea9724a3d3b8'), 64),
(bytes.fromhex('9d9e9c4cd21fe4be24d5b8244c759665'), None),],
}
def _test_pbkdf2_hmac(self, pbkdf2):
for digest_name, results in self.pbkdf2_results.items():
for i, vector in enumerate(self.pbkdf2_test_vectors):
password, salt, rounds, dklen = vector
expected, overwrite_dklen = results[i]
if overwrite_dklen:
dklen = overwrite_dklen
out = pbkdf2(digest_name, password, salt, rounds, dklen)
self.assertEqual(out, expected,
(digest_name, password, salt, rounds, dklen))
                out = pbkdf2(digest_name, memoryview(password),
                             memoryview(salt), rounds, dklen)
                self.assertEqual(out, expected)
                out = pbkdf2(digest_name, bytearray(password),
                             bytearray(salt), rounds, dklen)
                self.assertEqual(out, expected)
if dklen is None:
out = pbkdf2(digest_name, password, salt, rounds)
self.assertEqual(out, expected,
(digest_name, password, salt, rounds))
self.assertRaises(TypeError, pbkdf2, b'sha1', b'pass', b'salt', 1)
self.assertRaises(TypeError, pbkdf2, 'sha1', 'pass', 'salt', 1)
self.assertRaises(ValueError, pbkdf2, 'sha1', b'pass', b'salt', 0)
self.assertRaises(ValueError, pbkdf2, 'sha1', b'pass', b'salt', -1)
self.assertRaises(ValueError, pbkdf2, 'sha1', b'pass', b'salt', 1, 0)
self.assertRaises(ValueError, pbkdf2, 'sha1', b'pass', b'salt', 1, -1)
with self.assertRaisesRegex(ValueError, 'unsupported hash type'):
pbkdf2('unknown', b'pass', b'salt', 1)
out = pbkdf2(hash_name='sha1', password=b'password', salt=b'salt',
iterations=1, dklen=None)
self.assertEqual(out, self.pbkdf2_results['sha1'][0][0])
def test_pbkdf2_hmac_py(self):
self._test_pbkdf2_hmac(py_hashlib.pbkdf2_hmac)
@unittest.skipUnless(hasattr(c_hashlib, 'pbkdf2_hmac'),
' test requires OpenSSL > 1.0')
def test_pbkdf2_hmac_c(self):
self._test_pbkdf2_hmac(c_hashlib.pbkdf2_hmac)
@unittest.skipUnless(hasattr(c_hashlib, 'scrypt'),
' test requires OpenSSL > 1.1')
def test_scrypt(self):
for password, salt, n, r, p, expected in self.scrypt_test_vectors:
result = hashlib.scrypt(password, salt=salt, n=n, r=r, p=p)
self.assertEqual(result, expected)
        # these values should work
hashlib.scrypt(b'password', salt=b'salt', n=2, r=8, p=1)
# password and salt must be bytes-like
with self.assertRaises(TypeError):
hashlib.scrypt('password', salt=b'salt', n=2, r=8, p=1)
with self.assertRaises(TypeError):
hashlib.scrypt(b'password', salt='salt', n=2, r=8, p=1)
# require keyword args
with self.assertRaises(TypeError):
hashlib.scrypt(b'password')
with self.assertRaises(TypeError):
hashlib.scrypt(b'password', b'salt')
with self.assertRaises(TypeError):
hashlib.scrypt(b'password', 2, 8, 1, salt=b'salt')
for n in [-1, 0, 1, None]:
with self.assertRaises((ValueError, OverflowError, TypeError)):
hashlib.scrypt(b'password', salt=b'salt', n=n, r=8, p=1)
for r in [-1, 0, None]:
with self.assertRaises((ValueError, OverflowError, TypeError)):
hashlib.scrypt(b'password', salt=b'salt', n=2, r=r, p=1)
for p in [-1, 0, None]:
with self.assertRaises((ValueError, OverflowError, TypeError)):
hashlib.scrypt(b'password', salt=b'salt', n=2, r=8, p=p)
for maxmem in [-1, None]:
with self.assertRaises((ValueError, OverflowError, TypeError)):
hashlib.scrypt(b'password', salt=b'salt', n=2, r=8, p=1,
maxmem=maxmem)
for dklen in [-1, None]:
with self.assertRaises((ValueError, OverflowError, TypeError)):
hashlib.scrypt(b'password', salt=b'salt', n=2, r=8, p=1,
dklen=dklen)
if __name__ == "__main__":
unittest.main()
RoomView.tsx | /*
Copyright 2015, 2016 OpenMarket Ltd
Copyright 2017 Vector Creations Ltd
Copyright 2018, 2019 New Vector Ltd
Copyright 2019 The Matrix.org Foundation C.I.C.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// TODO: This component is enormous! There are several things which could stand alone:
// - Search results component
// - Drag and drop
import React, { createRef } from 'react';
import classNames from 'classnames';
import { IRecommendedVersion, NotificationCountType, Room } from "matrix-js-sdk/src/models/room";
import { MatrixEvent } from "matrix-js-sdk/src/models/event";
import { EventSubscription } from "fbemitter";
import { ISearchResults } from 'matrix-js-sdk/src/@types/search';
import { logger } from "matrix-js-sdk/src/logger";
import { EventTimeline } from 'matrix-js-sdk/src/models/event-timeline';
import { EventType } from 'matrix-js-sdk/src/@types/event';
import { RoomState } from 'matrix-js-sdk/src/models/room-state';
import { CallState, CallType, MatrixCall } from "matrix-js-sdk/src/webrtc/call";
import { throttle } from "lodash";
import { MatrixError } from 'matrix-js-sdk/src/http-api';
import shouldHideEvent from '../../shouldHideEvent';
import { _t } from '../../languageHandler';
import { RoomPermalinkCreator } from '../../utils/permalinks/Permalinks';
import ResizeNotifier from '../../utils/ResizeNotifier';
import ContentMessages from '../../ContentMessages';
import Modal from '../../Modal';
import CallHandler, { CallHandlerEvent } from '../../CallHandler';
import dis from '../../dispatcher/dispatcher';
import * as Rooms from '../../Rooms';
import eventSearch, { searchPagination } from '../../Searching';
import MainSplit from './MainSplit';
import RightPanel from './RightPanel';
import RoomViewStore from '../../stores/RoomViewStore';
import RoomScrollStateStore, { ScrollState } from '../../stores/RoomScrollStateStore';
import WidgetEchoStore from '../../stores/WidgetEchoStore';
import SettingsStore from "../../settings/SettingsStore";
import { Layout } from "../../settings/enums/Layout";
import AccessibleButton from "../views/elements/AccessibleButton";
import RightPanelStore from "../../stores/right-panel/RightPanelStore";
import { haveTileForEvent } from "../views/rooms/EventTile";
import RoomContext, { TimelineRenderingType } from "../../contexts/RoomContext";
import MatrixClientContext, { MatrixClientProps, withMatrixClientHOC } from "../../contexts/MatrixClientContext";
import { E2EStatus, shieldStatusForRoom } from '../../utils/ShieldUtils';
import { Action } from "../../dispatcher/actions";
import { IMatrixClientCreds } from "../../MatrixClientPeg";
import ScrollPanel from "./ScrollPanel";
import TimelinePanel from "./TimelinePanel";
import ErrorBoundary from "../views/elements/ErrorBoundary";
import RoomPreviewBar from "../views/rooms/RoomPreviewBar";
import SearchBar, { SearchScope } from "../views/rooms/SearchBar";
import RoomUpgradeWarningBar from "../views/rooms/RoomUpgradeWarningBar";
import AuxPanel from "../views/rooms/AuxPanel";
import RoomHeader from "../views/rooms/RoomHeader";
import { XOR } from "../../@types/common";
import { IOOBData, IThreepidInvite } from "../../stores/ThreepidInviteStore";
import EffectsOverlay from "../views/elements/EffectsOverlay";
import { containsEmoji } from '../../effects/utils';
import { CHAT_EFFECTS } from '../../effects';
import WidgetStore from "../../stores/WidgetStore";
import { UPDATE_EVENT } from "../../stores/AsyncStore";
import Notifier from "../../Notifier";
import { showToast as showNotificationsToast } from "../../toasts/DesktopNotificationsToast";
import { RoomNotificationStateStore } from "../../stores/notifications/RoomNotificationStateStore";
import { Container, WidgetLayoutStore } from "../../stores/widgets/WidgetLayoutStore";
import { getKeyBindingsManager, RoomAction } from '../../KeyBindingsManager';
import { objectHasDiff } from "../../utils/objects";
import SpaceRoomView from "./SpaceRoomView";
import { IOpts } from "../../createRoom";
import { replaceableComponent } from "../../utils/replaceableComponent";
import EditorStateTransfer from "../../utils/EditorStateTransfer";
import ErrorDialog from '../views/dialogs/ErrorDialog';
import SearchResultTile from '../views/rooms/SearchResultTile';
import Spinner from "../views/elements/Spinner";
import UploadBar from './UploadBar';
import RoomStatusBar from "./RoomStatusBar";
import MessageComposer from '../views/rooms/MessageComposer';
import JumpToBottomButton from "../views/rooms/JumpToBottomButton";
import TopUnreadMessagesBar from "../views/rooms/TopUnreadMessagesBar";
import SpaceStore from "../../stores/spaces/SpaceStore";
import { showThread } from '../../dispatcher/dispatch-actions/threads';
import { fetchInitialEvent } from "../../utils/EventUtils";
import { ComposerType } from "../../dispatcher/payloads/ComposerInsertPayload";
import AppsDrawer from '../views/rooms/AppsDrawer';
import { RightPanelPhases } from '../../stores/right-panel/RightPanelStorePhases';
import { ActionPayload } from "../../dispatcher/payloads";
const DEBUG = false;
let debuglog = function(msg: string) {};
const BROWSER_SUPPORTS_SANDBOX = 'sandbox' in document.createElement('iframe');
if (DEBUG) {
// using bind means that we get to keep useful line numbers in the console
debuglog = logger.log.bind(console);
}
interface IRoomProps extends MatrixClientProps {
threepidInvite: IThreepidInvite;
oobData?: IOOBData;
resizeNotifier: ResizeNotifier;
justCreatedOpts?: IOpts;
forceTimeline?: boolean; // should we force access to the timeline, overriding (for eg) spaces
// Called with the credentials of a registered user (if they were a ROU that transitioned to PWLU)
onRegistered?(credentials: IMatrixClientCreds): void;
}
// This defines the content of the mainSplit.
// If the mainSplit does not contain the Timeline, the chat is shown in the right panel.
enum MainSplitContentType {
Timeline,
MaximisedWidget,
// Video
}
export interface IRoomState {
room?: Room;
roomId?: string;
roomAlias?: string;
roomLoading: boolean;
peekLoading: boolean;
shouldPeek: boolean;
// used to trigger a rerender in TimelinePanel once the members are loaded,
// so RR are rendered again (now with the members available), ...
membersLoaded: boolean;
// The event to be scrolled to initially
initialEventId?: string;
// The offset in pixels from the event with which to scroll vertically
initialEventPixelOffset?: number;
// Whether to highlight the event scrolled to
isInitialEventHighlighted?: boolean;
replyToEvent?: MatrixEvent;
numUnreadMessages: number;
draggingFile: boolean;
searching: boolean;
searchTerm?: string;
searchScope?: SearchScope;
searchResults?: XOR<{}, ISearchResults>;
searchHighlights?: string[];
searchInProgress?: boolean;
callState?: CallState;
guestsCanJoin: boolean;
canPeek: boolean;
showApps: boolean;
isPeeking: boolean;
showRightPanel: boolean;
// error object, as from the matrix client/server API
// If we failed to load information about the room,
// store the error here.
roomLoadError?: MatrixError;
// Have we sent a request to join the room that we're waiting to complete?
joining: boolean;
// this is true if we are fully scrolled-down, and are looking at
// the end of the live timeline. It has the effect of hiding the
// 'scroll to bottom' knob, among a couple of other things.
atEndOfLiveTimeline: boolean;
// used by componentDidUpdate to avoid unnecessary checks
atEndOfLiveTimelineInit: boolean;
showTopUnreadMessagesBar: boolean;
statusBarVisible: boolean;
// We load this later by asking the js-sdk to suggest a version for us.
// This object is the result of Room#getRecommendedVersion()
upgradeRecommendation?: IRecommendedVersion;
canReact: boolean;
canReply: boolean;
layout: Layout;
lowBandwidth: boolean;
alwaysShowTimestamps: boolean;
showTwelveHourTimestamps: boolean;
readMarkerInViewThresholdMs: number;
readMarkerOutOfViewThresholdMs: number;
showHiddenEventsInTimeline: boolean;
showReadReceipts: boolean;
showRedactions: boolean;
showJoinLeaves: boolean;
showAvatarChanges: boolean;
showDisplaynameChanges: boolean;
matrixClientIsReady: boolean;
showUrlPreview?: boolean;
e2eStatus?: E2EStatus;
rejecting?: boolean;
rejectError?: Error;
hasPinnedWidgets?: boolean;
mainSplitContentType?: MainSplitContentType;
dragCounter: number;
// whether or not a spaces context switch brought us here,
// if it did we don't want the room to be marked as read as soon as it is loaded.
wasContextSwitch?: boolean;
editState?: EditorStateTransfer;
timelineRenderingType: TimelineRenderingType;
liveTimeline?: EventTimeline;
}
@replaceableComponent("structures.RoomView")
export class RoomView extends React.Component<IRoomProps, IRoomState> {
private readonly dispatcherRef: string;
private readonly roomStoreToken: EventSubscription;
private settingWatchers: string[];
private unmounted = false;
private permalinkCreators: Record<string, RoomPermalinkCreator> = {};
private searchId: number;
private roomView = createRef<HTMLElement>();
private searchResultsPanel = createRef<ScrollPanel>();
private messagePanel: TimelinePanel;
static contextType = MatrixClientContext;
constructor(props, context) {
super(props, context);
const llMembers = this.context.hasLazyLoadMembersEnabled();
this.state = {
roomId: null,
roomLoading: true,
peekLoading: false,
shouldPeek: true,
membersLoaded: !llMembers,
numUnreadMessages: 0,
draggingFile: false,
searching: false,
searchResults: null,
callState: null,
guestsCanJoin: false,
canPeek: false,
showApps: false,
isPeeking: false,
showRightPanel: RightPanelStore.instance.isOpenForRoom,
joining: false,
atEndOfLiveTimeline: true,
atEndOfLiveTimelineInit: false,
showTopUnreadMessagesBar: false,
statusBarVisible: false,
canReact: false,
canReply: false,
layout: SettingsStore.getValue("layout"),
lowBandwidth: SettingsStore.getValue("lowBandwidth"),
alwaysShowTimestamps: SettingsStore.getValue("alwaysShowTimestamps"),
showTwelveHourTimestamps: SettingsStore.getValue("showTwelveHourTimestamps"),
readMarkerInViewThresholdMs: SettingsStore.getValue("readMarkerInViewThresholdMs"),
readMarkerOutOfViewThresholdMs: SettingsStore.getValue("readMarkerOutOfViewThresholdMs"),
showHiddenEventsInTimeline: SettingsStore.getValue("showHiddenEventsInTimeline"),
showReadReceipts: true,
showRedactions: true,
showJoinLeaves: true,
showAvatarChanges: true,
showDisplaynameChanges: true,
matrixClientIsReady: this.context && this.context.isInitialSyncComplete(),
mainSplitContentType: MainSplitContentType.Timeline,
dragCounter: 0,
timelineRenderingType: TimelineRenderingType.Room,
liveTimeline: undefined,
};
this.dispatcherRef = dis.register(this.onAction);
this.context.on("Room", this.onRoom);
this.context.on("Room.timeline", this.onRoomTimeline);
this.context.on("Room.name", this.onRoomName);
this.context.on("Room.accountData", this.onRoomAccountData);
this.context.on("RoomState.events", this.onRoomStateEvents);
this.context.on("RoomState.members", this.onRoomStateMember);
this.context.on("Room.myMembership", this.onMyMembership);
this.context.on("accountData", this.onAccountData);
this.context.on("crypto.keyBackupStatus", this.onKeyBackupStatus);
this.context.on("deviceVerificationChanged", this.onDeviceVerificationChanged);
this.context.on("userTrustStatusChanged", this.onUserVerificationChanged);
this.context.on("crossSigning.keysChanged", this.onCrossSigningKeysChanged);
this.context.on("Event.decrypted", this.onEventDecrypted);
// Start listening for RoomViewStore updates
this.roomStoreToken = RoomViewStore.addListener(this.onRoomViewStoreUpdate);
RightPanelStore.instance.on(UPDATE_EVENT, this.onRightPanelStoreUpdate);
WidgetEchoStore.on(UPDATE_EVENT, this.onWidgetEchoStoreUpdate);
WidgetStore.instance.on(UPDATE_EVENT, this.onWidgetStoreUpdate);
this.settingWatchers = [
SettingsStore.watchSetting("layout", null, (...[,,, value]) =>
this.setState({ layout: value as Layout }),
),
SettingsStore.watchSetting("lowBandwidth", null, (...[,,, value]) =>
this.setState({ lowBandwidth: value as boolean }),
),
SettingsStore.watchSetting("alwaysShowTimestamps", null, (...[,,, value]) =>
this.setState({ alwaysShowTimestamps: value as boolean }),
),
SettingsStore.watchSetting("showTwelveHourTimestamps", null, (...[,,, value]) =>
this.setState({ showTwelveHourTimestamps: value as boolean }),
),
SettingsStore.watchSetting("readMarkerInViewThresholdMs", null, (...[,,, value]) =>
this.setState({ readMarkerInViewThresholdMs: value as number }),
),
SettingsStore.watchSetting("readMarkerOutOfViewThresholdMs", null, (...[,,, value]) =>
this.setState({ readMarkerOutOfViewThresholdMs: value as number }),
),
SettingsStore.watchSetting("showHiddenEventsInTimeline", null, (...[,,, value]) =>
this.setState({ showHiddenEventsInTimeline: value as boolean }),
),
];
}
private onWidgetStoreUpdate = () => {
if (!this.state.room) return;
this.checkWidgets(this.state.room);
};
private onWidgetEchoStoreUpdate = () => {
if (!this.state.room) return;
this.checkWidgets(this.state.room);
};
private onWidgetLayoutChange = () => {
if (!this.state.room) return;
dis.dispatch({
action: "appsDrawer",
show: true,
});
if (WidgetLayoutStore.instance.hasMaximisedWidget(this.state.room)) {
// Show chat in right panel when a widget is maximised
RightPanelStore.instance.setCard({ phase: RightPanelPhases.Timeline });
} else if (
RightPanelStore.instance.isOpenForRoom &&
RightPanelStore.instance.roomPhaseHistory.some(card => (card.phase === RightPanelPhases.Timeline))
) {
// hide chat in right panel when the widget is minimized
RightPanelStore.instance.setCard({ phase: RightPanelPhases.RoomSummary });
RightPanelStore.instance.togglePanel();
}
this.checkWidgets(this.state.room);
};
private checkWidgets = (room) => {
this.setState({
hasPinnedWidgets: WidgetLayoutStore.instance.hasPinnedWidgets(room),
mainSplitContentType: this.getMainSplitContentType(room),
showApps: this.shouldShowApps(room),
});
};
private getMainSplitContentType = (room) => {
// TODO-video check if video should be displayed in main panel
return (WidgetLayoutStore.instance.hasMaximisedWidget(room))
? MainSplitContentType.MaximisedWidget
: MainSplitContentType.Timeline;
};
private onReadReceiptsChange = () => {
this.setState({
showReadReceipts: SettingsStore.getValue("showReadReceipts", this.state.roomId),
});
};
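    // Pull the latest room view state out of RoomViewStore into component
    // state. Registered as a store listener, and also called directly with
    // initial=true from componentDidMount.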
private onRoomViewStoreUpdate = async (initial?: boolean): Promise<void> => {
if (this.unmounted) {
return;
}
if (!initial && this.state.roomId !== RoomViewStore.getRoomId()) {
// RoomView explicitly does not support changing what room
// is being viewed: instead it should just be re-mounted when
// switching rooms. Therefore, if the room ID changes, we
// ignore this. We either need to do this or add code to handle
// saving the scroll position (otherwise we end up saving the
// scroll position against the wrong room).
// Given that doing the setState here would cause a bunch of
// unnecessary work, we just ignore the change since we know
// that if the current room ID has changed from what we thought
// it was, it means we're about to be unmounted.
return;
}
const roomId = RoomViewStore.getRoomId();
const newState: Pick<IRoomState, any> = {
roomId,
roomAlias: RoomViewStore.getRoomAlias(),
roomLoading: RoomViewStore.isRoomLoading(),
roomLoadError: RoomViewStore.getRoomLoadError(),
joining: RoomViewStore.isJoining(),
replyToEvent: RoomViewStore.getQuotingEvent(),
// we should only peek once we have a ready client
shouldPeek: this.state.matrixClientIsReady && RoomViewStore.shouldPeek(),
showReadReceipts: SettingsStore.getValue("showReadReceipts", roomId),
showRedactions: SettingsStore.getValue("showRedactions", roomId),
showJoinLeaves: SettingsStore.getValue("showJoinLeaves", roomId),
showAvatarChanges: SettingsStore.getValue("showAvatarChanges", roomId),
showDisplaynameChanges: SettingsStore.getValue("showDisplaynameChanges", roomId),
wasContextSwitch: RoomViewStore.getWasContextSwitch(),
initialEventId: null, // default to clearing this, will get set later in the method if needed
};
const initialEventId = RoomViewStore.getInitialEventId();
if (initialEventId) {
const room = this.context.getRoom(roomId);
let initialEvent = room?.findEventById(initialEventId);
// The event does not exist in the current sync data
// We need to fetch it to know whether to route this request
// to the main timeline or to a threaded one
// In the current state, if a thread does not exist in the sync data
// We will only display the event targeted by the `matrix.to` link
// and the root event.
// The rest will be lost for now, until the aggregation API on the server
// becomes available to fetch a whole thread
if (!initialEvent) {
initialEvent = await fetchInitialEvent(
this.context,
roomId,
initialEventId,
);
}
const thread = initialEvent?.getThread();
if (thread && !initialEvent?.isThreadRoot) {
showThread({
rootEvent: thread.rootEvent,
initialEvent,
highlighted: RoomViewStore.isInitialEventHighlighted(),
});
} else {
newState.initialEventId = initialEventId;
newState.isInitialEventHighlighted = RoomViewStore.isInitialEventHighlighted();
if (thread && initialEvent?.isThreadRoot) {
showThread({
rootEvent: thread.rootEvent,
initialEvent,
highlighted: RoomViewStore.isInitialEventHighlighted(),
});
}
}
}
// Add watchers for each of the settings we just looked up
this.settingWatchers = this.settingWatchers.concat([
SettingsStore.watchSetting("showReadReceipts", roomId, (...[,,, value]) =>
this.setState({ showReadReceipts: value as boolean }),
),
SettingsStore.watchSetting("showRedactions", roomId, (...[,,, value]) =>
this.setState({ showRedactions: value as boolean }),
),
SettingsStore.watchSetting("showJoinLeaves", roomId, (...[,,, value]) =>
this.setState({ showJoinLeaves: value as boolean }),
),
SettingsStore.watchSetting("showAvatarChanges", roomId, (...[,,, value]) =>
this.setState({ showAvatarChanges: value as boolean }),
),
SettingsStore.watchSetting("showDisplaynameChanges", roomId, (...[,,, value]) =>
this.setState({ showDisplaynameChanges: value as boolean }),
),
]);
if (!initial && this.state.shouldPeek && !newState.shouldPeek) {
// Stop peeking because we have joined this room now
this.context.stopPeeking();
}
// Temporary logging to diagnose https://github.com/vector-im/element-web/issues/4307
logger.log(
'RVS update:',
newState.roomId,
newState.roomAlias,
'loading?', newState.roomLoading,
'joining?', newState.joining,
'initial?', initial,
'shouldPeek?', newState.shouldPeek,
);
// NB: This does assume that the roomID will not change for the lifetime of
// the RoomView instance
if (initial) {
newState.room = this.context.getRoom(newState.roomId);
if (newState.room) {
newState.showApps = this.shouldShowApps(newState.room);
this.onRoomLoaded(newState.room);
}
}
if (this.state.roomId === null && newState.roomId !== null) {
// Get the scroll state for the new room
// If an event ID wasn't specified, default to the one saved for this room
// in the scroll state store. Assume initialEventPixelOffset should be set.
if (!newState.initialEventId) {
const roomScrollState = RoomScrollStateStore.getScrollState(newState.roomId);
if (roomScrollState) {
newState.initialEventId = roomScrollState.focussedEvent;
newState.initialEventPixelOffset = roomScrollState.pixelOffset;
}
}
}
// Clear the search results when clicking a search result (which changes the
// currently scrolled to event, this.state.initialEventId).
if (this.state.initialEventId !== newState.initialEventId) {
newState.searchResults = null;
}
this.setState(newState);
// At this point, newState.roomId could be null (e.g. the alias might not
// have been resolved yet) so anything called here must handle this case.
// We pass the new state into this function for it to read: it needs to
// observe the new state but we don't want to put it in the setState
// callback because this would prevent the setStates from being batched,
// ie. cause it to render RoomView twice rather than the once that is necessary.
if (initial) {
this.setupRoom(newState.room, newState.roomId, newState.joining, newState.shouldPeek);
}
};
private getRoomId = () => {
// According to `onRoomViewStoreUpdate`, `state.roomId` can be null
// if we have a room alias we haven't resolved yet. To work around this,
// first we'll try the room object if it's there, and then fallback to
// the bare room ID. (We may want to update `state.roomId` after
// resolving aliases, so we could always trust it.)
return this.state.room ? this.state.room.roomId : this.state.roomId;
};
private getPermalinkCreatorForRoom(room: Room) {
if (this.permalinkCreators[room.roomId]) return this.permalinkCreators[room.roomId];
this.permalinkCreators[room.roomId] = new RoomPermalinkCreator(room);
if (this.state.room && room.roomId === this.state.room.roomId) {
// We want to watch for changes in the creator for the primary room in the view, but
// don't need to do so for search results.
this.permalinkCreators[room.roomId].start();
} else {
this.permalinkCreators[room.roomId].load();
}
return this.permalinkCreators[room.roomId];
}
private stopAllPermalinkCreators() {
if (!this.permalinkCreators) return;
for (const roomId of Object.keys(this.permalinkCreators)) {
this.permalinkCreators[roomId].stop();
}
}
private setupRoom(room: Room, roomId: string, joining: boolean, shouldPeek: boolean) {
// if this is an unknown room then we're in one of three states:
// - This is a room we can peek into (search engine) (we can /peek)
// - This is a room we can publicly join or were invited to. (we can /join)
// - This is a room we cannot join at all. (no action can help us)
// We can't try to /join because this may implicitly accept invites (!)
// We can /peek though. If it fails then we present the join UI. If it
// succeeds then great, show the preview (but we still may be able to /join!).
// Note that peeking works by room ID and room ID only, as opposed to joining
// which must be by alias or invite wherever possible (peeking currently does
// not work over federation).
// NB. We peek if we have never seen the room before (i.e. js-sdk does not know
// about it). We don't peek in the historical case where we were joined but are
// now not joined because the js-sdk peeking API will clobber our historical room,
// making it impossible to indicate a newly joined room.
if (!joining && roomId) {
if (!room && shouldPeek) {
logger.info("Attempting to peek into room %s", roomId);
this.setState({
peekLoading: true,
isPeeking: true, // this will change to false if peeking fails
});
this.context.peekInRoom(roomId).then((room) => {
if (this.unmounted) {
return;
}
this.setState({
room: room,
peekLoading: false,
});
this.onRoomLoaded(room);
}).catch((err) => {
if (this.unmounted) {
return;
}
// Stop peeking if anything went wrong
this.setState({
isPeeking: false,
});
// This won't necessarily be a MatrixError, but we duck-type
// here and say if it's got an 'errcode' key with the right value,
// it means we can't peek.
if (err.errcode === "M_GUEST_ACCESS_FORBIDDEN" || err.errcode === 'M_FORBIDDEN') {
// This is fine: the room just isn't peekable (we assume).
this.setState({
peekLoading: false,
});
} else {
throw err;
}
});
} else if (room) {
// Stop peeking because we have joined this room previously
this.context.stopPeeking();
this.setState({ isPeeking: false });
}
}
}
private shouldShowApps(room: Room) {
if (!BROWSER_SUPPORTS_SANDBOX || !room) return false;
// Check if user has previously chosen to hide the app drawer for this
// room. If so, do not show apps
const hideWidgetKey = room.roomId + "_hide_widget_drawer";
const hideWidgetDrawer = localStorage.getItem(hideWidgetKey);
// If unset show the Tray
// Otherwise (in case the user set hideWidgetDrawer by clicking the button) follow the parameter.
        const isManuallyShown = hideWidgetDrawer ? hideWidgetDrawer === "false" : true;
const widgets = WidgetLayoutStore.instance.getContainerWidgets(room, Container.Top);
return isManuallyShown && widgets.length > 0;
}
componentDidMount() {
this.onRoomViewStoreUpdate(true);
const call = this.getCallForRoom();
const callState = call ? call.state : null;
this.setState({
callState: callState,
});
CallHandler.instance.on(CallHandlerEvent.CallState, this.onCallState);
window.addEventListener('beforeunload', this.onPageUnload);
}
shouldComponentUpdate(nextProps, nextState) {
const hasPropsDiff = objectHasDiff(this.props, nextProps);
const { upgradeRecommendation, ...state } = this.state;
const { upgradeRecommendation: newUpgradeRecommendation, ...newState } = nextState;
const hasStateDiff =
newUpgradeRecommendation?.needsUpgrade !== upgradeRecommendation?.needsUpgrade ||
objectHasDiff(state, newState);
return hasPropsDiff || hasStateDiff;
}
componentDidUpdate() {
if (this.roomView.current) {
const roomView = this.roomView.current;
if (!roomView.ondrop) {
roomView.addEventListener('drop', this.onDrop);
roomView.addEventListener('dragover', this.onDragOver);
roomView.addEventListener('dragenter', this.onDragEnter);
roomView.addEventListener('dragleave', this.onDragLeave);
}
}
// Note: We check the ref here with a flag because componentDidMount, despite
// documentation, does not define our messagePanel ref. It looks like our spinner
// in render() prevents the ref from being set on first mount, so we try and
// catch the messagePanel when it does mount. Because we only want the ref once,
// we use a boolean flag to avoid duplicate work.
if (this.messagePanel && !this.state.atEndOfLiveTimelineInit) {
this.setState({
atEndOfLiveTimelineInit: true,
atEndOfLiveTimeline: this.messagePanel.isAtEndOfLiveTimeline(),
});
}
}
componentWillUnmount() {
// set a boolean to say we've been unmounted, which any pending
// promises can use to throw away their results.
//
// (We could use isMounted, but facebook have deprecated that.)
this.unmounted = true;
CallHandler.instance.removeListener(CallHandlerEvent.CallState, this.onCallState);
// update the scroll map before we get unmounted
if (this.state.roomId) {
RoomScrollStateStore.setScrollState(this.state.roomId, this.getScrollState());
}
if (this.state.shouldPeek) {
this.context.stopPeeking();
}
// stop tracking room changes to format permalinks
this.stopAllPermalinkCreators();
if (this.roomView.current) {
// disconnect the D&D event listeners from the room view. This
// is really just for hygiene - we're going to be
// deleted anyway, so it doesn't matter if the event listeners
// don't get cleaned up.
const roomView = this.roomView.current;
roomView.removeEventListener('drop', this.onDrop);
roomView.removeEventListener('dragover', this.onDragOver);
roomView.removeEventListener('dragenter', this.onDragEnter);
roomView.removeEventListener('dragleave', this.onDragLeave);
}
dis.unregister(this.dispatcherRef);
if (this.context) {
this.context.removeListener("Room", this.onRoom);
this.context.removeListener("Room.timeline", this.onRoomTimeline);
this.context.removeListener("Room.name", this.onRoomName);
this.context.removeListener("Room.accountData", this.onRoomAccountData);
this.context.removeListener("RoomState.events", this.onRoomStateEvents);
this.context.removeListener("Room.myMembership", this.onMyMembership);
this.context.removeListener("RoomState.members", this.onRoomStateMember);
this.context.removeListener("accountData", this.onAccountData);
this.context.removeListener("crypto.keyBackupStatus", this.onKeyBackupStatus);
this.context.removeListener("deviceVerificationChanged", this.onDeviceVerificationChanged);
this.context.removeListener("userTrustStatusChanged", this.onUserVerificationChanged);
this.context.removeListener("crossSigning.keysChanged", this.onCrossSigningKeysChanged);
this.context.removeListener("Event.decrypted", this.onEventDecrypted);
}
window.removeEventListener('beforeunload', this.onPageUnload);
// Remove RoomStore listener
if (this.roomStoreToken) {
this.roomStoreToken.remove();
}
RightPanelStore.instance.off(UPDATE_EVENT, this.onRightPanelStoreUpdate);
WidgetEchoStore.removeListener(UPDATE_EVENT, this.onWidgetEchoStoreUpdate);
WidgetStore.instance.removeListener(UPDATE_EVENT, this.onWidgetStoreUpdate);
if (this.state.room) {
WidgetLayoutStore.instance.off(
WidgetLayoutStore.emissionForRoom(this.state.room),
this.onWidgetLayoutChange,
);
}
CallHandler.instance.off(CallHandlerEvent.CallState, this.onCallState);
        // cancel any pending calls to the throttled updater
this.updateRoomMembers.cancel();
for (const watcher of this.settingWatchers) {
SettingsStore.unwatchSetting(watcher);
}
}
private onUserScroll = () => {
if (this.state.initialEventId && this.state.isInitialEventHighlighted) {
dis.dispatch({
action: Action.ViewRoom,
room_id: this.state.room.roomId,
event_id: this.state.initialEventId,
highlighted: false,
replyingToEvent: this.state.replyToEvent,
});
}
};
private onRightPanelStoreUpdate = () => {
this.setState({
showRightPanel: RightPanelStore.instance.isOpenForRoom,
});
};
private onPageUnload = event => {
if (ContentMessages.sharedInstance().getCurrentUploads().length > 0) {
return event.returnValue =
_t("You seem to be uploading files, are you sure you want to quit?");
} else if (this.getCallForRoom() && this.state.callState !== 'ended') {
return event.returnValue =
_t("You seem to be in a call, are you sure you want to quit?");
}
};
private onReactKeyDown = ev => {
let handled = false;
const action = getKeyBindingsManager().getRoomAction(ev);
switch (action) {
case RoomAction.DismissReadMarker:
this.messagePanel.forgetReadMarker();
this.jumpToLiveTimeline();
handled = true;
break;
case RoomAction.JumpToOldestUnread:
this.jumpToReadMarker();
handled = true;
break;
case RoomAction.UploadFile:
dis.dispatch({ action: "upload_file" }, true);
handled = true;
break;
}
if (handled) {
ev.stopPropagation();
ev.preventDefault();
}
};
private onCallState = (roomId: string): void => {
// don't filter out payloads for room IDs other than props.room because
// we may be interested in the conf 1:1 room
if (!roomId) return;
const call = this.getCallForRoom();
this.setState({ callState: call ? call.state : null });
};
private onAction = async (payload: ActionPayload): Promise<void> => {
switch (payload.action) {
case 'message_sent':
this.checkDesktopNotifications();
break;
case 'post_sticker_message':
this.injectSticker(
payload.data.content.url,
payload.data.content.info,
payload.data.description || payload.data.name,
payload.data.threadId);
break;
case 'picture_snapshot':
ContentMessages.sharedInstance().sendContentListToRoom(
[payload.file], this.state.room.roomId, null, this.context);
break;
case 'notifier_enabled':
case Action.UploadStarted:
case Action.UploadFinished:
case Action.UploadCanceled:
this.forceUpdate();
break;
case 'appsDrawer':
this.setState({
showApps: payload.show,
});
break;
case 'reply_to_event':
if (this.state.searchResults
&& payload.event.getRoomId() === this.state.roomId
&& !this.unmounted
&& payload.context === TimelineRenderingType.Room) {
this.onCancelSearchClick();
}
break;
case 'quote':
if (this.state.searchResults) {
const roomId = payload.event.getRoomId();
if (roomId === this.state.roomId) {
this.onCancelSearchClick();
}
setImmediate(() => {
dis.dispatch({
action: Action.ViewRoom,
room_id: roomId,
deferred_action: payload,
});
});
}
break;
case 'sync_state':
if (!this.state.matrixClientIsReady) {
this.setState({
matrixClientIsReady: this.context && this.context.isInitialSyncComplete(),
}, () => {
// send another "initial" RVS update to trigger peeking if needed
this.onRoomViewStoreUpdate(true);
});
}
break;
case 'focus_search':
this.onSearchClick();
break;
case Action.EditEvent: {
// Quit early if we're trying to edit events in wrong rendering context
if (payload.timelineRenderingType !== this.state.timelineRenderingType) return;
const editState = payload.event ? new EditorStateTransfer(payload.event) : null;
this.setState({ editState }, () => {
if (payload.event) {
this.messagePanel?.scrollToEventIfNeeded(payload.event.getId());
}
});
break;
}
case Action.ComposerInsert: {
if (payload.composerType) break;
if (this.state.searching && payload.timelineRenderingType === TimelineRenderingType.Room) {
// we don't have the composer rendered in this state, so bring it back first
await this.onCancelSearchClick();
}
// re-dispatch to the correct composer
dis.dispatch({
...payload,
composerType: this.state.editState ? ComposerType.Edit : ComposerType.Send,
});
break;
}
case Action.FocusAComposer: {
// re-dispatch to the correct composer
dis.fire(this.state.editState ? Action.FocusEditMessageComposer : Action.FocusSendMessageComposer);
break;
}
case "scroll_to_bottom":
if (payload.timelineRenderingType === TimelineRenderingType.Room) {
this.messagePanel?.jumpToLiveTimeline();
}
break;
}
};
private onRoomTimeline = (ev: MatrixEvent, room: Room, toStartOfTimeline: boolean, removed, data) => {
if (this.unmounted) return;
// ignore events for other rooms
if (!room || room.roomId !== this.state.room?.roomId) return;
// ignore events from filtered timelines
if (data.timeline.getTimelineSet() !== room.getUnfilteredTimelineSet()) return;
if (ev.getType() === "org.matrix.room.preview_urls") {
this.updatePreviewUrlVisibility(room);
}
if (ev.getType() === "m.room.encryption") {
this.updateE2EStatus(room);
}
// ignore anything but real-time updates at the end of the room:
// updates from pagination will happen when the paginate completes.
if (toStartOfTimeline || !data || !data.liveEvent) return;
// no point handling anything while we're waiting for the join to finish:
// we'll only be showing a spinner.
if (this.state.joining) return;
if (!ev.isBeingDecrypted() && !ev.isDecryptionFailure()) {
this.handleEffects(ev);
}
if (ev.getSender() !== this.context.credentials.userId) {
// update unread count when scrolled up
if (!this.state.searchResults && this.state.atEndOfLiveTimeline) {
// no change
} else if (!shouldHideEvent(ev, this.state)) {
this.setState((state, props) => {
return { numUnreadMessages: state.numUnreadMessages + 1 };
});
}
}
};
private onEventDecrypted = (ev: MatrixEvent) => {
if (!this.state.room || !this.state.matrixClientIsReady) return; // not ready at all
if (ev.getRoomId() !== this.state.room.roomId) return; // not for us
if (ev.isDecryptionFailure()) return;
this.handleEffects(ev);
};
private handleEffects = (ev: MatrixEvent) => {
const notifState = RoomNotificationStateStore.instance.getRoomState(this.state.room);
if (!notifState.isUnread) return;
CHAT_EFFECTS.forEach(effect => {
if (containsEmoji(ev.getContent(), effect.emojis) || ev.getContent().msgtype === effect.msgType) {
// For initial threads launch, chat effects are disabled
// see #19731
if (!SettingsStore.getValue("feature_thread") || !ev.isThreadRelation) {
dis.dispatch({ action: `effects.${effect.command}` });
}
}
});
};
private onRoomName = (room: Room) => {
if (this.state.room && room.roomId == this.state.room.roomId) {
this.forceUpdate();
}
};
private onKeyBackupStatus = () => {
// Key backup status changes affect whether the in-room recovery
// reminder is displayed.
this.forceUpdate();
};
public canResetTimeline = () => {
if (!this.messagePanel) {
return true;
}
return this.messagePanel.canResetTimeline();
};
// called when state.room is first initialised (either at initial load,
// after a successful peek, or after we join the room).
private onRoomLoaded = (room: Room) => {
if (this.unmounted) return;
// Attach a widget store listener only when we get a room
WidgetLayoutStore.instance.on(WidgetLayoutStore.emissionForRoom(room), this.onWidgetLayoutChange);
this.calculatePeekRules(room);
this.updatePreviewUrlVisibility(room);
this.loadMembersIfJoined(room);
this.calculateRecommendedVersion(room);
this.updateE2EStatus(room);
this.updatePermissions(room);
this.checkWidgets(room);
this.setState({
liveTimeline: room.getLiveTimeline(),
});
};
private async calculateRecommendedVersion(room: Room) {
const upgradeRecommendation = await room.getRecommendedVersion();
if (this.unmounted) return;
this.setState({ upgradeRecommendation });
}
private async loadMembersIfJoined(room: Room) {
// lazy load members if enabled
if (this.context.hasLazyLoadMembersEnabled()) {
if (room && room.getMyMembership() === 'join') {
try {
await room.loadMembersIfNeeded();
if (!this.unmounted) {
this.setState({ membersLoaded: true });
}
} catch (err) {
const errorMessage = `Fetching room members for ${room.roomId} failed.` +
" Room members will appear incomplete.";
logger.error(errorMessage);
logger.error(err);
}
}
}
}
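    // Work out whether guests can join and whether the room can be peeked,
    // from the m.room.guest_access and m.room.history_visibility state events.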
private calculatePeekRules(room: Room) {
const guestAccessEvent = room.currentState.getStateEvents("m.room.guest_access", "");
if (guestAccessEvent && guestAccessEvent.getContent().guest_access === "can_join") {
this.setState({
guestsCanJoin: true,
});
}
const historyVisibility = room.currentState.getStateEvents("m.room.history_visibility", "");
if (historyVisibility && historyVisibility.getContent().history_visibility === "world_readable") {
this.setState({
canPeek: true,
});
}
}
private updatePreviewUrlVisibility({ roomId }: Room) {
// URL Previews in E2EE rooms can be a privacy leak so use a different setting which is per-room explicit
const key = this.context.isRoomEncrypted(roomId) ? 'urlPreviewsEnabled_e2ee' : 'urlPreviewsEnabled';
this.setState({
showUrlPreview: SettingsStore.getValue(key, roomId),
});
}
private onRoom = (room: Room) => {
if (!room || room.roomId !== this.state.roomId) {
return;
}
// Detach the listener if the room is changing for some reason
if (this.state.room) {
WidgetLayoutStore.instance.off(
WidgetLayoutStore.emissionForRoom(this.state.room),
this.onWidgetLayoutChange,
);
}
this.setState({
room: room,
}, () => {
this.onRoomLoaded(room);
});
};
private onDeviceVerificationChanged = (userId: string, device: object) => {
const room = this.state.room;
if (!room.currentState.getMember(userId)) {
return;
}
this.updateE2EStatus(room);
};
private onUserVerificationChanged = (userId: string, trustStatus: object) => {
const room = this.state.room;
if (!room || !room.currentState.getMember(userId)) {
return;
}
this.updateE2EStatus(room);
};
private onCrossSigningKeysChanged = () => {
const room = this.state.room;
if (room) {
this.updateE2EStatus(room);
}
};
private async updateE2EStatus(room: Room) {
if (!this.context.isRoomEncrypted(room.roomId)) return;
// If crypto is not currently enabled, we aren't tracking devices at all,
        // so we don't know what the answer is. Let's err on the safe side and show
// a warning for this case.
let e2eStatus = E2EStatus.Warning;
if (this.context.isCryptoEnabled()) {
/* At this point, the user has encryption on and cross-signing on */
e2eStatus = await shieldStatusForRoom(this.context, room);
}
if (this.unmounted) return;
this.setState({ e2eStatus });
}
private onAccountData = (event: MatrixEvent) => {
const type = event.getType();
if ((type === "org.matrix.preview_urls" || type === "im.vector.web.settings") && this.state.room) {
// non-e2ee url previews are stored in legacy event type `org.matrix.room.preview_urls`
this.updatePreviewUrlVisibility(this.state.room);
}
};
private onRoomAccountData = (event: MatrixEvent, room: Room) => {
if (room.roomId == this.state.roomId) {
const type = event.getType();
if (type === "org.matrix.room.preview_urls" || type === "im.vector.web.settings") {
// non-e2ee url previews are stored in legacy event type `org.matrix.room.preview_urls`
this.updatePreviewUrlVisibility(room);
}
}
};
private onRoomStateEvents = (ev: MatrixEvent, state: RoomState) => {
// ignore if we don't have a room yet
if (!this.state.room || this.state.room.roomId !== state.roomId) {
return;
}
if (ev.getType() === EventType.RoomCanonicalAlias) {
// re-view the room so MatrixChat can manage the alias in the URL properly
dis.dispatch({
action: Action.ViewRoom,
room_id: this.state.room.roomId,
});
return; // this event cannot affect permissions so bail
}
this.updatePermissions(this.state.room);
};
private onRoomStateMember = (ev: MatrixEvent, state, member) => {
// ignore if we don't have a room yet
if (!this.state.room) {
return;
}
// ignore members in other rooms
if (member.roomId !== this.state.room.roomId) {
return;
}
this.updateRoomMembers();
};
private onMyMembership = (room: Room, membership: string, oldMembership: string) => {
if (room.roomId === this.state.roomId) {
this.forceUpdate();
this.loadMembersIfJoined(room);
this.updatePermissions(room);
}
};
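    // Recompute whether the current user may react and reply in this room,
    // based on their membership and the room's current permissions.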
private updatePermissions(room: Room) {
if (room) {
const me = this.context.getUserId();
const canReact = room.getMyMembership() === "join" && room.currentState.maySendEvent("m.reaction", me);
const canReply = room.maySendMessage();
this.setState({ canReact, canReply });
}
}
// rate limited because a power level change will emit an event for every member in the room.
private updateRoomMembers = throttle(() => {
this.updateDMState();
this.updateE2EStatus(this.state.room);
}, 500, { leading: true, trailing: true });
private checkDesktopNotifications() {
const memberCount = this.state.room.getJoinedMemberCount() + this.state.room.getInvitedMemberCount();
// if they are not alone prompt the user about notifications so they don't miss replies
if (memberCount > 1 && Notifier.shouldShowPrompt()) {
showNotificationsToast(true);
}
}
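    // If this room was joined from a direct-message invite, persist the
    // DM mapping for the inviter.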
private updateDMState() {
const room = this.state.room;
if (room.getMyMembership() != "join") {
return;
}
const dmInviter = room.getDMInviter();
if (dmInviter) {
Rooms.setDMRoom(room.roomId, dmInviter);
}
}
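    // Fill requests from the search results ScrollPanel: only backwards
    // pagination is supported, and only while a next_batch token remains.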
private onSearchResultsFillRequest = (backwards: boolean): Promise<boolean> => {
if (!backwards) {
return Promise.resolve(false);
}
if (this.state.searchResults.next_batch) {
debuglog("requesting more search results");
const searchPromise = searchPagination(this.state.searchResults as ISearchResults);
return this.handleSearchResult(searchPromise);
} else {
debuglog("no more search results");
return Promise.resolve(false);
}
};
private onInviteButtonClick = () => {
// call AddressPickerDialog
dis.dispatch({
action: 'view_invite',
roomId: this.state.room.roomId,
});
};
private onJoinButtonClicked = () => {
// If the user is a ROU, allow them to transition to a PWLU
if (this.context && this.context.isGuest()) {
// Join this room once the user has registered and logged in
// (If we failed to peek, we may not have a valid room object.)
dis.dispatch({
action: 'do_after_sync_prepared',
deferred_action: {
action: Action.ViewRoom,
room_id: this.getRoomId(),
},
});
dis.dispatch({ action: 'require_registration' });
} else {
Promise.resolve().then(() => {
const signUrl = this.props.threepidInvite?.signUrl;
dis.dispatch({
action: Action.JoinRoom,
roomId: this.getRoomId(),
opts: { inviteSignUrl: signUrl },
_type: "unknown", // TODO: instrumentation
});
return Promise.resolve();
});
}
};
private onMessageListScroll = ev => {
if (this.messagePanel.isAtEndOfLiveTimeline()) {
this.setState({
numUnreadMessages: 0,
atEndOfLiveTimeline: true,
});
} else {
this.setState({
atEndOfLiveTimeline: false,
});
}
this.updateTopUnreadMessagesBar();
};
private onDragEnter = ev => {
ev.stopPropagation();
ev.preventDefault();
// We always increment the counter no matter the types, because dragging is
// still happening. If we didn't, the drag counter would get out of sync.
this.setState({ dragCounter: this.state.dragCounter + 1 });
// See:
// https://docs.w3cub.com/dom/datatransfer/types
// https://developer.mozilla.org/en-US/docs/Web/API/HTML_Drag_and_Drop_API/Recommended_drag_types#file
if (ev.dataTransfer.types.includes("Files") || ev.dataTransfer.types.includes("application/x-moz-file")) {
this.setState({ draggingFile: true });
}
};
private onDragLeave = ev => {
ev.stopPropagation();
ev.preventDefault();
this.setState({
dragCounter: this.state.dragCounter - 1,
});
if (this.state.dragCounter === 0) {
this.setState({
draggingFile: false,
});
}
};
private onDragOver = ev => {
ev.stopPropagation();
ev.preventDefault();
ev.dataTransfer.dropEffect = 'none';
// See:
// https://docs.w3cub.com/dom/datatransfer/types
// https://developer.mozilla.org/en-US/docs/Web/API/HTML_Drag_and_Drop_API/Recommended_drag_types#file
if (ev.dataTransfer.types.includes("Files") || ev.dataTransfer.types.includes("application/x-moz-file")) {
ev.dataTransfer.dropEffect = 'copy';
}
};
private onDrop = ev => {
ev.stopPropagation();
ev.preventDefault();
ContentMessages.sharedInstance().sendContentListToRoom(
ev.dataTransfer.files, this.state.room.roomId, null, this.context,
);
dis.fire(Action.FocusSendMessageComposer);
this.setState({
draggingFile: false,
dragCounter: this.state.dragCounter - 1,
});
};
private injectSticker(url: string, info: object, text: string, threadId: string | null) {
if (this.context.isGuest()) {
dis.dispatch({ action: 'require_registration' });
return;
}
ContentMessages.sharedInstance()
.sendStickerContentToRoom(url, this.state.room.roomId, threadId, info, text, this.context)
.then(undefined, (error) => {
if (error.name === "UnknownDeviceError") {
                    // Let the status bar handle this
return;
}
});
}
private onSearch = (term: string, scope: SearchScope) => {
this.setState({
searchTerm: term,
searchScope: scope,
searchResults: {},
searchHighlights: [],
});
// if we already have a search panel, we need to tell it to forget
// about its scroll state.
if (this.searchResultsPanel.current) {
this.searchResultsPanel.current.resetScrollState();
}
// make sure that we don't end up showing results from
// an aborted search by keeping a unique id.
//
// todo: should cancel any previous search requests.
this.searchId = new Date().getTime();
let roomId;
if (scope === SearchScope.Room) roomId = this.state.room.roomId;
debuglog("sending search request");
const searchPromise = eventSearch(term, roomId);
this.handleSearchResult(searchPromise);
};
private handleSearchResult(searchPromise: Promise<any>): Promise<boolean> {
// keep a record of the current search id, so that if the search terms
// change before we get a response, we can ignore the results.
const localSearchId = this.searchId;
this.setState({
searchInProgress: true,
});
return searchPromise.then((results) => {
debuglog("search complete");
            if (this.unmounted || !this.state.searching || this.searchId !== localSearchId) {
logger.error("Discarding stale search results");
return false;
}
            // Postgres on Synapse returns us precise details of the strings
            // which actually got matched for highlighting; other backends may not.
            //
            // In either case, we want to highlight the literal search term
            // whether it was used by the search engine or not.
let highlights = results.highlights;
if (highlights.indexOf(this.state.searchTerm) < 0) {
highlights = highlights.concat(this.state.searchTerm);
}
// For overlapping highlights,
// favour longer (more specific) terms first
highlights = highlights.sort(function(a, b) {
return b.length - a.length;
});
this.setState({
searchHighlights: highlights,
searchResults: results,
});
}, (error) => {
logger.error("Search failed", error);
Modal.createTrackedDialog('Search failed', '', ErrorDialog, {
title: _t("Search failed"),
description: ((error && error.message) ? error.message :
_t("Server may be unavailable, overloaded, or search timed out :(")),
});
return false;
}).finally(() => {
this.setState({
searchInProgress: false,
});
});
}
private getSearchResultTiles() {
// XXX: todo: merge overlapping results somehow?
// XXX: why doesn't searching on name work?
const ret = [];
if (this.state.searchInProgress) {
ret.push(<li key="search-spinner">
<Spinner />
</li>);
}
if (!this.state.searchResults.next_batch) {
if (!this.state.searchResults?.results?.length) {
ret.push(<li key="search-top-marker">
<h2 className="mx_RoomView_topMarker">{ _t("No results") }</h2>
</li>,
);
} else {
ret.push(<li key="search-top-marker">
<h2 className="mx_RoomView_topMarker">{ _t("No more results") }</h2>
</li>,
);
}
}
// once dynamic content in the search results load, make the scrollPanel check
// the scroll offsets.
const onHeightChanged = () => {
const scrollPanel = this.searchResultsPanel.current;
if (scrollPanel) {
scrollPanel.checkScroll();
}
};
let lastRoomId;
for (let i = (this.state.searchResults?.results?.length || 0) - 1; i >= 0; i--) {
const result = this.state.searchResults.results[i];
const mxEv = result.context.getEvent();
const roomId = mxEv.getRoomId();
const room = this.context.getRoom(roomId);
if (!room) {
// if we do not have the room in js-sdk stores then hide it as we cannot easily show it
// As per the spec, an all rooms search can create this condition,
// it happens with Seshat but not Synapse.
// It will make the result count not match the displayed count.
logger.log("Hiding search result from an unknown room", roomId);
continue;
}
if (!haveTileForEvent(mxEv, this.state.showHiddenEventsInTimeline)) {
// XXX: can this ever happen? It will make the result count
// not match the displayed count.
continue;
}
            if (this.state.searchScope === SearchScope.All) {
if (roomId !== lastRoomId) {
ret.push(<li key={mxEv.getId() + "-room"}>
<h2>{ _t("Room") }: { room.name }</h2>
</li>);
lastRoomId = roomId;
}
}
const resultLink = "#/room/"+roomId+"/"+mxEv.getId();
ret.push(<SearchResultTile
key={mxEv.getId()}
searchResult={result}
searchHighlights={this.state.searchHighlights}
resultLink={resultLink}
permalinkCreator={this.getPermalinkCreatorForRoom(room)}
onHeightChanged={onHeightChanged}
/>);
}
return ret;
}
private onCallPlaced = (type: CallType): void => {
CallHandler.instance.placeCall(this.state.room?.roomId, type);
};
private onAppsClick = () => {
dis.dispatch({
action: "appsDrawer",
show: !this.state.showApps,
});
};
private onForgetClick = () => {
dis.dispatch({
action: 'forget_room',
room_id: this.state.room.roomId,
});
};
private onRejectButtonClicked = () => {
this.setState({
rejecting: true,
});
this.context.leave(this.state.roomId).then(() => {
dis.dispatch({ action: 'view_home_page' });
this.setState({
rejecting: false,
});
}, (error) => {
logger.error("Failed to reject invite: %s", error);
const msg = error.message ? error.message : JSON.stringify(error);
Modal.createTrackedDialog('Failed to reject invite', '', ErrorDialog, {
title: _t("Failed to reject invite"),
description: msg,
});
this.setState({
rejecting: false,
rejectError: error,
});
});
};
private onRejectAndIgnoreClick = async () => {
this.setState({
rejecting: true,
});
try {
const myMember = this.state.room.getMember(this.context.getUserId());
const inviteEvent = myMember.events.member;
const ignoredUsers = this.context.getIgnoredUsers();
ignoredUsers.push(inviteEvent.getSender()); // de-duped internally in the js-sdk
await this.context.setIgnoredUsers(ignoredUsers);
await this.context.leave(this.state.roomId);
dis.dispatch({ action: 'view_home_page' });
this.setState({
rejecting: false,
});
} catch (error) {
logger.error("Failed to reject invite: %s", error);
const msg = error.message ? error.message : JSON.stringify(error);
Modal.createTrackedDialog('Failed to reject invite', '', ErrorDialog, {
title: _t("Failed to reject invite"),
description: msg,
});
this.setState({
rejecting: false,
rejectError: error,
});
}
};
private onRejectThreepidInviteButtonClicked = () => {
// We can reject 3pid invites in the same way that we accept them,
// using /leave rather than /join. In the short term though, we
// just ignore them.
// https://github.com/vector-im/vector-web/issues/1134
dis.fire(Action.ViewRoomDirectory);
};
private onSearchClick = () => {
this.setState({
searching: !this.state.searching,
});
};
private onCancelSearchClick = (): Promise<void> => {
return new Promise<void>(resolve => {
this.setState({
searching: false,
searchResults: null,
}, resolve);
});
};
// jump down to the bottom of this room, where new events are arriving
private jumpToLiveTimeline = () => {
if (this.state.initialEventId && this.state.isInitialEventHighlighted) {
// If we were viewing a highlighted event, firing view_room without
// an event will take care of both clearing the URL fragment and
// jumping to the bottom
dis.dispatch({
action: Action.ViewRoom,
room_id: this.state.room.roomId,
});
} else {
// Otherwise we have to jump manually
this.messagePanel.jumpToLiveTimeline();
dis.fire(Action.FocusSendMessageComposer);
}
};
// jump up to wherever our read marker is
private jumpToReadMarker = () => {
this.messagePanel.jumpToReadMarker();
};
// update the read marker to match the read-receipt
private forgetReadMarker = ev => {
ev.stopPropagation();
this.messagePanel.forgetReadMarker();
};
// decide whether or not the top 'unread messages' bar should be shown
private updateTopUnreadMessagesBar = () => {
if (!this.messagePanel) {
return;
}
const showBar = this.messagePanel.canJumpToReadMarker();
        if (this.state.showTopUnreadMessagesBar !== showBar) {
this.setState({ showTopUnreadMessagesBar: showBar });
}
};
// get the current scroll position of the room, so that it can be
// restored when we switch back to it.
//
private getScrollState(): ScrollState {
const messagePanel = this.messagePanel;
if (!messagePanel) return null;
// if we're following the live timeline, we want to return null; that
// means that, if we switch back, we will jump to the read-up-to mark.
//
// That should be more intuitive than slavishly preserving the current
// scroll state, in the case where the room advances in the meantime
// (particularly in the case that the user reads some stuff on another
// device).
//
if (this.state.atEndOfLiveTimeline) {
return null;
}
const scrollState = messagePanel.getScrollState();
// getScrollState on TimelinePanel *may* return null, so guard against that
if (!scrollState || scrollState.stuckAtBottom) {
// we don't really expect to be in this state, but it will
// occasionally happen when no scroll state has been set on the
// messagePanel (ie, we didn't have an initial event (so it's
// probably a new room), there has been no user-initiated scroll, and
// no read-receipts have arrived to update the scroll position).
//
// Return null, which will cause us to scroll to last unread on
// reload.
return null;
}
return {
focussedEvent: scrollState.trackedScrollToken,
pixelOffset: scrollState.pixelOffset,
};
}
private onStatusBarVisible = () => {
if (this.unmounted || this.state.statusBarVisible) return;
this.setState({ statusBarVisible: true });
};
private onStatusBarHidden = () => {
// This is currently not desired as it is annoying if it keeps expanding and collapsing
if (this.unmounted || !this.state.statusBarVisible) return;
this.setState({ statusBarVisible: false });
};
/**
* called by the parent component when PageUp/Down/etc is pressed.
*
* We pass it down to the scroll panel.
*/
private handleScrollKey = ev => {
let panel;
if (this.searchResultsPanel.current) {
panel = this.searchResultsPanel.current;
} else if (this.messagePanel) {
panel = this.messagePanel;
}
if (panel) {
panel.handleScrollKey(ev);
}
};
/** | if (!this.state.room) {
return null;
}
return CallHandler.instance.getCallForRoom(this.state.room.roomId);
}
// this has to be a proper method rather than an unnamed function,
// otherwise react calls it with null on each update.
private gatherTimelinePanelRef = r => {
this.messagePanel = r;
};
private getOldRoom() {
const createEvent = this.state.room.currentState.getStateEvents("m.room.create", "");
if (!createEvent || !createEvent.getContent()['predecessor']) return null;
return this.context.getRoom(createEvent.getContent()['predecessor']['room_id']);
}
getHiddenHighlightCount() {
const oldRoom = this.getOldRoom();
if (!oldRoom) return 0;
return oldRoom.getUnreadNotificationCount('highlight');
}
onHiddenHighlightsClick = () => {
const oldRoom = this.getOldRoom();
if (!oldRoom) return;
dis.dispatch({
action: Action.ViewRoom,
room_id: oldRoom.roomId,
});
};
render() {
if (!this.state.room) {
const loading = !this.state.matrixClientIsReady || this.state.roomLoading || this.state.peekLoading;
if (loading) {
// Assume preview loading if we don't have a ready client or a room ID (still resolving the alias)
const previewLoading = !this.state.matrixClientIsReady || !this.state.roomId || this.state.peekLoading;
return (
<div className="mx_RoomView">
<ErrorBoundary>
<RoomPreviewBar
canPreview={false}
previewLoading={previewLoading && !this.state.roomLoadError}
error={this.state.roomLoadError}
loading={loading}
joining={this.state.joining}
oobData={this.props.oobData}
/>
</ErrorBoundary>
</div>
);
} else {
let inviterName = undefined;
if (this.props.oobData) {
inviterName = this.props.oobData.inviterName;
}
const invitedEmail = this.props.threepidInvite?.toEmail;
// We have no room object for this room, only the ID.
// We've got to this room by following a link, possibly a third party invite.
const roomAlias = this.state.roomAlias;
return (
<div className="mx_RoomView">
<ErrorBoundary>
<RoomPreviewBar
onJoinClick={this.onJoinButtonClicked}
onForgetClick={this.onForgetClick}
onRejectClick={this.onRejectThreepidInviteButtonClicked}
canPreview={false}
error={this.state.roomLoadError}
roomAlias={roomAlias}
joining={this.state.joining}
inviterName={inviterName}
invitedEmail={invitedEmail}
oobData={this.props.oobData}
signUrl={this.props.threepidInvite?.signUrl}
room={this.state.room}
/>
</ErrorBoundary>
</div>
);
}
}
const myMembership = this.state.room.getMyMembership();
// SpaceRoomView handles invites itself
if (myMembership === "invite" && (!SpaceStore.spacesEnabled || !this.state.room.isSpaceRoom())) {
if (this.state.joining || this.state.rejecting) {
return (
<ErrorBoundary>
<RoomPreviewBar
canPreview={false}
error={this.state.roomLoadError}
joining={this.state.joining}
rejecting={this.state.rejecting}
/>
</ErrorBoundary>
);
} else {
const myUserId = this.context.credentials.userId;
const myMember = this.state.room.getMember(myUserId);
const inviteEvent = myMember ? myMember.events.member : null;
let inviterName = _t("Unknown");
if (inviteEvent) {
inviterName = inviteEvent.sender ? inviteEvent.sender.name : inviteEvent.getSender();
}
// We deliberately don't try to peek into invites, even if we have permission to peek
// as they could be a spam vector.
// XXX: in future we could give the option of a 'Preview' button which lets them view anyway.
// We have a regular invite for this room.
return (
<div className="mx_RoomView">
<ErrorBoundary>
<RoomPreviewBar
onJoinClick={this.onJoinButtonClicked}
onForgetClick={this.onForgetClick}
onRejectClick={this.onRejectButtonClicked}
onRejectAndIgnoreClick={this.onRejectAndIgnoreClick}
inviterName={inviterName}
canPreview={false}
joining={this.state.joining}
room={this.state.room}
/>
</ErrorBoundary>
</div>
);
}
}
let fileDropTarget = null;
if (this.state.draggingFile) {
fileDropTarget = (
<div className="mx_RoomView_fileDropTarget">
<img
src={require("../../../res/img/upload-big.svg")}
className="mx_RoomView_fileDropTarget_image"
/>
{ _t("Drop file here to upload") }
</div>
);
}
// We have successfully loaded this room, and are not previewing.
// Display the "normal" room view.
let activeCall = null;
{
// New block because this variable doesn't need to hang around for the rest of the function
const call = this.getCallForRoom();
if (call && (this.state.callState !== 'ended' && this.state.callState !== 'ringing')) {
activeCall = call;
}
}
const scrollheaderClasses = classNames({
mx_RoomView_scrollheader: true,
});
let statusBar;
let isStatusAreaExpanded = true;
if (ContentMessages.sharedInstance().getCurrentUploads().length > 0) {
statusBar = <UploadBar room={this.state.room} />;
} else if (!this.state.searchResults) {
isStatusAreaExpanded = this.state.statusBarVisible;
statusBar = <RoomStatusBar
room={this.state.room}
isPeeking={myMembership !== "join"}
onInviteClick={this.onInviteButtonClick}
onVisible={this.onStatusBarVisible}
onHidden={this.onStatusBarHidden}
/>;
}
const statusBarAreaClass = classNames("mx_RoomView_statusArea", {
"mx_RoomView_statusArea_expanded": isStatusAreaExpanded,
});
// if statusBar does not exist then statusBarArea is blank and takes up unnecessary space on the screen
// show statusBarArea only if statusBar is present
const statusBarArea = statusBar && <div className={statusBarAreaClass}>
<div className="mx_RoomView_statusAreaBox">
<div className="mx_RoomView_statusAreaBox_line" />
{ statusBar }
</div>
</div>;
const roomVersionRecommendation = this.state.upgradeRecommendation;
const showRoomUpgradeBar = (
roomVersionRecommendation &&
roomVersionRecommendation.needsUpgrade &&
this.state.room.userMayUpgradeRoom(this.context.credentials.userId)
);
const hiddenHighlightCount = this.getHiddenHighlightCount();
let aux = null;
let previewBar;
if (this.state.searching) {
aux = <SearchBar
searchInProgress={this.state.searchInProgress}
onCancelClick={this.onCancelSearchClick}
onSearch={this.onSearch}
isRoomEncrypted={this.context.isRoomEncrypted(this.state.room.roomId)}
/>;
} else if (showRoomUpgradeBar) {
aux = <RoomUpgradeWarningBar room={this.state.room} />;
} else if (myMembership !== "join") {
// We do have a room object for this room, but we're not currently in it.
// We may have a 3rd party invite to it.
let inviterName = undefined;
if (this.props.oobData) {
inviterName = this.props.oobData.inviterName;
}
const invitedEmail = this.props.threepidInvite?.toEmail;
previewBar = (
<RoomPreviewBar
onJoinClick={this.onJoinButtonClicked}
onForgetClick={this.onForgetClick}
onRejectClick={this.onRejectThreepidInviteButtonClicked}
joining={this.state.joining}
inviterName={inviterName}
invitedEmail={invitedEmail}
oobData={this.props.oobData}
canPreview={this.state.canPeek}
room={this.state.room}
/>
);
if (!this.state.canPeek && (!SpaceStore.spacesEnabled || !this.state.room?.isSpaceRoom())) {
return (
<div className="mx_RoomView">
{ previewBar }
</div>
);
}
} else if (hiddenHighlightCount > 0) {
aux = (
<AccessibleButton
element="div"
className="mx_RoomView_auxPanel_hiddenHighlights"
onClick={this.onHiddenHighlightsClick}
>
{ _t(
"You have %(count)s unread notifications in a prior version of this room.",
{ count: hiddenHighlightCount },
) }
</AccessibleButton>
);
}
if (this.state.room?.isSpaceRoom() && !this.props.forceTimeline) {
return <SpaceRoomView
space={this.state.room}
justCreatedOpts={this.props.justCreatedOpts}
resizeNotifier={this.props.resizeNotifier}
onJoinButtonClicked={this.onJoinButtonClicked}
onRejectButtonClicked={this.props.threepidInvite
? this.onRejectThreepidInviteButtonClicked
: this.onRejectButtonClicked}
/>;
}
const auxPanel = (
<AuxPanel
room={this.state.room}
userId={this.context.credentials.userId}
showApps={this.state.showApps}
resizeNotifier={this.props.resizeNotifier}
>
{ aux }
</AuxPanel>
);
let messageComposer; let searchInfo;
const canSpeak = (
// joined and not showing search results
myMembership === 'join' && !this.state.searchResults
);
if (canSpeak) {
messageComposer =
<MessageComposer
room={this.state.room}
e2eStatus={this.state.e2eStatus}
resizeNotifier={this.props.resizeNotifier}
replyToEvent={this.state.replyToEvent}
permalinkCreator={this.getPermalinkCreatorForRoom(this.state.room)}
/>;
}
// TODO: Why aren't we storing the term/scope/count in this format
// in this.state if this is what RoomHeader desires?
if (this.state.searchResults) {
searchInfo = {
searchTerm: this.state.searchTerm,
searchScope: this.state.searchScope,
searchCount: this.state.searchResults.count,
};
}
// if we have search results, we keep the messagepanel (so that it preserves its
// scroll state), but hide it.
let searchResultsPanel;
let hideMessagePanel = false;
if (this.state.searchResults) {
// show searching spinner
if (this.state.searchResults.count === undefined) {
searchResultsPanel = (
<div className="mx_RoomView_messagePanel mx_RoomView_messagePanelSearchSpinner" />
);
} else {
searchResultsPanel = (
<ScrollPanel
ref={this.searchResultsPanel}
className="mx_RoomView_messagePanel mx_RoomView_searchResultsPanel mx_GroupLayout"
onFillRequest={this.onSearchResultsFillRequest}
resizeNotifier={this.props.resizeNotifier}
>
<li className={scrollheaderClasses} />
{ this.getSearchResultTiles() }
</ScrollPanel>
);
}
hideMessagePanel = true;
}
let highlightedEventId = null;
if (this.state.isInitialEventHighlighted) {
highlightedEventId = this.state.initialEventId;
}
const messagePanelClassNames = classNames(
"mx_RoomView_messagePanel",
{
"mx_IRCLayout": this.state.layout == Layout.IRC,
"mx_GroupLayout": this.state.layout == Layout.Group,
});
// console.info("ShowUrlPreview for %s is %s", this.state.room.roomId, this.state.showUrlPreview);
const messagePanel = (
<TimelinePanel
ref={this.gatherTimelinePanelRef}
timelineSet={this.state.room.getUnfilteredTimelineSet()}
showReadReceipts={this.state.showReadReceipts}
manageReadReceipts={!this.state.isPeeking}
sendReadReceiptOnLoad={!this.state.wasContextSwitch}
manageReadMarkers={!this.state.isPeeking}
hidden={hideMessagePanel}
highlightedEventId={highlightedEventId}
eventId={this.state.initialEventId}
eventPixelOffset={this.state.initialEventPixelOffset}
onScroll={this.onMessageListScroll}
onUserScroll={this.onUserScroll}
onReadMarkerUpdated={this.updateTopUnreadMessagesBar}
showUrlPreview={this.state.showUrlPreview}
className={messagePanelClassNames}
membersLoaded={this.state.membersLoaded}
permalinkCreator={this.getPermalinkCreatorForRoom(this.state.room)}
resizeNotifier={this.props.resizeNotifier}
showReactions={true}
layout={this.state.layout}
editState={this.state.editState}
/>);
let topUnreadMessagesBar = null;
// Do not show TopUnreadMessagesBar if we have search results showing, it makes no sense
if (this.state.showTopUnreadMessagesBar && !this.state.searchResults) {
topUnreadMessagesBar = (
<TopUnreadMessagesBar onScrollUpClick={this.jumpToReadMarker} onCloseClick={this.forgetReadMarker} />
);
}
let jumpToBottom;
// Do not show JumpToBottomButton if we have search results showing, it makes no sense
if (!this.state.atEndOfLiveTimeline && !this.state.searchResults) {
jumpToBottom = (<JumpToBottomButton
highlight={this.state.room.getUnreadNotificationCount(NotificationCountType.Highlight) > 0}
numUnreadMessages={this.state.numUnreadMessages}
onScrollToBottomClick={this.jumpToLiveTimeline}
/>);
}
const showRightPanel = this.state.room && this.state.showRightPanel;
const rightPanel = showRightPanel
? <RightPanel
room={this.state.room}
resizeNotifier={this.props.resizeNotifier}
permalinkCreator={this.getPermalinkCreatorForRoom(this.state.room)}
e2eStatus={this.state.e2eStatus} />
: null;
const timelineClasses = classNames("mx_RoomView_timeline", {
mx_RoomView_timeline_rr_enabled: this.state.showReadReceipts,
});
const mainClasses = classNames("mx_RoomView", {
mx_RoomView_inCall: Boolean(activeCall),
});
const showChatEffects = SettingsStore.getValue('showChatEffects');
// Decide what to show in the main split
let mainSplitBody = <React.Fragment>
{ auxPanel }
<div className={timelineClasses}>
{ fileDropTarget }
{ topUnreadMessagesBar }
{ jumpToBottom }
{ messagePanel }
{ searchResultsPanel }
</div>
{ statusBarArea }
{ previewBar }
{ messageComposer }
</React.Fragment>;
switch (this.state.mainSplitContentType) {
case MainSplitContentType.Timeline:
// keep the timeline in as the mainSplitBody
break;
case MainSplitContentType.MaximisedWidget:
mainSplitBody = <AppsDrawer
room={this.state.room}
userId={this.context.credentials.userId}
resizeNotifier={this.props.resizeNotifier}
showApps={true}
/>;
break;
// TODO-video MainSplitContentType.Video:
// break;
}
let excludedRightPanelPhaseButtons = [RightPanelPhases.Timeline];
let onAppsClick = this.onAppsClick;
let onForgetClick = this.onForgetClick;
let onSearchClick = this.onSearchClick;
if (this.state.mainSplitContentType === MainSplitContentType.MaximisedWidget) {
// Disable phase buttons and action button to have a simplified header when a widget is maximised
// and enable (not disable) the RightPanelPhases.Timeline button
excludedRightPanelPhaseButtons = [
RightPanelPhases.ThreadPanel,
RightPanelPhases.PinnedMessages,
];
onAppsClick = null;
onForgetClick = null;
onSearchClick = null;
}
return (
<RoomContext.Provider value={this.state}>
<main className={mainClasses} ref={this.roomView} onKeyDown={this.onReactKeyDown}>
{ showChatEffects && this.roomView.current &&
<EffectsOverlay roomWidth={this.roomView.current.offsetWidth} />
}
<ErrorBoundary>
<RoomHeader
room={this.state.room}
searchInfo={searchInfo}
oobData={this.props.oobData}
inRoom={myMembership === 'join'}
onSearchClick={onSearchClick}
onForgetClick={(myMembership === "leave") ? onForgetClick : null}
e2eStatus={this.state.e2eStatus}
onAppsClick={this.state.hasPinnedWidgets ? onAppsClick : null}
appsShown={this.state.showApps}
onCallPlaced={this.onCallPlaced}
excludedRightPanelPhaseButtons={excludedRightPanelPhaseButtons}
/>
<MainSplit panel={rightPanel} resizeNotifier={this.props.resizeNotifier}>
<div className="mx_RoomView_body" data-layout={this.state.layout}>
{ mainSplitBody }
</div>
</MainSplit>
</ErrorBoundary>
</main>
</RoomContext.Provider>
);
}
}
const RoomViewWithMatrixClient = withMatrixClientHOC(RoomView);
export default RoomViewWithMatrixClient; | * get any current call for this room
*/
private getCallForRoom(): MatrixCall { |
id_factory.rs | // Copyright Kamu Data, Inc. and contributors. All rights reserved.
//
// Use of this software is governed by the Business Source License
// included in the LICENSE file.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0.
use std::convert::TryFrom;
use digest::Digest;
use opendatafabric::*;
use rand::Rng;
/// Generates randomized unique identities for different resources
pub struct IDFactory;

impl IDFactory {
pub fn dataset_id() -> DatasetID |
pub fn dataset_name() -> DatasetName {
// TODO: create more readable IDs like docker does
let mut name = String::with_capacity(20);
name.extend(
rand::thread_rng()
.sample_iter(&rand::distributions::Alphanumeric)
.take(20)
.map(char::from),
);
DatasetName::try_from(name).unwrap()
}
}
| {
let name = Self::dataset_name();
let digest = sha3::Sha3_256::digest(name.as_bytes());
DatasetID::from_pub_key_ed25519(&digest)
} |
var_int.rs | use bytes::BytesMut;
use criterion::{black_box, criterion_group, criterion_main, Benchmark, Criterion};
use protocol::{Decode, Encode, Var};
fn | (c: &mut Criterion) {
c.bench(
"benches",
Benchmark::new("write_var_u64", move |b| {
b.iter(|| {
let mut buf = BytesMut::with_capacity(10);
Var(black_box(12_456_456_456_465_464u64)).encode(black_box(&mut buf))
})
}),
);
c.bench(
"benches",
Benchmark::new("write_var_u32", move |b| {
b.iter(|| {
let mut buf = BytesMut::with_capacity(10);
Var(black_box(3_000_000_000u32)).encode(black_box(&mut buf))
})
}),
);
c.bench(
"benches",
Benchmark::new("read_var_u64", move |b| {
let mut buf = BytesMut::new();
Var(12_456_456_456_465_464u64).encode(&mut buf);
b.iter(|| Var::<u64>::decode(&mut (black_box(&buf[0..]))).unwrap())
}),
);
c.bench(
"benches",
Benchmark::new("read_var_u32", move |b| {
let mut buf = BytesMut::new();
Var(3_000_000_000u32).encode(&mut buf);
b.iter(|| Var::<u32>::decode(&mut (black_box(&buf[0..]))).unwrap())
}),
);
}
criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);
| criterion_benchmark |
editStatesModal.js | function editStatesModal($id) {
url = "getInfosStatesModal.html";
// on declare un formulaire
var form = {
id: $id
};
//on post
var state = send_post(form, url);
$('#id_state').val(state.id_state);
$('#new_name_state').val(state.name_state);
| } |
|
students.py | import csv
import re
def main():
run = True
I_NUMBERS = 0
STUDENT_NAME = 1
path = 'E:/GitHub/2021-cs111-programming-with-functions/w09-text-files/teach-/students.csv'
students = get_dictionary(path, I_NUMBERS, STUDENT_NAME)
while run:
print()
number_student = input('Please enter an I-Number (xx-xxx-xxxx): ')
undashed_number = number_student.replace('-', '')
        regex_condition = re.match(r'^\d+$', undashed_number)
        # if not undashed_number.isdigit():
        if regex_condition is None:
print('Invalid I-Number')
else:
if len(undashed_number) < 9:
print('Invalid I-Number: Too few digits')
elif len(undashed_number) > 9:
print('Invalid I-Number: Too many digits')
else:
# Solution 1
found = False
for id, student in students.items():
if id == undashed_number:
print(f'Student name is: {student}')
found = True
if not found:
print('No such student')
# Solution 2
                # if undashed_number not in students:
                #     print("No such student")
                # else:
                #     name = students[undashed_number]
                #     print(name)
print()
        continue_ = input('Continue? Yes: 1, No: Something else → ')
        if continue_ != '1':
            run = False
def get_dictionary(file, key_index, value_index):
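    """Read a CSV file, skip its header row, and return a dict that maps
    column key_index to column value_index for each remaining row."""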
dictionary = {}
with open(file, 'rt') as csv_file:
reader = csv.reader(csv_file)
next(reader)
for row in reader:
key = row[key_index]
value = row[value_index]
dictionary[key] = value
return dictionary
# If this file was executed like this:
# > python teach_solution.py
# then call the main function. However, if this file | # was simply imported, then skip the call to main.
if __name__ == '__main__':
main() |
|
sketch.js | // ************************************************
// Teugg Soccer Physics
// Rip-off of the once popular Soccer Physics game
// Horribly coded by : Kevin Le Teugg, 2020
// File : sketch.js
// Description : Constants declaration, variables initialization, setup, animation loop and mouse / keys events
// ************************************************
// Matter.js module aliases
var Engine = Matter.Engine,
World = Matter.World,
Bodies = Matter.Bodies,
    Body = Matter.Body,
    Render = Matter.Render;
// Matter.js variables
var engine;
var world;
var ground;
// CONSTANTS
// Canvas dimensions
let CANVAS_WIDTH = 1280; // Default 1280
let CANVAS_HEIGHT = 720; // Default 720
// Menus states
let MAIN_MENU = 0;
let P1_LOCAL_CHOOSE_SIDE_MENU = 1;
let P2_LOCAL_SELECTED = 2;
let P2_ONLINE_MENU = 3;
let P1_LOCAL_LEFT_SELECTED = 4;
let P1_LOCAL_RIGHT_SELECTED = 5;
let P2_ONLINE_CREATE_MENU = 6;
let P2_ONLINE_JOIN_MENU = 7;
// Collision categories
var generalCollCategory = 0x0001, // Used for all parts of player except counterweight, ball and goal
generalNoCollCategory = 0x0002, // Used only for the counterweight of players
groundCollCategory = 0x0004; // Used for ground
// INITIALIZATION VARIABLES
// Menus states
let menu = MAIN_MENU;
// Elements dimensions
let groundWidth = CANVAS_WIDTH;
let groundHeight = 100; // Has to be a big arbitrary number because of the uncertainty around the Matter.js engine's internal computation time
let groundOffset = 6;
let groundX = CANVAS_WIDTH / 2;
let groundY = CANVAS_HEIGHT - groundHeight / 2;
let menuButtonWidth = CANVAS_WIDTH / 5;
let menuButtonHeight = CANVAS_HEIGHT / 10;
// Score and timer initialisation
let score1 = 0;
let score2 = 0;
let elapsedTimeSec = 0;
let elapsedTimeMin = 0;
// Coefficient that is applied to the tiltForce vector that is derived from the axes[1] vector of the player
let tiltForceCoeff = 0.007;
// Coefficient that is applied to the kickForce vector that is perpendicular to the movable leg of the players
let kickForceCoeff = 0.0018;
// Coefficient that is applied to the jumpForce vector
let jumpForceCoeff = 0.55; // 0.5 works
// Stiffness of the players movable leg when the kick function is no more engaged
let idleLegStiffness = 0.1; // Previously 0.06
// Sprites handles
let background0;
let spriteSoccerBall;
let spritePlayerMainBody0;
let spritePlayerLeg0;
let spritePlayerLeg1;
// Used for the temporisation of the game AI
var lowerBoundTimingAI = 15;
var upperBoundTimingAI = 35;
var previousTimingAI = 0;
var choosePlayerAI = 0;
var randTimingAI = 0;
// Socket for online multiplayer
var socket;
// Assets preload
function preload() {
background0 = loadImage('../assets/0_background.png');
spriteSoccerBall = loadImage('../assets/sprite_soccer_ball.png');
spritePlayerMainBody0 = loadImage('../assets/sprite_player_main_body0.png');
spritePlayerLeg0 = loadImage('../assets/sprite_player_leg0.png');
spritePlayerLeg1 = loadImage('../assets/sprite_player_leg12.png');
}
// Entry point of code
function setup() {
// Canvas creation
canvas = createCanvas(CANVAS_WIDTH, CANVAS_HEIGHT);
canvas.parent('sketch-holder');
// Draw background
image(background0, 0, 0, CANVAS_WIDTH, CANVAS_HEIGHT);
// Matter.js engine creation
engine = Engine.create();
// Matter.js renderer creation - COMMENT FROM HERE...
/*var render = Render.create({
element: document.body,
engine: engine, | options: {
width: CANVAS_WIDTH,
height: CANVAS_HEIGHT,
showAxes: true,
showConvexHulls: true,
showInternalEdges: true,
showVelocity: true,
showDebug: true,
showAngleIndicator: true
}
});
Render.run(render);*/
// ... TO HERE TO GET RID OF THE RENDERER
// Socket connection
//socket = io.connect('http://localhost:3000');
//Engine.run(engine);
world = engine.world;
// INSTANCIATIONS
gameManager = new GameManager();
gameManager.init();
gameMenus = new GameMenus();
ground = new Ground(CANVAS_WIDTH / 2, (CANVAS_HEIGHT + (groundHeight / 2) - groundOffset), groundWidth, groundHeight, 0);
ball = new Ball(gameManager.ballOptions);
player1Def = new Player(gameManager.player1DefOptions);
player1Atk = new Player(gameManager.player1AtkOptions);
goal1 = new Goal(gameManager.goal1Options);
player2Atk = new Player(gameManager.player2AtkOptions);
player2Def = new Player(gameManager.player2DefOptions);
goal2 = new Goal(gameManager.goal2Options);
gameTimer = new GameTimer(elapsedTimeSec, elapsedTimeMin);
gameScore = new GameScore();
singlePlayerAILeft = new SinglePlayerAI(previousTimingAI, randTimingAI, choosePlayerAI);
singlePlayerAIRight = new SinglePlayerAI(previousTimingAI, randTimingAI, choosePlayerAI);
world.gravity.y = 1;
randTimingAI = random(lowerBoundTimingAI, upperBoundTimingAI);
choosePlayerAI = random(0.0, 1.0);
}
// p5.js animation loop
function draw() {
// Drawing main menu
if (menu == MAIN_MENU) {
gameMenus.draw(menu);
}
// 1 PLAYER - LOCAL | CHOOSE SIDE
if (menu == P1_LOCAL_CHOOSE_SIDE_MENU) {
gameMenus.draw(menu);
}
// 2 PLAYERS - LOCAL
if (menu == P2_LOCAL_SELECTED) {
main();
}
// 2 PLAYERS - ONLINE
if (menu == P2_ONLINE_MENU) {
gameMenus.draw(menu);
}
    // 1 PLAYER - LOCAL | LEFT
if (menu == P1_LOCAL_LEFT_SELECTED) {
main();
}
// 1 PLAYER - LOCAL | RIGHT
if (menu == P1_LOCAL_RIGHT_SELECTED) {
gameMenus.draw(menu);
main();
}
    // 2 PLAYERS - ONLINE | CREATE GAME
    if (menu == P2_ONLINE_CREATE_MENU) {
        gameMenus.draw(menu);
    }
    // 2 PLAYERS - ONLINE | JOIN GAME
    if (menu == P2_ONLINE_JOIN_MENU) {
        gameMenus.draw(menu);
    }
// EASTER EGG, SPECTATOR MODE BETWEEN TWO AIs
if (menu == 1000) {
main();
}
}
// EVENT FUNCTIONS
function mouseClicked() {
gameMenus.clickedOn(menu);
}
function keyPressed() {
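    // A / D make the left team's defender / attacker jump; the right / left
    // arrow keys do the same for the right team's defender / attacker.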
if (keyCode == 65) {
if (menu == P1_LOCAL_LEFT_SELECTED || menu == P2_LOCAL_SELECTED) {
if (player1Def.isOnGround(ground)) {
player1Def.jump();
}
}
}
if (keyCode == 68) {
if (menu == P1_LOCAL_LEFT_SELECTED || menu == P2_LOCAL_SELECTED) {
if (player1Atk.isOnGround(ground)) {
player1Atk.jump();
}
}
}
if (keyCode == RIGHT_ARROW) {
if (menu == P1_LOCAL_RIGHT_SELECTED || menu == P2_LOCAL_SELECTED) {
if (player2Def.isOnGround(ground)) {
player2Def.jump();
}
}
}
if (keyCode == LEFT_ARROW) {
if (menu == P1_LOCAL_RIGHT_SELECTED || menu == P2_LOCAL_SELECTED) {
if (player2Atk.isOnGround(ground)) {
player2Atk.jump();
}
}
}
}
function keyReleased() {
if (keyCode == 65) {
if (menu == P1_LOCAL_LEFT_SELECTED || menu == P2_LOCAL_SELECTED) {
player1Def.cstrLegs.stiffness = idleLegStiffness;
}
}
if (keyCode == 68) {
if (menu == P1_LOCAL_LEFT_SELECTED || menu == P2_LOCAL_SELECTED) {
player1Atk.cstrLegs.stiffness = idleLegStiffness;
}
}
if (keyCode == RIGHT_ARROW) {
if (menu == P1_LOCAL_RIGHT_SELECTED || menu == P2_LOCAL_SELECTED) {
player2Def.cstrLegs.stiffness = idleLegStiffness;
}
}
if (keyCode == LEFT_ARROW) {
if (menu == P1_LOCAL_RIGHT_SELECTED || menu == P2_LOCAL_SELECTED) {
player2Atk.cstrLegs.stiffness = idleLegStiffness;
}
}
} | |
utils.js | export function randomKey () {
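  // Concatenate the current timestamp with random digits for a reasonably unique key.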
return new Date().getTime().toString() + Math.random().toString().substring(3)
} |
||
add_heuristic_engine.py | import pandas as pd
__author__ = 'slei'
class AddHeuristicTSP:
""" Finds the shortest path using a heuristic method """
def __init__(self, cities_df):
self.df = cities_df
        self.edges = list((t.origin, t.destination) for t in self.df.itertuples())
        self.distance = dict([((t.origin, t.destination), t.distance) for t in self.df.itertuples()])
        self.cities = list(set(self.df['destination']))
self.cities_lst = []
self.tour_lst = []
self.distance_lst = []
self.tour_leg_distances_lst = []
self._final_df = None
self._shortest_distance = None
self._shortest_tour = None
def find_subtour(self, starting_city):
""" Given a starting city, finds a tour by selecting next shortest distance from list of unvisited cities """
tour = []
tour_distance_lst = [0]
cities_unvisited = list(set(self.df['destination']))
initial_city = starting_city
current_city = initial_city
tour.append(current_city)
        cities_unvisited.remove(initial_city)
total_distance = 0
count = 0
while len(cities_unvisited) > 0:
# remove any city that has already been visited from consideration
df_unvisited = self.df[self.df['destination'].isin(cities_unvisited)]
# filter for rows based on first criterion
is_current = df_unvisited['origin'] == current_city
df2 = df_unvisited[is_current]
# find the nearest city
index_min = df2['distance'].idxmin()
min_row = df2.loc[index_min]
d = min_row.distance
destination = min_row.destination
# update next city and tour and total distance
current_city = destination
total_distance = total_distance + d
tour_distance_lst.append(d)
# update city tracker lists
tour.append(current_city)
index_i = cities_unvisited.index(current_city)
cities_unvisited.pop(index_i)
count = count + 1
# check
print("next destination: ", destination)
print("distance: ", d)
print("total_distance: ", total_distance)
print("tour: ", tour)
print("tour_distance_lst: ", tour_distance_lst)
print("cities_unvisited: ", cities_unvisited)
print()
# adding the distance from last city back to initial city
last_city = tour[-1]
last_mile = (initial_city, last_city)
last_mile_distance = self.distance[last_mile]
tour.append(initial_city)
total_distance = total_distance + last_mile_distance
tour_distance_lst.append(last_mile_distance)
# check
print("last_mile: ", last_mile)
print("last_mile_distance: ", last_mile_distance)
print("tour: ", tour)
print("total_distance: ", total_distance)
print("tour_leg_distances_lst: ", tour_distance_lst)
# update lists
self.tour_lst.append(tour)
self.distance_lst.append(total_distance) | @property
def final_df(self):
""" Add description here"""
if self._final_df is None:
self._final_df = self._generate_final_df()
return self._final_df
def _generate_final_df(self):
for c in self.cities: # for every city in the dataset
print("city: ", c) # generate a tour for each
print("--------------------------------------------------------------------------------")
self.find_subtour(c)
print('********************************************************************************')
print()
soln_dict = {'city': self.cities, 'tour': self.tour_lst, 'tour_leg_distances': self.tour_leg_distances_lst,
'distance': self.distance_lst}
return pd.DataFrame(soln_dict)
@property
def shortest_distance(self):
""" Add description here"""
if self._shortest_distance is None:
return self._calculate_shortest_distance()
def _calculate_shortest_distance(self): # find the tour with the lowest distance
index_min_final = self.final_df['distance'].idxmin() # returns the index location of min value
min_row_final = self.final_df.loc[index_min_final]
return min_row_final.distance
@property
def shortest_tour(self):
""" Add description here"""
if self._shortest_tour is None:
return self._generate_shortest_tour()
def _generate_shortest_tour(self):
index_min_final = self.final_df['distance'].idxmin() # returns the index location of min value
min_row_final = self.final_df.loc[index_min_final]
return min_row_final.tour
# ********************************************************************************
# ********************************************************************************
if __name__ == '__main__':
df = pd.read_csv('city_data_add.csv')
tsp = AddHeuristicTSP(df)
tsp.final_df
print("final_df")
print(tsp.final_df)
print()
print("shortest_distance_final", tsp.shortest_distance)
print("shortest_tour_final", tsp.shortest_tour) | self.tour_leg_distances_lst.append(tour_distance_lst)
|
test_helper.py | import os
import sys
BASE_DIR = os.getcwd()
sys.path.append(BASE_DIR)
from helper import random, diff
import unittest
class | (unittest.TestCase):
def test_random(self):
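        """randints should honour the requested length and keep every value within the given bounds."""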
for l in [1, 2, 5, 10, 100]:
a = random.randints(0, 1, l)
self.assertEqual(len(a), l, "Not returning correct length, should be {}"
"returning {}".format(l, len(a)))
self.assertTrue(all(1 >= x >= 0 for x in a), "All values are not in limit")
def test_diff(self):
A = {'a': 1, 'b': 3}
B = {'a': 2, 'c': 3, 'd': {'4': 5}}
print(list(diff(A, B)))
print(list(diff(B, A)))
def bin_search(self):
pass
if __name__ == "__main__":
unittest.main()
| TestHelper |
index.js | // @flow |
// TODO(mc, 2018-09-13): these aren't cards; rename
import { InformationCard } from './InformationCard'
import { ProtocolPipettesCard } from './ProtocolPipettesCard'
import { ProtocolModulesCard } from './ProtocolModulesCard'
import { ProtocolLabwareCard } from './ProtocolLabwareCard'
import { Continue } from './Continue'
import { UploadError } from '../UploadError'
import styles from './styles.css'
import type { Robot } from '../../discovery/types'
const NO_STEPS_MESSAGE = `This protocol has no steps in it - there's nothing for your robot to do! Your protocol needs at least one aspirate/dispense to import properly`
export type FileInfoProps = {|
robot: Robot,
sessionLoaded: boolean,
sessionHasSteps: boolean,
uploadError: ?{ message: string },
|}
export function FileInfo(props: FileInfoProps): React.Node {
const { robot, sessionLoaded, sessionHasSteps } = props
let uploadError = props.uploadError
if (sessionLoaded && !uploadError && !sessionHasSteps) {
uploadError = { message: NO_STEPS_MESSAGE }
}
return (
<div className={styles.file_info_container}>
<InformationCard />
<ProtocolPipettesCard robotName={robot.name} />
<ProtocolModulesCard robot={robot} />
<ProtocolLabwareCard />
{uploadError && <UploadError uploadError={uploadError} />}
{sessionLoaded && !uploadError && <Continue />}
</div>
)
} | import * as React from 'react' |
libunwind.rs | //! Backtrace support using libunwind/gcc_s/etc APIs.
//!
//! This module contains the ability to unwind the stack using libunwind-style
//! APIs. Note that there's a whole bunch of implementations of the
//! libunwind-like API, and this is just trying to be compatible with most of
//! them all at once instead of being picky.
//!
//! The libunwind API is powered by `_Unwind_Backtrace` and is in practice very
//! reliable at generating a backtrace. It's not entirely clear how it does it
//! (frame pointers? eh_frame info? both?) but it seems to work!
//!
//! Most of the complexity of this module is handling the various platform
//! differences across libunwind implementations. Otherwise this is a pretty
//! straightforward Rust binding to the libunwind APIs.
//!
//! This is the default unwinding API for all non-Windows platforms currently.
use super::super::Bomb;
use core::ffi::c_void;
pub enum Frame {
Raw(*mut uw::_Unwind_Context),
Cloned {
ip: *mut c_void,
sp: *mut c_void,
symbol_address: *mut c_void,
},
}
// With a raw libunwind pointer it should only ever be access in a readonly
// threadsafe fashion, so it's `Sync`. When sending to other threads via `Clone`
// we always switch to a version which doesn't retain interior pointers, so we
// should be `Send` as well.
unsafe impl Send for Frame {}
unsafe impl Sync for Frame {}
impl Frame {
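    /// Instruction pointer (program counter) for this frame.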
pub fn ip(&self) -> *mut c_void {
let ctx = match *self {
Frame::Raw(ctx) => ctx,
Frame::Cloned { ip, .. } => return ip,
};
unsafe { uw::_Unwind_GetIP(ctx) as *mut c_void }
}
pub fn sp(&self) -> *mut c_void {
match *self {
Frame::Raw(ctx) => unsafe { uw::get_sp(ctx) as *mut c_void },
Frame::Cloned { sp, .. } => sp,
}
}
pub fn symbol_address(&self) -> *mut c_void {
if let Frame::Cloned { symbol_address, .. } = *self {
return symbol_address;
}
// It seems that on OSX `_Unwind_FindEnclosingFunction` returns a
// pointer to... something that's unclear. It's definitely not always
// the enclosing function for whatever reason. It's not entirely clear
// to me what's going on here, so pessimize this for now and just always
// return the ip.
//
// Note the `skip_inner_frames.rs` test is skipped on OSX due to this
// clause, and if this is fixed that test in theory can be run on OSX!
if cfg!(target_os = "macos") || cfg!(target_os = "ios") {
self.ip()
} else {
unsafe { uw::_Unwind_FindEnclosingFunction(self.ip()) }
}
}
pub fn module_base_address(&self) -> Option<*mut c_void> {
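        // Not provided by the libunwind-style APIs used here; callers fall
        // back to other strategies when this is `None`.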
None
}
}
impl Clone for Frame {
fn clone(&self) -> Frame {
Frame::Cloned {
ip: self.ip(),
sp: self.sp(),
symbol_address: self.symbol_address(),
}
}
}
#[inline(always)]
pub unsafe fn trace(mut cb: &mut dyn FnMut(&super::Frame) -> bool) {
uw::_Unwind_Backtrace(trace_fn, &mut cb as *mut _ as *mut _);
extern "C" fn trace_fn(
ctx: *mut uw::_Unwind_Context,
arg: *mut c_void,
) -> uw::_Unwind_Reason_Code {
let cb = unsafe { &mut *(arg as *mut &mut dyn FnMut(&super::Frame) -> bool) };
let cx = super::Frame {
inner: Frame::Raw(ctx),
};
let mut bomb = Bomb { enabled: true };
let keep_going = cb(&cx);
bomb.enabled = false;
if keep_going {
uw::_URC_NO_REASON
} else |
}
}
/// Unwind library interface used for backtraces
///
/// Note that dead code is allowed as these are just bindings;
/// iOS doesn't use all of them, but adding more
/// platform-specific configs pollutes the code too much
#[allow(non_camel_case_types)]
#[allow(non_snake_case)]
#[allow(dead_code)]
mod uw {
pub use self::_Unwind_Reason_Code::*;
use core::ffi::c_void;
#[repr(C)]
pub enum _Unwind_Reason_Code {
_URC_NO_REASON = 0,
_URC_FOREIGN_EXCEPTION_CAUGHT = 1,
_URC_FATAL_PHASE2_ERROR = 2,
_URC_FATAL_PHASE1_ERROR = 3,
_URC_NORMAL_STOP = 4,
_URC_END_OF_STACK = 5,
_URC_HANDLER_FOUND = 6,
_URC_INSTALL_CONTEXT = 7,
_URC_CONTINUE_UNWIND = 8,
_URC_FAILURE = 9, // used only by ARM EABI
}
pub enum _Unwind_Context {}
pub type _Unwind_Trace_Fn =
extern "C" fn(ctx: *mut _Unwind_Context, arg: *mut c_void) -> _Unwind_Reason_Code;
extern "C" {
// No native _Unwind_Backtrace on iOS
#[cfg(not(all(target_os = "ios", target_arch = "arm")))]
pub fn _Unwind_Backtrace(
trace: _Unwind_Trace_Fn,
trace_argument: *mut c_void,
) -> _Unwind_Reason_Code;
// available since GCC 4.2.0, should be fine for our purpose
#[cfg(all(
not(all(target_os = "android", target_arch = "arm")),
not(all(target_os = "freebsd", target_arch = "arm")),
not(all(target_os = "linux", target_arch = "arm"))
))]
pub fn _Unwind_GetIP(ctx: *mut _Unwind_Context) -> libc::uintptr_t;
#[cfg(all(
not(all(target_os = "android", target_arch = "arm")),
not(all(target_os = "freebsd", target_arch = "arm")),
not(all(target_os = "linux", target_arch = "arm"))
))]
pub fn _Unwind_FindEnclosingFunction(pc: *mut c_void) -> *mut c_void;
#[cfg(all(
not(all(target_os = "android", target_arch = "arm")),
not(all(target_os = "freebsd", target_arch = "arm")),
not(all(target_os = "linux", target_arch = "arm")),
not(all(target_os = "linux", target_arch = "s390x"))
))]
// This function is a misnomer: rather than getting this frame's
// Canonical Frame Address (aka the caller frame's SP) it
// returns this frame's SP.
//
// https://github.com/libunwind/libunwind/blob/d32956507cf29d9b1a98a8bce53c78623908f4fe/src/unwind/GetCFA.c#L28-L35
#[link_name = "_Unwind_GetCFA"]
pub fn get_sp(ctx: *mut _Unwind_Context) -> libc::uintptr_t;
}
// s390x uses a biased CFA value, therefore we need to use
// _Unwind_GetGR to get the stack pointer register (%r15)
// instead of relying on _Unwind_GetCFA.
#[cfg(all(target_os = "linux", target_arch = "s390x"))]
pub unsafe fn get_sp(ctx: *mut _Unwind_Context) -> libc::uintptr_t {
extern "C" {
pub fn _Unwind_GetGR(ctx: *mut _Unwind_Context, index: libc::c_int) -> libc::uintptr_t;
}
_Unwind_GetGR(ctx, 15)
}
// On android and arm, the function `_Unwind_GetIP` and a bunch of others
// are macros, so we define functions containing the expansion of the
// macros.
//
// TODO: link to the header file that defines these macros, if you can find
// it. (I, fitzgen, cannot find the header file that some of these macro
// expansions were originally borrowed from.)
#[cfg(any(
all(target_os = "android", target_arch = "arm"),
all(target_os = "freebsd", target_arch = "arm"),
all(target_os = "linux", target_arch = "arm")
))]
pub use self::arm::*;
#[cfg(any(
all(target_os = "android", target_arch = "arm"),
all(target_os = "freebsd", target_arch = "arm"),
all(target_os = "linux", target_arch = "arm")
))]
mod arm {
pub use super::*;
#[repr(C)]
enum _Unwind_VRS_Result {
_UVRSR_OK = 0,
_UVRSR_NOT_IMPLEMENTED = 1,
_UVRSR_FAILED = 2,
}
#[repr(C)]
enum _Unwind_VRS_RegClass {
_UVRSC_CORE = 0,
_UVRSC_VFP = 1,
_UVRSC_FPA = 2,
_UVRSC_WMMXD = 3,
_UVRSC_WMMXC = 4,
}
#[repr(C)]
enum _Unwind_VRS_DataRepresentation {
_UVRSD_UINT32 = 0,
_UVRSD_VFPX = 1,
_UVRSD_FPAX = 2,
_UVRSD_UINT64 = 3,
_UVRSD_FLOAT = 4,
_UVRSD_DOUBLE = 5,
}
type _Unwind_Word = libc::c_uint;
extern "C" {
fn _Unwind_VRS_Get(
ctx: *mut _Unwind_Context,
klass: _Unwind_VRS_RegClass,
word: _Unwind_Word,
repr: _Unwind_VRS_DataRepresentation,
data: *mut c_void,
) -> _Unwind_VRS_Result;
}
pub unsafe fn _Unwind_GetIP(ctx: *mut _Unwind_Context) -> libc::uintptr_t {
let mut val: _Unwind_Word = 0;
let ptr = &mut val as *mut _Unwind_Word;
let _ = _Unwind_VRS_Get(
ctx,
_Unwind_VRS_RegClass::_UVRSC_CORE,
15,
_Unwind_VRS_DataRepresentation::_UVRSD_UINT32,
ptr as *mut c_void,
);
(val & !1) as libc::uintptr_t
}
// R13 is the stack pointer on arm.
const SP: _Unwind_Word = 13;
pub unsafe fn get_sp(ctx: *mut _Unwind_Context) -> libc::uintptr_t {
let mut val: _Unwind_Word = 0;
let ptr = &mut val as *mut _Unwind_Word;
let _ = _Unwind_VRS_Get(
ctx,
_Unwind_VRS_RegClass::_UVRSC_CORE,
SP,
_Unwind_VRS_DataRepresentation::_UVRSD_UINT32,
ptr as *mut c_void,
);
val as libc::uintptr_t
}
// This function also doesn't exist on Android or ARM/Linux, so make it
// a no-op.
pub unsafe fn _Unwind_FindEnclosingFunction(pc: *mut c_void) -> *mut c_void {
pc
}
}
}
| {
uw::_URC_FAILURE
} |
loading.py | # orm/loading.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""private module containing functions used to convert database
rows into object instances and associated state.
the functions here are called primarily by Query, Mapper,
as well as some of the attribute loading strategies.
"""
from __future__ import absolute_import
from .. import util
from . import attributes, exc as orm_exc
from ..sql import util as sql_util
from . import strategy_options
from .util import _none_set, state_str
from .base import _SET_DEFERRED_EXPIRED, _DEFER_FOR_STATE
from .. import exc as sa_exc
import collections
_new_runid = util.counter()
def instances(query, cursor, context):
"""Return an ORM result as an iterator."""
context.runid = _new_runid()
filtered = query._has_mapper_entities
single_entity = len(query._entities) == 1 and \
query._entities[0].supports_single_entity
if filtered:
if single_entity:
filter_fn = id
else:
def filter_fn(row):
return tuple(
id(item)
if ent.use_id_for_hash
else item
for ent, item in zip(query._entities, row)
)
try:
(process, labels) = \
list(zip(*[
query_entity.row_processor(query,
context, cursor)
for query_entity in query._entities
]))
if not single_entity:
keyed_tuple = util.lightweight_named_tuple('result', labels)
while True:
context.partials = {}
if query._yield_per:
fetch = cursor.fetchmany(query._yield_per)
if not fetch:
break
else:
fetch = cursor.fetchall()
if single_entity:
proc = process[0]
rows = [proc(row) for row in fetch]
else:
rows = [keyed_tuple([proc(row) for proc in process])
for row in fetch]
if filtered:
rows = util.unique_list(rows, filter_fn)
for row in rows:
yield row
if not query._yield_per:
break
except Exception as err:
cursor.close()
util.raise_from_cause(err)
@util.dependencies("sqlalchemy.orm.query")
def merge_result(querylib, query, iterator, load=True):
"""Merge a result into this :class:`.Query` object's Session."""
session = query.session
if load:
# flush current contents if we expect to load data
session._autoflush()
autoflush = session.autoflush
try:
session.autoflush = False
single_entity = len(query._entities) == 1
if single_entity:
if isinstance(query._entities[0], querylib._MapperEntity):
result = [session._merge(
attributes.instance_state(instance),
attributes.instance_dict(instance),
load=load, _recursive={}, _resolve_conflict_map={})
for instance in iterator]
else:
result = list(iterator)
else:
mapped_entities = [i for i, e in enumerate(query._entities)
if isinstance(e, querylib._MapperEntity)]
result = []
keys = [ent._label_name for ent in query._entities]
keyed_tuple = util.lightweight_named_tuple('result', keys)
for row in iterator:
newrow = list(row)
for i in mapped_entities:
if newrow[i] is not None:
newrow[i] = session._merge(
attributes.instance_state(newrow[i]),
attributes.instance_dict(newrow[i]),
load=load, _recursive={}, _resolve_conflict_map={})
result.append(keyed_tuple(newrow))
return iter(result)
finally:
session.autoflush = autoflush
def get_from_identity(session, key, passive):
"""Look up the given key in the given session's identity map,
check the object for expired state if found.
"""
instance = session.identity_map.get(key)
if instance is not None:
state = attributes.instance_state(instance)
# expired - ensure it still exists
if state.expired:
if not passive & attributes.SQL_OK:
# TODO: no coverage here
return attributes.PASSIVE_NO_RESULT
elif not passive & attributes.RELATED_OBJECT_OK:
# this mode is used within a flush and the instance's
# expired state will be checked soon enough, if necessary
return instance
try:
state._load_expired(state, passive)
except orm_exc.ObjectDeletedError:
session._remove_newly_deleted([state])
return None
return instance
else:
return None
def load_on_ident(query, key,
refresh_state=None, lockmode=None,
only_load_props=None):
"""Load the given identity key from the database."""
if key is not None:
ident = key[1]
else:
ident = None
if refresh_state is None:
q = query._clone()
q._get_condition()
else:
q = query._clone()
if ident is not None:
mapper = query._mapper_zero()
(_get_clause, _get_params) = mapper._get_clause
# None present in ident - turn those comparisons
# into "IS NULL"
if None in ident:
nones = set([
_get_params[col].key for col, value in
zip(mapper.primary_key, ident) if value is None
])
_get_clause = sql_util.adapt_criterion_to_null(
_get_clause, nones)
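# illustrative: for a composite identity like (10, None) the adaptation above
# turns the second comparison into "pk_b IS NULL" rather than the always-false
# "pk_b = NULL"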
_get_clause = q._adapt_clause(_get_clause, True, False)
q._criterion = _get_clause
params = dict([
(_get_params[primary_key].key, id_val)
for id_val, primary_key in zip(ident, mapper.primary_key)
])
q._params = params
if lockmode is not None:
version_check = True
q = q.with_lockmode(lockmode)
elif query._for_update_arg is not None:
version_check = True
q._for_update_arg = query._for_update_arg
else:
version_check = False
q._get_options(
populate_existing=bool(refresh_state),
version_check=version_check,
only_load_props=only_load_props,
refresh_state=refresh_state)
q._order_by = None
try:
return q.one()
except orm_exc.NoResultFound:
return None
def _setup_entity_query(
context, mapper, query_entity,
path, adapter, column_collection,
with_polymorphic=None, only_load_props=None,
polymorphic_discriminator=None, **kw):
if with_polymorphic:
poly_properties = mapper._iterate_polymorphic_properties(
with_polymorphic)
else:
poly_properties = mapper._polymorphic_properties
quick_populators = {}
path.set(
context.attributes,
"memoized_setups",
quick_populators)
for value in poly_properties:
if only_load_props and \
value.key not in only_load_props:
continue
value.setup(
context,
query_entity,
path,
adapter,
only_load_props=only_load_props,
column_collection=column_collection,
memoized_populators=quick_populators,
**kw
)
if polymorphic_discriminator is not None and \
polymorphic_discriminator \
is not mapper.polymorphic_on:
if adapter:
pd = adapter.columns[polymorphic_discriminator]
else:
pd = polymorphic_discriminator
column_collection.append(pd)
def _instance_processor(
mapper, context, result, path, adapter,
only_load_props=None, refresh_state=None,
polymorphic_discriminator=None,
_polymorphic_from=None):
|
def _populate_full(
context, row, state, dict_, isnew, load_path,
loaded_instance, populate_existing, populators):
if isnew:
# first time we are seeing a row with this identity.
state.runid = context.runid
for key, getter in populators["quick"]:
dict_[key] = getter(row)
if populate_existing:
for key, set_callable in populators["expire"]:
dict_.pop(key, None)
if set_callable:
state.expired_attributes.add(key)
else:
for key, set_callable in populators["expire"]:
if set_callable:
state.expired_attributes.add(key)
for key, populator in populators["new"]:
populator(state, dict_, row)
for key, populator in populators["delayed"]:
populator(state, dict_, row)
elif load_path != state.load_path:
# new load path, e.g. object is present in more than one
# column position in a series of rows
state.load_path = load_path
# if we have data, and the data isn't in the dict, OK, let's put
# it in.
for key, getter in populators["quick"]:
if key not in dict_:
dict_[key] = getter(row)
# otherwise treat like an "already seen" row
for key, populator in populators["existing"]:
populator(state, dict_, row)
# TODO: allow "existing" populator to know this is
# a new path for the state:
# populator(state, dict_, row, new_path=True)
else:
# have already seen rows with this identity in this same path.
for key, populator in populators["existing"]:
populator(state, dict_, row)
# TODO: same path
# populator(state, dict_, row, new_path=False)
def _populate_partial(
context, row, state, dict_, isnew, load_path,
unloaded, populators):
if not isnew:
to_load = context.partials[state]
for key, populator in populators["existing"]:
if key in to_load:
populator(state, dict_, row)
else:
to_load = unloaded
context.partials[state] = to_load
for key, getter in populators["quick"]:
if key in to_load:
dict_[key] = getter(row)
for key, set_callable in populators["expire"]:
if key in to_load:
dict_.pop(key, None)
if set_callable:
state.expired_attributes.add(key)
for key, populator in populators["new"]:
if key in to_load:
populator(state, dict_, row)
for key, populator in populators["delayed"]:
if key in to_load:
populator(state, dict_, row)
for key, populator in populators["eager"]:
if key not in unloaded:
populator(state, dict_, row)
return to_load
def _validate_version_id(mapper, state, dict_, row, adapter):
version_id_col = mapper.version_id_col
if version_id_col is None:
return
if adapter:
version_id_col = adapter.columns[version_id_col]
if mapper._get_state_attr_by_column(
state, dict_, mapper.version_id_col) != row[version_id_col]:
raise orm_exc.StaleDataError(
"Instance '%s' has version id '%s' which "
"does not match database-loaded version id '%s'."
% (state_str(state), mapper._get_state_attr_by_column(
state, dict_, mapper.version_id_col),
row[version_id_col]))
def _decorate_polymorphic_switch(
instance_fn, context, mapper, result, path,
polymorphic_discriminator, adapter):
if polymorphic_discriminator is not None:
polymorphic_on = polymorphic_discriminator
else:
polymorphic_on = mapper.polymorphic_on
if polymorphic_on is None:
return instance_fn
if adapter:
polymorphic_on = adapter.columns[polymorphic_on]
def configure_subclass_mapper(discriminator):
try:
sub_mapper = mapper.polymorphic_map[discriminator]
except KeyError:
raise AssertionError(
"No such polymorphic_identity %r is defined" %
discriminator)
else:
if sub_mapper is mapper:
return None
return _instance_processor(
sub_mapper, context, result,
path, adapter, _polymorphic_from=mapper)
polymorphic_instances = util.PopulateDict(
configure_subclass_mapper
)
def polymorphic_instance(row):
discriminator = row[polymorphic_on]
if discriminator is not None:
_instance = polymorphic_instances[discriminator]
if _instance:
return _instance(row)
return instance_fn(row)
return polymorphic_instance
def load_scalar_attributes(mapper, state, attribute_names):
"""initiate a column-based attribute refresh operation."""
# assert mapper is _state_mapper(state)
session = state.session
if not session:
raise orm_exc.DetachedInstanceError(
"Instance %s is not bound to a Session; "
"attribute refresh operation cannot proceed" %
(state_str(state)))
has_key = bool(state.key)
result = False
if mapper.inherits and not mapper.concrete:
# because we are using Core to produce a select() that we
# pass to the Query, we aren't calling setup() for mapped
# attributes; in 1.0 this means deferred attrs won't get loaded
# by default
statement = mapper._optimized_get_statement(state, attribute_names)
if statement is not None:
result = load_on_ident(
session.query(mapper).
options(
strategy_options.Load(mapper).undefer("*")
).from_statement(statement),
None,
only_load_props=attribute_names,
refresh_state=state
)
if result is False:
if has_key:
identity_key = state.key
else:
# this codepath is rare - only valid when inside a flush, and the
# object is becoming persistent but hasn't yet been assigned
# an identity_key.
# check here to ensure we have the attrs we need.
pk_attrs = [mapper._columntoproperty[col].key
for col in mapper.primary_key]
if state.expired_attributes.intersection(pk_attrs):
raise sa_exc.InvalidRequestError(
"Instance %s cannot be refreshed - it's not "
" persistent and does not "
"contain a full primary key." % state_str(state))
identity_key = mapper._identity_key_from_state(state)
if (_none_set.issubset(identity_key) and
not mapper.allow_partial_pks) or \
_none_set.issuperset(identity_key):
util.warn_limited(
"Instance %s to be refreshed doesn't "
"contain a full primary key - can't be refreshed "
"(and shouldn't be expired, either).",
state_str(state))
return
result = load_on_ident(
session.query(mapper),
identity_key,
refresh_state=state,
only_load_props=attribute_names)
# if instance is pending, a refresh operation
# may not complete (even if PK attributes are assigned)
if has_key and result is None:
raise orm_exc.ObjectDeletedError(state)
| """Produce a mapper level row processor callable
which processes rows into mapped instances."""
# note that this method, most of which exists in a closure
# called _instance(), resists being broken out, as
# attempts to do so tend to add significant function
# call overhead. _instance() is the most
# performance-critical section in the whole ORM.
pk_cols = mapper.primary_key
if adapter:
pk_cols = [adapter.columns[c] for c in pk_cols]
identity_class = mapper._identity_class
populators = collections.defaultdict(list)
props = mapper._prop_set
if only_load_props is not None:
props = props.intersection(
mapper._props[k] for k in only_load_props)
quick_populators = path.get(
context.attributes, "memoized_setups", _none_set)
for prop in props:
if prop in quick_populators:
# this is an inlined path just for column-based attributes.
col = quick_populators[prop]
if col is _DEFER_FOR_STATE:
populators["new"].append(
(prop.key, prop._deferred_column_loader))
elif col is _SET_DEFERRED_EXPIRED:
# note that in this path, we are no longer
# searching in the result to see if the column might
# be present in some unexpected way.
populators["expire"].append((prop.key, False))
else:
if adapter:
col = adapter.columns[col]
getter = result._getter(col, False)
if getter:
populators["quick"].append((prop.key, getter))
else:
# fall back to the ColumnProperty itself, which
# will iterate through all of its columns
# to see if one fits
prop.create_row_processor(
context, path, mapper, result, adapter, populators)
else:
prop.create_row_processor(
context, path, mapper, result, adapter, populators)
propagate_options = context.propagate_options
load_path = context.query._current_path + path \
if context.query._current_path.path else path
session_identity_map = context.session.identity_map
populate_existing = context.populate_existing or mapper.always_refresh
load_evt = bool(mapper.class_manager.dispatch.load)
refresh_evt = bool(mapper.class_manager.dispatch.refresh)
persistent_evt = bool(context.session.dispatch.loaded_as_persistent)
if persistent_evt:
loaded_as_persistent = context.session.dispatch.loaded_as_persistent
instance_state = attributes.instance_state
instance_dict = attributes.instance_dict
session_id = context.session.hash_key
version_check = context.version_check
runid = context.runid
if refresh_state:
refresh_identity_key = refresh_state.key
if refresh_identity_key is None:
# super-rare condition; a refresh is being called
# on a non-instance-key instance; this is meant to only
# occur within a flush()
refresh_identity_key = \
mapper._identity_key_from_state(refresh_state)
else:
refresh_identity_key = None
if mapper.allow_partial_pks:
is_not_primary_key = _none_set.issuperset
else:
is_not_primary_key = _none_set.intersection
def _instance(row):
# determine the state that we'll be populating
if refresh_identity_key:
# fixed state that we're refreshing
state = refresh_state
instance = state.obj()
dict_ = instance_dict(instance)
isnew = state.runid != runid
currentload = True
loaded_instance = False
else:
# look at the row, see if that identity is in the
# session, or we have to create a new one
identitykey = (
identity_class,
tuple([row[column] for column in pk_cols])
)
instance = session_identity_map.get(identitykey)
if instance is not None:
# existing instance
state = instance_state(instance)
dict_ = instance_dict(instance)
isnew = state.runid != runid
currentload = not isnew
loaded_instance = False
if version_check and not currentload:
_validate_version_id(mapper, state, dict_, row, adapter)
else:
# create a new instance
# check for non-NULL values in the primary key columns,
# else no entity is returned for the row
if is_not_primary_key(identitykey[1]):
return None
isnew = True
currentload = True
loaded_instance = True
instance = mapper.class_manager.new_instance()
dict_ = instance_dict(instance)
state = instance_state(instance)
state.key = identitykey
# attach instance to session.
state.session_id = session_id
session_identity_map._add_unpresent(state, identitykey)
# populate. this looks at whether this state is new
# for this load or was existing, and whether or not this
# row is the first row with this identity.
if currentload or populate_existing:
# full population routines. Objects here are either
# just created, or we are doing a populate_existing
# be conservative about setting load_path when populate_existing
# is in effect; want to maintain options from the original
# load. see test_expire->test_refresh_maintains_deferred_options
if isnew and (propagate_options or not populate_existing):
state.load_options = propagate_options
state.load_path = load_path
_populate_full(
context, row, state, dict_, isnew, load_path,
loaded_instance, populate_existing, populators)
if isnew:
if loaded_instance:
if load_evt:
state.manager.dispatch.load(state, context)
if persistent_evt:
loaded_as_persistent(context.session, state.obj())
elif refresh_evt:
state.manager.dispatch.refresh(
state, context, only_load_props)
if populate_existing or state.modified:
if refresh_state and only_load_props:
state._commit(dict_, only_load_props)
else:
state._commit_all(dict_, session_identity_map)
else:
# partial population routines, for objects that were already
# in the Session, but a row matches them; apply eager loaders
# on existing objects, etc.
unloaded = state.unloaded
isnew = state not in context.partials
if not isnew or unloaded or populators["eager"]:
# state is having a partial set of its attributes
# refreshed. Populate those attributes,
# and add to the "context.partials" collection.
to_load = _populate_partial(
context, row, state, dict_, isnew, load_path,
unloaded, populators)
if isnew:
if refresh_evt:
state.manager.dispatch.refresh(
state, context, to_load)
state._commit(dict_, to_load)
return instance
if mapper.polymorphic_map and not _polymorphic_from and not refresh_state:
# if we are doing polymorphic, dispatch to a different _instance()
# method specific to the subclass mapper
_instance = _decorate_polymorphic_switch(
_instance, context, mapper, result, path,
polymorphic_discriminator, adapter)
return _instance |
mod.rs | extern crate serde_json;
mod bookmark;
use std::{fs, process};
use std::collections::HashSet;
pub use bookmark::Bookmark;
/// Memoire contains a list of Bookmarks and the file path to the JSON records
#[derive(Hash)]
pub struct Memoire {
bookmarks: Vec<Bookmark>,
file_path: String
}
/// SearchResult contains the id/index of the bookmark and a copy of the bookmark
#[derive(Hash, Eq, PartialEq, Clone)]
pub struct SearchResult {
id: usize,
bookmark: Bookmark
}
impl SearchResult {
pub fn new(id: usize, bookmark: &Bookmark) -> SearchResult {
SearchResult {
id,
bookmark: bookmark.clone()
}
}
pub fn get_bookmark(&self) -> &Bookmark {
&self.bookmark
}
pub fn get_id(&self) -> usize {
self.id
}
}
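// Usage sketch (illustrative only; the path, command and tags below are made up):
//
//     let mut mem = Memoire::load_from("/tmp/bookmarks.json");
//     mem.add_bookmark("ls -la", "list all files", &vec!["shell".to_string()]);
//     let hits = mem.search(true, true, true, "ls");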
fn | (path: &str) -> Vec<Bookmark> {
match fs::read_to_string(&path) {
Ok(data) => {
// Errs if the Bookmark struct contains an attribute that the JSON string does not have.
// TODO: find a way to fix that for future compatibility reasons.
match serde_json::from_str(&data) {
Ok(v) => v,
Err(_err) => {
println!("Unable to parse file: {:?}", &path);
process::exit(0);
}
}
},
Err(_err) => {
println!("Unable to read file: {:?}", &path);
process::exit(0);
}
}
}
fn write_bookmarks(path: &str, bookmarks: &Vec<Bookmark>) {
// TODO: Error handling
let json_str = serde_json::to_string(&bookmarks).expect("Unable to parse bookmarks");
fs::write(path, json_str).expect("Unable to write file");
}
impl Memoire {
pub fn load_from(file_path: &str) -> Memoire {
Memoire {
bookmarks: read_bookmarks(&file_path),
file_path: file_path.to_string()
}
}
pub fn add_bookmark(&mut self, command: &str, annotation: &str, tags: &Vec<String>) {
self.bookmarks.push(
Bookmark::new(&command, &annotation, &tags)
);
write_bookmarks(&self.file_path, &self.bookmarks);
}
pub fn remove_bookmark(&mut self, id: usize) {
if id < self.bookmarks.len() {
self.bookmarks.remove(id);
write_bookmarks(&self.file_path, &self.bookmarks);
} else {
println!("Cannot find such bookmark.");
}
}
pub fn edit_bookmark(&mut self, id: usize, command: Option<&str>, annotation: Option<&str>, tags: Option<&Vec<String>>) {
if id < self.bookmarks.len() {
let bookmark: &mut Bookmark = &mut self.bookmarks[id];
match command {
Some(c) => {
if c != bookmark.get_command().to_string() {
bookmark.set_command(&c);
}
},
None => {}
}
match annotation {
Some(a) => {
if a != bookmark.get_annotation().to_string() {
bookmark.set_annotation(&a);
}
},
None => {}
}
match tags {
Some(t) => {
bookmark.set_tags(&t);
},
None => {}
}
write_bookmarks(&self.file_path, &self.bookmarks);
} else {
println!("Cannot find such bookmark.");
}
}
/// Get all bookmarks in memoire
pub fn all(&self) -> HashSet<SearchResult> {
self.search(true, true, true, "")
}
// TODO: Update contains for case insensitive match
pub fn search(&self, in_command: bool, in_annotation: bool, in_tags: bool, search_str: &str) -> HashSet<SearchResult> {
let mut search_results: HashSet<SearchResult> = HashSet::new();
for (i, bookmark) in self.bookmarks.iter().enumerate() {
if (in_command && bookmark.command_contains(&search_str)) ||
(in_annotation && bookmark.annotation_contains(&search_str)) ||
(in_tags && bookmark.tags_contains(&search_str)) {
search_results.insert(SearchResult::new(i, bookmark));
}
}
search_results
}
}
| read_bookmarks |
CastConnectedIcon.tsx | import React from 'react';
// tslint:disable-next-line:ordered-imports
import { Svg, Path } from 'react-native-svg';
import {
processComponent, |
let CastConnectedIcon: React.ComponentType<RfxSvgPropsOptional> = (
props: RfxSvgPropsOptional,
) => (
<RfxSvgIcon {...props}>
<Svg viewBox="0 0 24 24">
<Path d="M1 18v3h3c0-1.66-1.34-3-3-3zm0-4v2c2.76 0 5 2.24 5 5h2c0-3.87-3.13-7-7-7zm18-7H5v1.63c3.96 1.28 7.09 4.41 8.37 8.37H19V7zM1 10v2c4.97 0 9 4.03 9 9h2c0-6.08-4.93-11-11-11zm20-7H3c-1.1 0-2 .9-2 2v3h2V5h18v14h-7v2h7c1.1 0 2-.9 2-2V5c0-1.1-.9-2-2-2z" />
</Svg>
</RfxSvgIcon>
);
CastConnectedIcon = processComponent<RfxSvgPropsOptional>(CastConnectedIcon, {
name: 'CastConnectedIcon',
});
export { CastConnectedIcon }; | RfxSvgIcon,
RfxSvgPropsOptional,
} from '@reflex-ui/core'; |
fsOSNix.go | //go:build !windows
// +build !windows
package apptest
import (
"fmt"
"os"
"os/user"
"strconv"
"syscall"
)
func | (filePath string, fe *FileExpectation) (isMatched bool, reason string, err error) {
info, err := os.Stat(filePath)
if err != nil {
return false, "", err
}
if fe.ExpectedMode > 0 && fe.ExpectedMode != info.Mode() {
return false, fmt.Sprintf("file '%s' has FileMode '%v' but '%v' was expected", filePath, info.Mode(), fe.ExpectedMode), nil
}
if fe.ExpectedGroup == "" && fe.ExpectedUser == "" {
return true, "", nil
}
var fileUser string
var fileGroup string
if stat, ok := info.Sys().(*syscall.Stat_t); ok {
fileUserI, err := user.LookupId(strconv.Itoa(int(stat.Uid)))
if err != nil {
return false, "", err
}
fileUser = fileUserI.Name
fileGroupI, err := user.LookupGroupId(strconv.Itoa(int(stat.Gid)))
if err != nil {
return false, "", err
}
fileGroup = fileGroupI.Name
}
if fe.ExpectedUser != "" && fileUser != fe.ExpectedUser {
return false, fmt.Sprintf("file '%s' should have '%s' as owner but has '%s'", filePath, fe.ExpectedUser, fileUser), nil
}
if fe.ExpectedGroup != "" && fileGroup != fe.ExpectedGroup {
return false, fmt.Sprintf("file '%s' should have '%s' as group but has '%s'", filePath, fe.ExpectedGroup, fileGroup), nil
}
return true, "", nil
}
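// Illustrative check (the FileExpectation literal below is an assumption based on
// the fields referenced above):
//
//	ok, reason, err := AssertFileMatchesExpectationOS("/etc/hosts",
//		&FileExpectation{ExpectedMode: 0644, ExpectedUser: "root"})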
| AssertFileMatchesExpectationOS |
merge_scan.py | from typing import Any, Callable
from rx import defer, from_future, of
from rx.core import Observable
from rx.core.typing import Accumulator
from rx.disposable import CompositeDisposable, SingleAssignmentDisposable
from rx.internal.concurrency import synchronized
from rx.internal.utils import NotSet, is_future
def _merge_scan(accumulator: Accumulator, seed: Any = NotSet) -> Callable[[Observable], Observable]:
def merge_scan(source: Observable) -> Observable:
"""Partially applied merge_scan operator.
Applies an accumulator function, which returns an observable sequence,
over an observable sequence and returns each intermediate result.
Examples:
>>> scanned = merge_scan(source)
Args:
source: The observable source to scan.
Returns:
An observable sequence containing the accumulated values.
"""
def subscribe(observer, scheduler=None):
accumulator_value = [seed]
active = [False]
group = CompositeDisposable()
is_stopped = [False]
queue = []
def subscribe(xs):
subscription = SingleAssignmentDisposable()
group.add(subscription)
@synchronized(source.lock)
def on_next(next_accumulator_value):
accumulator_value[0] = next_accumulator_value
observer.on_next(next_accumulator_value)
@synchronized(source.lock)
def on_completed():
|
on_error = synchronized(source.lock)(observer.on_error)
subscription.disposable = xs.subscribe_(on_next, on_error, on_completed, scheduler)
def on_next(value):
def accumulate():
has_accumulator_value = accumulator_value[0] is not NotSet
if has_accumulator_value:
acc_source = accumulator(accumulator_value[0], value)
return from_future(acc_source) if is_future(acc_source) else acc_source
else:
return of(value)
accumulator_source = defer(lambda _: accumulate())
if not active[0]:
active[0] = True
subscribe(accumulator_source)
else:
queue.append(accumulator_source)
def on_completed():
is_stopped[0] = True
if not active[0]:
observer.on_completed()
group.add(source.subscribe_(on_next, observer.on_error, on_completed, scheduler))
return group
return Observable(subscribe)
return merge_scan
| group.remove(subscription)
if queue:
s = queue.pop(0)
subscribe(s)
else:
active[0] = False
if is_stopped[0]:
observer.on_completed() |
pivot.rs | use super::GroupBy;
use crate::chunked_array::builder::get_list_builder;
use crate::prelude::*;
use hashbrown::HashMap;
use num::{Num, NumCast, Zero};
use std::cmp::Ordering;
use std::collections::hash_map::RandomState;
use std::fmt::{Debug, Formatter};
use std::ops::{Add, Deref};
/// Utility enum used for grouping on multiple columns
#[derive(Copy, Clone, Hash, Eq, PartialEq)]
pub(crate) enum Groupable<'a> {
Boolean(bool),
Utf8(&'a str),
UInt32(u32),
UInt64(u64),
Int32(i32),
Int64(i64),
}
impl<'a> Debug for Groupable<'a> {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
use Groupable::*;
match self {
Boolean(v) => write!(f, "{}", v),
Utf8(v) => write!(f, "{}", v),
UInt32(v) => write!(f, "{}", v),
UInt64(v) => write!(f, "{}", v),
Int32(v) => write!(f, "{}", v),
Int64(v) => write!(f, "{}", v),
}
}
}
impl Series {
pub(crate) fn as_groupable_iter<'a>(
// a mutable reference is needed to put an owned cast back into the caller's location.
// this allows us to return a reference to 'a
// This still is quite hacky. This should probably be reimplemented.
&'a mut self,
) -> Result<Box<dyn Iterator<Item = Option<Groupable>> + 'a + Send>> {
macro_rules! as_groupable_iter {
($ca:expr, $variant:ident ) => {{
let bx = Box::new($ca.into_iter().map(|opt_b| opt_b.map(Groupable::$variant)));
Ok(bx)
}};
}
match self.dtype() {
DataType::Boolean => as_groupable_iter!(self.bool().unwrap(), Boolean),
DataType::Int8 | DataType::UInt8 | DataType::Int16 | DataType::UInt16 => {
let s = self.cast(&DataType::Int32)?;
*self = s;
self.as_groupable_iter()
}
DataType::UInt32 => as_groupable_iter!(self.u32().unwrap(), UInt32),
DataType::UInt64 => as_groupable_iter!(self.u64().unwrap(), UInt64),
DataType::Int32 => as_groupable_iter!(self.i32().unwrap(), Int32),
DataType::Int64 => as_groupable_iter!(self.i64().unwrap(), Int64),
DataType::Utf8 => as_groupable_iter!(self.utf8().unwrap(), Utf8),
DataType::Float32 => {
let s = self.f32()?.bit_repr_small().into_series();
*self = s;
self.as_groupable_iter()
}
DataType::Float64 => {
let s = self.f64()?.bit_repr_small().into_series();
*self = s;
self.as_groupable_iter()
}
#[cfg(feature = "dtype-categorical")]
DataType::Categorical => {
let s = self.cast(&DataType::UInt32)?;
*self = s;
self.as_groupable_iter()
}
dt => Err(PolarsError::ComputeError(
format!("Column with dtype {:?} is not groupable", dt).into(),
)),
}
}
}
impl<'df, 'selection_str> GroupBy<'df, 'selection_str> {
/// Pivot a column of the current `DataFrame` and perform one of the following aggregations:
///
/// * first
/// * sum
/// * min
/// * max
/// * mean
/// * median
///
/// The pivot operation consists of a group by one or multiple columns (these will be the new
/// y-axis), a column that will be pivoted (this will be the new x-axis), and an aggregation.
///
/// # Panics
/// If the values column is not a numerical type, the code will panic.
///
/// # Example
///
/// ```rust
/// use polars_core::prelude::*;
/// use polars_core::df;
///
/// fn example() -> Result<DataFrame> {
/// let df = df!("foo" => &["A", "A", "B", "B", "C"],
/// "N" => &[1, 2, 2, 4, 2],
/// "bar" => &["k", "l", "m", "n", "0"]
/// )?;
///
/// df.groupby("foo")?
/// .pivot("bar", "N")
/// .first()
/// }
/// ```
/// Transforms:
///
/// ```text
/// +-----+-----+-----+
/// | foo | N   | bar |
/// | --- | --- | --- |
/// | str | i32 | str |
/// +=====+=====+=====+
/// | "A" | 1   | "k" |
/// +-----+-----+-----+
/// | "A" | 2   | "l" |
/// +-----+-----+-----+
/// | "B" | 2   | "m" |
/// +-----+-----+-----+
/// | "B" | 4   | "n" |
/// +-----+-----+-----+
/// | "C" | 2   | "o" |
/// +-----+-----+-----+
/// ```
///
/// Into:
///
/// ```text
/// +-----+------+------+------+------+------+
/// | foo | o    | n    | m    | l    | k    |
/// | --- | ---  | ---  | ---  | ---  | ---  |
/// | str | i32  | i32  | i32  | i32  | i32  |
/// +=====+======+======+======+======+======+
/// | "A" | null | null | null | 2    | 1    |
/// +-----+------+------+------+------+------+
/// | "B" | null | 4    | 2    | null | null |
/// +-----+------+------+------+------+------+
/// | "C" | 2    | null | null | null | null |
/// +-----+------+------+------+------+------+
/// ```
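///
/// The other aggregations follow the same pattern (a sketch, not an additional
/// doctest): e.g. `df.groupby("foo")?.pivot("bar", "N").sum()` or `.mean()`.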
#[cfg_attr(docsrs, doc(cfg(feature = "pivot")))]
pub fn pivot(
&mut self,
pivot_column: &'selection_str str,
values_column: &'selection_str str,
) -> Pivot {
// same as select method
self.selected_agg = Some(vec![pivot_column, values_column]);
Pivot {
gb: self,
pivot_column,
values_column,
}
}
}
/// Intermediate structure when a `pivot` operation is applied.
/// See [the pivot method for more information.](../group_by/struct.GroupBy.html#method.pivot)
#[cfg_attr(docsrs, doc(cfg(feature = "pivot")))]
pub struct Pivot<'df, 'selection_str> {
gb: &'df GroupBy<'df, 'selection_str>,
pivot_column: &'selection_str str,
values_column: &'selection_str str,
}
pub(crate) trait ChunkPivot {
fn pivot<'a>(
&self,
_pivot_series: &'a Series,
_keys: Vec<Series>,
_groups: &[(u32, Vec<u32>)],
_agg_type: PivotAgg,
) -> Result<DataFrame> {
Err(PolarsError::InvalidOperation(
"Pivot operation not implemented for this type".into(),
))
}
fn pivot_count<'a>(
&self,
_pivot_series: &'a Series,
_keys: Vec<Series>,
_groups: &[(u32, Vec<u32>)],
) -> Result<DataFrame> {
Err(PolarsError::InvalidOperation(
"Pivot count operation not implemented for this type".into(),
))
}
}
/// Create a hashmap that maps column/keys names to values. This is not yet the result of the aggregation.
fn create_column_values_map<'a, T>(
pivot_vec: &'a [Option<Groupable>],
size: usize,
) -> HashMap<&'a Groupable<'a>, Vec<Option<T>>, RandomState> {
let mut columns_agg_map = HashMap::with_capacity_and_hasher(size, RandomState::new());
for column_name in pivot_vec.iter().flatten() {
columns_agg_map.entry(column_name).or_insert_with(Vec::new);
}
columns_agg_map
}
/// Create a hashmap that maps columns/keys to the result of the aggregation.
fn create_new_column_builder_map<'a, T>(
pivot_vec: &'a [Option<Groupable>],
groups: &[(u32, Vec<u32>)],
) -> PlHashMap<&'a Groupable<'a>, PrimitiveChunkedBuilder<T>>
where
T: PolarsNumericType,
{
// create a hash map that will be filled with the results of the aggregation.
let mut columns_agg_map_main = PlHashMap::new();
for column_name in pivot_vec.iter().flatten() {
columns_agg_map_main.entry(column_name).or_insert_with(|| {
PrimitiveChunkedBuilder::<T>::new(&format!("{:?}", column_name), groups.len())
});
}
columns_agg_map_main
}
fn sort_cols(cols: &mut [Series]) {
(&mut cols[1..]).sort_unstable_by(|s1, s2| {
if s1.name() > s2.name() {
Ordering::Greater
} else {
Ordering::Less
}
});
}
impl<T> ChunkPivot for ChunkedArray<T>
where
T: PolarsNumericType,
T::Native: Copy + Num + NumCast + PartialOrd,
ChunkedArray<T>: IntoSeries,
{
fn pivot<'a>(
&self,
pivot_series: &'a Series,
keys: Vec<Series>,
groups: &[(u32, Vec<u32>)],
agg_type: PivotAgg,
) -> Result<DataFrame> {
// TODO: save an allocation by creating a random access struct for the Groupable utility type.
// Note: we also create pivot_vec with unique values, otherwise we have quadratic behavior
let mut pivot_series = pivot_series.clone();
let mut pivot_unique = pivot_series.unique()?;
let iter = pivot_unique.as_groupable_iter()?;
let pivot_vec_unique: Vec<_> = iter.collect();
let iter = pivot_series.as_groupable_iter()?;
let pivot_vec: Vec<_> = iter.collect();
let values_taker = self.take_rand();
// create a hash map that will be filled with the results of the aggregation.
let mut columns_agg_map_main =
create_new_column_builder_map::<T>(&pivot_vec_unique, groups);
// iterate over the groups that need to be aggregated
// idxes are the indexes of the groups in the keys, pivot, and values columns
for (_first, idx) in groups {
// for every group do the aggregation by adding them to the vector belonging to that column
// the columns are hashed with the pivot values
let mut columns_agg_map_group =
create_column_values_map::<T::Native>(&pivot_vec_unique, idx.len());
for &i in idx {
let i = i as usize;
let opt_pivot_val = unsafe { pivot_vec.get_unchecked(i) };
if let Some(pivot_val) = opt_pivot_val {
let values_val = values_taker.get(i);
if let Some(v) = columns_agg_map_group.get_mut(&pivot_val) {
v.push(values_val)
}
}
}
// After the vectors are filled we really do the aggregation and add the result to the main
// hash map, mapping pivot values as column to aggregate result.
for (k, v) in &mut columns_agg_map_group {
let main_builder = columns_agg_map_main.get_mut(k).unwrap();
match v.len() {
0 => main_builder.append_null(),
// NOTE: now we take first, but this is the place where all aggregations happen
_ => match agg_type {
PivotAgg::First => pivot_agg_first(main_builder, v),
PivotAgg::Sum => pivot_agg_sum(main_builder, v),
PivotAgg::Min => pivot_agg_min(main_builder, v),
PivotAgg::Max => pivot_agg_max(main_builder, v),
PivotAgg::Mean => pivot_agg_mean(main_builder, v),
PivotAgg::Median => pivot_agg_median(main_builder, v),
},
}
}
}
// Finalize the pivot by creating a vec of all the columns and creating a DataFrame
let mut cols = keys;
cols.reserve_exact(columns_agg_map_main.len());
for (_, builder) in columns_agg_map_main {
let ca = builder.finish();
cols.push(ca.into_series());
}
sort_cols(&mut cols);
DataFrame::new(cols)
}
fn pivot_count<'a>(
&self,
pivot_series: &'a Series,
keys: Vec<Series>,
groups: &[(u32, Vec<u32>)],
) -> Result<DataFrame> {
pivot_count_impl(self, pivot_series, keys, groups)
}
}
fn pivot_count_impl<'a, CA: TakeRandom>(
ca: &CA,
pivot_series: &'a Series,
keys: Vec<Series>,
groups: &[(u32, Vec<u32>)],
) -> Result<DataFrame> {
let mut pivot_series = pivot_series.clone();
let mut pivot_unique = pivot_series.unique()?;
let iter = pivot_unique.as_groupable_iter()?;
let pivot_vec_unique: Vec<_> = iter.collect();
let iter = pivot_series.as_groupable_iter()?;
let pivot_vec: Vec<_> = iter.collect();
// create a hash map that will be filled with the results of the aggregation.
let mut columns_agg_map_main =
create_new_column_builder_map::<UInt32Type>(&pivot_vec_unique, groups);
// iterate over the groups that need to be aggregated
// idxes are the indexes of the groups in the keys, pivot, and values columns
for (_first, idx) in groups {
// for every group do the aggregation by adding them to the vector belonging to that column
// the columns are hashed with the pivot values
let mut columns_agg_map_group =
create_column_values_map::<CA::Item>(&pivot_vec_unique, idx.len());
for &i in idx {
let i = i as usize;
let opt_pivot_val = unsafe { pivot_vec.get_unchecked(i) };
if let Some(pivot_val) = opt_pivot_val {
let values_val = ca.get(i);
if let Some(v) = columns_agg_map_group.get_mut(&pivot_val) {
v.push(values_val)
}
}
}
// After the vectors are filled we really do the aggregation and add the result to the main
// hash map, mapping pivot values as column to aggregate result.
for (k, v) in &mut columns_agg_map_group {
let main_builder = columns_agg_map_main.get_mut(k).unwrap();
main_builder.append_value(v.len() as u32)
}
}
// Finalize the pivot by creating a vec of all the columns and creating a DataFrame
let mut cols = keys;
cols.reserve_exact(columns_agg_map_main.len());
for (_, builder) in columns_agg_map_main {
let ca = builder.finish();
cols.push(ca.into_series());
}
sort_cols(&mut cols);
DataFrame::new(cols)
}
impl ChunkPivot for BooleanChunked {
fn pivot_count<'a>(
&self,
pivot_series: &'a Series,
keys: Vec<Series>,
groups: &[(u32, Vec<u32>)],
) -> Result<DataFrame> {
pivot_count_impl(self, pivot_series, keys, groups)
}
}
impl ChunkPivot for Utf8Chunked {
fn pivot_count<'a>(
&self,
pivot_series: &'a Series,
keys: Vec<Series>,
groups: &[(u32, Vec<u32>)],
) -> Result<DataFrame> {
pivot_count_impl(&self, pivot_series, keys, groups)
}
}
#[cfg(feature = "dtype-categorical")]
impl ChunkPivot for CategoricalChunked {
fn pivot_count<'a>(
&self,
pivot_series: &'a Series,
keys: Vec<Series>,
groups: &[(u32, Vec<u32>)],
) -> Result<DataFrame> {
self.deref().pivot_count(pivot_series, keys, groups)
}
}
impl ChunkPivot for ListChunked {
fn pivot<'a>(
&self,
pivot_series: &'a Series,
keys: Vec<Series>,
groups: &[(u32, Vec<u32>)],
agg_type: PivotAgg,
) -> Result<DataFrame> {
// TODO: save an allocation by creating a random access struct for the Groupable utility type.
// Note: we also create pivot_vec with unique values, otherwise we have quadratic behavior
let mut pivot_series = pivot_series.clone();
let mut pivot_unique = pivot_series.unique()?;
let iter = pivot_unique.as_groupable_iter()?;
let pivot_vec_unique: Vec<_> = iter.collect();
let iter = pivot_series.as_groupable_iter()?;
let pivot_vec: Vec<_> = iter.collect();
let values_taker = self.take_rand();
// create a hash map that will be filled with the results of the aggregation.
let mut columns_agg_map_main = {
// create a hash map that will be filled with the results of the aggregation.
let mut columns_agg_map_main = PlHashMap::new();
for column_name in pivot_vec.iter().flatten() {
columns_agg_map_main.entry(column_name).or_insert_with(|| {
get_list_builder(
&self.inner_dtype(),
groups.len(),
groups.len(),
&format!("{:?}", column_name),
)
});
}
columns_agg_map_main
};
// iterate over the groups that need to be aggregated
// idxes are the indexes of the groups in the keys, pivot, and values columns
for (_first, idx) in groups {
// for every group do the aggregation by adding them to the vector belonging to that column
// the columns are hashed with the pivot values
let mut columns_agg_map_group =
create_column_values_map::<Series>(&pivot_vec_unique, idx.len());
for &i in idx {
let i = i as usize;
let opt_pivot_val = unsafe { pivot_vec.get_unchecked(i) };
if let Some(pivot_val) = opt_pivot_val {
let values_val = values_taker.get(i);
if let Some(v) = columns_agg_map_group.get_mut(&pivot_val) {
v.push(values_val)
}
}
}
// After the vectors are filled we really do the aggregation and add the result to the main
// hash map, mapping pivot values as column to aggregate result.
for (k, v) in &mut columns_agg_map_group {
let main_builder = columns_agg_map_main.get_mut(k).unwrap();
match v.len() {
0 => main_builder.append_null(),
// NOTE: now we take first, but this is the place where all aggregations happen
_ => match agg_type {
PivotAgg::First => {
main_builder.append_opt_series(v[0].as_ref());
}
_ => unimplemented!(),
},
}
}
}
// Finalize the pivot by creating a vec of all the columns and creating a DataFrame
let mut cols = keys;
cols.reserve_exact(columns_agg_map_main.len());
for (_, mut builder) in columns_agg_map_main {
let ca = builder.finish();
cols.push(ca.into_series());
}
sort_cols(&mut cols);
DataFrame::new(cols)
}
}
#[cfg(feature = "object")]
impl<T> ChunkPivot for ObjectChunked<T> {}
pub enum PivotAgg {
First,
Sum,
Min,
Max,
Mean,
Median,
}
fn pivot_agg_first<T>(builder: &mut PrimitiveChunkedBuilder<T>, v: &[Option<T::Native>])
where
T: PolarsNumericType,
{
builder.append_option(v[0]);
}
fn pivot_agg_median<T>(builder: &mut PrimitiveChunkedBuilder<T>, v: &mut Vec<Option<T::Native>>)
where
T: PolarsNumericType,
T::Native: PartialOrd,
{
v.sort_unstable_by(|a, b| a.partial_cmp(b).unwrap());
builder.append_option(v[v.len() / 2]);
}
fn pivot_agg_sum<T>(builder: &mut PrimitiveChunkedBuilder<T>, v: &[Option<T::Native>])
where
T: PolarsNumericType,
T::Native: Num + Zero,
{
builder.append_option(v.iter().copied().fold_options(Zero::zero(), Add::add));
}
fn pivot_agg_mean<T>(builder: &mut PrimitiveChunkedBuilder<T>, v: &[Option<T::Native>])
where
T: PolarsNumericType,
T::Native: Num + Zero + NumCast,
{
builder.append_option(
v.iter()
.copied()
.fold_options::<T::Native, T::Native, _>(Zero::zero(), Add::add)
.map(|sum_val| sum_val / NumCast::from(v.len()).unwrap()),
);
}
fn pivot_agg_min<T>(builder: &mut PrimitiveChunkedBuilder<T>, v: &[Option<T::Native>])
where
T: PolarsNumericType,
T::Native: PartialOrd,
{
let mut min = None;
for val in v.iter().flatten() {
match min {
None => min = Some(*val),
Some(minimum) => {
if val < &minimum {
min = Some(*val)
}
}
}
}
builder.append_option(min);
}
fn pivot_agg_max<T>(builder: &mut PrimitiveChunkedBuilder<T>, v: &[Option<T::Native>])
where
T: PolarsNumericType,
T::Native: PartialOrd,
{
let mut max = None;
for val in v.iter().flatten() {
match max {
None => max = Some(*val),
Some(maximum) => {
if val > &maximum {
max = Some(*val)
}
}
}
}
builder.append_option(max);
}
impl<'df, 'sel_str> Pivot<'df, 'sel_str> {
/// Aggregate the pivot results by taking the count the values.
pub fn count(&self) -> Result<DataFrame> {
let pivot_series = self.gb.df.column(self.pivot_column)?;
let values_series = self.gb.df.column(self.values_column)?;
values_series.pivot_count(pivot_series, self.gb.keys(), &self.gb.groups)
}
/// Aggregate the pivot results by taking the first occurring value.
pub fn first(&self) -> Result<DataFrame> {
let pivot_series = self.gb.df.column(self.pivot_column)?;
let values_series = self.gb.df.column(self.values_column)?;
values_series.pivot(
pivot_series,
self.gb.keys(),
&self.gb.groups,
PivotAgg::First,
)
}
/// Aggregate the pivot results by taking the sum of all duplicates.
pub fn sum(&self) -> Result<DataFrame> {
let pivot_series = self.gb.df.column(self.pivot_column)?;
let values_series = self.gb.df.column(self.values_column)?;
values_series.pivot(pivot_series, self.gb.keys(), &self.gb.groups, PivotAgg::Sum)
}
/// Aggregate the pivot results by taking the minimal value of all duplicates.
pub fn | (&self) -> Result<DataFrame> {
let pivot_series = self.gb.df.column(self.pivot_column)?;
let values_series = self.gb.df.column(self.values_column)?;
values_series.pivot(pivot_series, self.gb.keys(), &self.gb.groups, PivotAgg::Min)
}
/// Aggregate the pivot results by taking the maximum value of all duplicates.
pub fn max(&self) -> Result<DataFrame> {
let pivot_series = self.gb.df.column(self.pivot_column)?;
let values_series = self.gb.df.column(self.values_column)?;
values_series.pivot(pivot_series, self.gb.keys(), &self.gb.groups, PivotAgg::Max)
}
/// Aggregate the pivot results by taking the mean value of all duplicates.
pub fn mean(&self) -> Result<DataFrame> {
let pivot_series = self.gb.df.column(self.pivot_column)?;
let values_series = self.gb.df.column(self.values_column)?;
values_series.pivot(
pivot_series,
self.gb.keys(),
&self.gb.groups,
PivotAgg::Mean,
)
}
/// Aggregate the pivot results by taking the median value of all duplicates.
pub fn median(&self) -> Result<DataFrame> {
let pivot_series = self.gb.df.column(self.pivot_column)?;
let values_series = self.gb.df.column(self.values_column)?;
values_series.pivot(
pivot_series,
self.gb.keys(),
&self.gb.groups,
PivotAgg::Median,
)
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_pivot() {
let s0 = Series::new("foo", ["A", "A", "B", "B", "C"].as_ref());
let s1 = Series::new("N", [1, 2, 2, 4, 2].as_ref());
let s2 = Series::new("bar", ["k", "l", "m", "m", "l"].as_ref());
let df = DataFrame::new(vec![s0, s1, s2]).unwrap();
let pvt = df.groupby("foo").unwrap().pivot("bar", "N").sum().unwrap();
assert_eq!(pvt.get_column_names(), &["foo", "k", "l", "m"]);
assert_eq!(
Vec::from(&pvt.column("m").unwrap().i32().unwrap().sort(false)),
&[None, None, Some(6)]
);
let pvt = df.groupby("foo").unwrap().pivot("bar", "N").min().unwrap();
assert_eq!(
Vec::from(&pvt.column("m").unwrap().i32().unwrap().sort(false)),
&[None, None, Some(2)]
);
let pvt = df.groupby("foo").unwrap().pivot("bar", "N").max().unwrap();
assert_eq!(
Vec::from(&pvt.column("m").unwrap().i32().unwrap().sort(false)),
&[None, None, Some(4)]
);
let pvt = df.groupby("foo").unwrap().pivot("bar", "N").mean().unwrap();
assert_eq!(
Vec::from(&pvt.column("m").unwrap().i32().unwrap().sort(false)),
&[None, None, Some(3)]
);
let pvt = df
.groupby("foo")
.unwrap()
.pivot("bar", "N")
.count()
.unwrap();
assert_eq!(
Vec::from(&pvt.column("m").unwrap().u32().unwrap().sort(false)),
&[Some(0), Some(0), Some(2)]
);
}
}
| min |
ixconfig.go | // Copyright 2021 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package ixconfig contains generated IxNetwork structs (along with some
// hand-written helper code) and implements an Ixia JSON config client using
// those structs.
//
// Since the autogenerated golang documentation for IxNetwork config structs in
// this package is long and hard to parse, see the OpenAPI spec for
// information about those structs instead:
// https://openixia.github.io/ixnetwork_openapi/
//
// Basic usage examples for the client can be found in README.md.
package ixconfig
import (
"golang.org/x/net/context"
"encoding/json"
"fmt"
"github.com/openconfig/ondatra/binding/ixweb"
)
type ixSession interface {
Config() config
}
type config interface {
Export(context.Context) (string, error)
Import(context.Context, string, bool) error
QueryIDs(context.Context, ...string) (map[string]string, error)
}
type sessionWrapper struct {
*ixweb.Session
}
func (sw *sessionWrapper) Config() config {
return sw.Session.Config()
}
// Client implements an API for interacting with an Ixia session using a JSON-based config representation.
type Client struct {
sess ixSession
lastImported *Ixnetwork
xPathToID map[string]string
}
// New returns a new Ixia config Client for a specific session for the given Ixia controller connection.
func New(sess *ixweb.Session) *Client {
return &Client{sess: &sessionWrapper{sess}}
}
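// Minimal usage sketch (illustrative; error handling elided, and the session and
// ctx values are assumptions rather than documented examples):
//
//	client := ixconfig.New(session)              // session is an *ixweb.Session
//	cfg, _ := client.ExportConfig(ctx)           // read the live session config
//	// ...mutate cfg...
//	_ = client.ImportConfig(ctx, cfg, cfg, true) // push it back, overwriting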
// Session returns the IxNetwork session used by the config client.
func (c *Client) Session() *ixweb.Session {
return c.sess.(*sessionWrapper).Session
}
// NodeID returns the updated ID for the specified node. Returns an error if the
// node is not part of an imported config or the node ID has not been updated.
func (c *Client) NodeID(node IxiaCfgNode) (string, error) {
xp := node.XPath()
if xp == nil {
return "", fmt.Errorf("node of type %T not yet imported", node)
}
id, ok := c.xPathToID[xp.String()]
if !ok {
return "", fmt.Errorf("node at %q has no updated ID", xp)
}
return id, nil
}
// ExportConfig exports the current full configuration of the IxNetwork session.
func (c *Client) ExportConfig(ctx context.Context) (*Ixnetwork, error) {
cfgStr, err := c.sess.Config().Export(ctx)
if err != nil {
return nil, err
}
cfg := &Ixnetwork{}
if err := json.Unmarshal([]byte(cfgStr), cfg); err != nil {
return nil, fmt.Errorf("failed to unmarshal Ixia config object from config string %q: %w", cfgStr, err)
}
return cfg, nil
}
// ImportConfig imports the specified config into the IxNetwork session.
// The second argument is the root config node, and the third is the specific config to apply.
// If overwrite is 'true', the existing config is completely replaced with the contents of cfgNode.
// If overwrite is 'false', all values configured at and below the given config node are updated. | c.xPathToID = map[string]string{}
cfg.updateAllXPathsAndRefs()
jsonCfg, err := json.Marshal(node)
if err != nil {
return fmt.Errorf("could not marshal Ixnetwork config to JSON: %w", err)
}
if err := c.sess.Config().Import(ctx, string(jsonCfg), overwrite); err != nil {
return err
}
// Record the config that was pushed.
c.lastImported = cfg.Copy()
return nil
}
// LastImportedConfig returns a copy of the last config push attempt using this
// client. Returns 'nil' if there has not been a config push. A new copy of the
// last config is returned on every invocation and does not have its XPaths set.
func (c *Client) LastImportedConfig() *Ixnetwork {
return c.lastImported.Copy()
}
// UpdateIDs updates recorded REST IDs for the target nodes in the config.
// If the ID for the node is already updated, this is a noop for that node.
// This query can be expensive if used with many different types of objects.
func (c *Client) UpdateIDs(ctx context.Context, cfg *Ixnetwork, nodes ...IxiaCfgNode) error {
// Update XPaths because they may be lost as *Ixnetwork config objects
// are copied around (such as a config returned from
// 'LastImportedConfig'), or a user may have constructed a read-only
// config subobject that was never imported.
cfg.updateAllXPathsAndRefs()
var xPathsMissing []string
for _, n := range nodes {
xp := n.XPath().String()
if _, ok := c.xPathToID[xp]; !ok {
xPathsMissing = append(xPathsMissing, xp)
}
}
newIDs, err := c.sess.Config().QueryIDs(ctx, xPathsMissing...)
if err != nil {
return err
}
for xp, id := range newIDs {
c.xPathToID[xp] = id
}
return nil
} | // For values that are a list of config nodes, only the nodes that are specified are updated. (Eg.
// you cannot remove a config node from a list using this function with overwrite set to 'false'.)
// All XPaths in the config are updated before this function returns.
func (c *Client) ImportConfig(ctx context.Context, cfg *Ixnetwork, node IxiaCfgNode, overwrite bool) error { |
meminfo.go | // Copyright 2017 Xiaomi, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package funcs
import (
"github.com/Major818/n9e/v4/src/common/dataobj"
"github.com/Major818/n9e/v4/src/modules/agentd/core"
"github.com/toolkits/pkg/logger"
"github.com/toolkits/pkg/nux"
)
func MemMetrics() []*dataobj.MetricValue | {
m, err := nux.MemInfo()
if err != nil {
logger.Error(err)
return nil
}
memFree := m.MemFree + m.Buffers + m.Cached
if m.MemAvailable > 0 {
memFree = m.MemAvailable
}
memUsed := m.MemTotal - memFree
pmemUsed := 0.0
if m.MemTotal != 0 {
pmemUsed = float64(memUsed) * 100.0 / float64(m.MemTotal)
}
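// Worked example (illustrative): MemTotal = 8 GiB with memFree = 6 GiB gives
// memUsed = 2 GiB, so pmemUsed = 2.0/8.0*100.0 = 25.0.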
pswapUsed := 0.0
if m.SwapTotal != 0 {
pswapUsed = float64(m.SwapUsed) * 100.0 / float64(m.SwapTotal)
}
return []*dataobj.MetricValue{
core.GaugeValue("mem.bytes.total", m.MemTotal),
core.GaugeValue("mem.bytes.used", memUsed),
core.GaugeValue("mem.bytes.free", memFree),
core.GaugeValue("mem.bytes.used.percent", pmemUsed),
core.GaugeValue("mem.bytes.buffers", m.Buffers),
core.GaugeValue("mem.bytes.cached", m.Cached),
core.GaugeValue("mem.swap.bytes.total", m.SwapTotal),
core.GaugeValue("mem.swap.bytes.used", m.SwapUsed),
core.GaugeValue("mem.swap.bytes.free", m.SwapFree),
core.GaugeValue("mem.swap.bytes.used.percent", pswapUsed),
}
} |
|
gbase64.go | // Copyright 2017 gf Author(https://github.com/gogf/gf). All Rights Reserved.
//
// This Source Code Form is subject to the terms of the MIT License.
// If a copy of the MIT was not distributed with this file,
// You can obtain one at https://github.com/gogf/gf.
| import (
"encoding/base64"
"github.com/gogf/gf/util/gconv"
"io/ioutil"
)
// Encode encodes bytes with BASE64 algorithm.
func Encode(src []byte) []byte {
dst := make([]byte, base64.StdEncoding.EncodedLen(len(src)))
base64.StdEncoding.Encode(dst, src)
return dst
}
// Decode decodes bytes with BASE64 algorithm.
func Decode(dst []byte) ([]byte, error) {
src := make([]byte, base64.StdEncoding.DecodedLen(len(dst)))
n, err := base64.StdEncoding.Decode(src, dst)
return src[:n], err
}
// EncodeString encodes string with BASE64 algorithm.
func EncodeString(src string) string {
return EncodeToString([]byte(src))
}
// EncodeToString encodes bytes to string with BASE64 algorithm.
func EncodeToString(src []byte) string {
return gconv.UnsafeBytesToStr(Encode(src))
}
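// Round-trip sketch (illustrative):
//
//	enc := gbase64.EncodeString("hello")   // "aGVsbG8="
//	dec, _ := gbase64.DecodeToString(enc)  // "hello"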
// EncodeFile encodes file content of <path> using BASE64 algorithms.
func EncodeFile(path string) ([]byte, error) {
content, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
return Encode(content), nil
}
// EncodeFileToString encodes file content of <path> to string using BASE64 algorithms.
func EncodeFileToString(path string) (string, error) {
content, err := EncodeFile(path)
if err != nil {
return "", err
}
return gconv.UnsafeBytesToStr(content), nil
}
// DecodeString decodes string with BASE64 algorithm.
func DecodeString(str string) ([]byte, error) {
return Decode([]byte(str))
}
// DecodeToString decodes string with BASE64 algorithm and returns the result as a string.
func DecodeToString(str string) (string, error) {
b, err := DecodeString(str)
return gconv.UnsafeBytesToStr(b), err
} | // Package gbase64 provides useful API for BASE64 encoding/decoding algorithm.
package gbase64
|
proto3.pb.go | // Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: proto3.proto
package proto3_proto
import (
fmt "fmt"
test_proto "github.com/gogo/protobuf/proto/test_proto"
types "github.com/gogo/protobuf/types"
proto "github.com/tron-us/protobuf/proto"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type Message_Humour int32
const (
Message_UNKNOWN Message_Humour = 0
Message_PUNS Message_Humour = 1
Message_SLAPSTICK Message_Humour = 2
Message_BILL_BAILEY Message_Humour = 3
)
var Message_Humour_name = map[int32]string{
0: "UNKNOWN",
1: "PUNS",
2: "SLAPSTICK",
3: "BILL_BAILEY",
}
var Message_Humour_value = map[string]int32{
"UNKNOWN": 0,
"PUNS": 1,
"SLAPSTICK": 2,
"BILL_BAILEY": 3,
}
func (x Message_Humour) String() string {
return proto.EnumName(Message_Humour_name, int32(x))
}
func (Message_Humour) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_4fee6d65e34a64b6, []int{0, 0}
}
type Message struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty" pg:"name"`
Hilarity Message_Humour `protobuf:"varint,2,opt,name=hilarity,proto3,enum=proto3_proto.Message_Humour" json:"hilarity,omitempty" pg:"hilarity"`
HeightInCm uint32 `protobuf:"varint,3,opt,name=height_in_cm,json=heightInCm,proto3" json:"height_in_cm,omitempty" pg:"height_in_cm"`
Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty" pg:"data"`
ResultCount int64 `protobuf:"varint,7,opt,name=result_count,json=resultCount,proto3" json:"result_count,omitempty" pg:"result_count"`
TrueScotsman bool `protobuf:"varint,8,opt,name=true_scotsman,json=trueScotsman,proto3" json:"true_scotsman,omitempty" pg:"true_scotsman"`
Score float32 `protobuf:"fixed32,9,opt,name=score,proto3" json:"score,omitempty" pg:"score"`
Key []uint64 `protobuf:"varint,5,rep,packed,name=key,proto3" json:"key,omitempty" pg:"key"`
ShortKey []int32 `protobuf:"varint,19,rep,packed,name=short_key,json=shortKey,proto3" json:"short_key,omitempty" pg:"short_key"`
Nested *Nested `protobuf:"bytes,6,opt,name=nested,proto3" json:"nested,omitempty" pg:"nested"`
RFunny []Message_Humour `protobuf:"varint,16,rep,packed,name=r_funny,json=rFunny,proto3,enum=proto3_proto.Message_Humour" json:"r_funny,omitempty" pg:"r_funny"`
Terrain map[string]*Nested `protobuf:"bytes,10,rep,name=terrain,proto3" json:"terrain,omitempty" pg:"terrain" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
Proto2Field *test_proto.SubDefaults `protobuf:"bytes,11,opt,name=proto2_field,json=proto2Field,proto3" json:"proto2_field,omitempty" pg:"proto2_field"`
Proto2Value map[string]*test_proto.SubDefaults `protobuf:"bytes,13,rep,name=proto2_value,json=proto2Value,proto3" json:"proto2_value,omitempty" pg:"proto2_value" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
Anything *types.Any `protobuf:"bytes,14,opt,name=anything,proto3" json:"anything,omitempty" pg:"anything"`
ManyThings []*types.Any `protobuf:"bytes,15,rep,name=many_things,json=manyThings,proto3" json:"many_things,omitempty" pg:"many_things"`
Submessage *Message `protobuf:"bytes,17,opt,name=submessage,proto3" json:"submessage,omitempty" pg:"submessage"`
Children []*Message `protobuf:"bytes,18,rep,name=children,proto3" json:"children,omitempty" pg:"children"`
StringMap map[string]string `protobuf:"bytes,20,rep,name=string_map,json=stringMap,proto3" json:"string_map,omitempty" pg:"string_map" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
XXX_NoUnkeyedLiteral struct{} `json:"-" pg:"-"`
XXX_unrecognized []byte `json:"-" pg:"-"`
XXX_sizecache int32 `json:"-" pg:"-"`
}
func (m *Message) Reset() { *m = Message{} }
func (m *Message) String() string { return proto.CompactTextString(m) }
func (*Message) ProtoMessage() {}
func (*Message) Descriptor() ([]byte, []int) {
return fileDescriptor_4fee6d65e34a64b6, []int{0}
}
func (m *Message) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Message.Unmarshal(m, b)
}
func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Message.Marshal(b, m, deterministic)
}
func (m *Message) XXX_Merge(src proto.Message) {
xxx_messageInfo_Message.Merge(m, src)
}
func (m *Message) XXX_Size() int {
return xxx_messageInfo_Message.Size(m)
}
func (m *Message) XXX_DiscardUnknown() {
xxx_messageInfo_Message.DiscardUnknown(m)
}
var xxx_messageInfo_Message proto.InternalMessageInfo
func (m *Message) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *Message) GetHilarity() Message_Humour {
if m != nil {
return m.Hilarity
}
return Message_UNKNOWN
}
func (m *Message) GetHeightInCm() uint32 {
if m != nil {
return m.HeightInCm
}
return 0
}
func (m *Message) GetData() []byte {
if m != nil {
return m.Data
}
return nil
}
func (m *Message) GetResultCount() int64 {
if m != nil {
return m.ResultCount
}
return 0
}
func (m *Message) GetTrueScotsman() bool {
if m != nil {
return m.TrueScotsman
}
return false
}
func (m *Message) GetScore() float32 {
if m != nil {
return m.Score
}
return 0
}
func (m *Message) GetKey() []uint64 {
if m != nil {
return m.Key
}
return nil
}
func (m *Message) GetShortKey() []int32 {
if m != nil {
return m.ShortKey
}
return nil
}
func (m *Message) GetNested() *Nested {
if m != nil {
return m.Nested
}
return nil
}
func (m *Message) GetRFunny() []Message_Humour {
if m != nil {
return m.RFunny
}
return nil
}
func (m *Message) GetTerrain() map[string]*Nested {
if m != nil {
return m.Terrain
}
return nil
}
func (m *Message) GetProto2Field() *test_proto.SubDefaults {
if m != nil {
return m.Proto2Field
}
return nil
}
func (m *Message) GetProto2Value() map[string]*test_proto.SubDefaults {
if m != nil {
return m.Proto2Value
}
return nil
}
func (m *Message) GetAnything() *types.Any {
if m != nil {
return m.Anything
}
return nil
}
func (m *Message) GetManyThings() []*types.Any {
if m != nil {
return m.ManyThings
}
return nil
}
func (m *Message) GetSubmessage() *Message {
if m != nil {
return m.Submessage
}
return nil
}
func (m *Message) GetChildren() []*Message {
if m != nil {
return m.Children
}
return nil
}
func (m *Message) GetStringMap() map[string]string {
if m != nil |
return nil
}
type Nested struct {
Bunny string `protobuf:"bytes,1,opt,name=bunny,proto3" json:"bunny,omitempty" pg:"bunny"`
Cute bool `protobuf:"varint,2,opt,name=cute,proto3" json:"cute,omitempty" pg:"cute"`
XXX_NoUnkeyedLiteral struct{} `json:"-" pg:"-"`
XXX_unrecognized []byte `json:"-" pg:"-"`
XXX_sizecache int32 `json:"-" pg:"-"`
}
func (m *Nested) Reset() { *m = Nested{} }
func (m *Nested) String() string { return proto.CompactTextString(m) }
func (*Nested) ProtoMessage() {}
func (*Nested) Descriptor() ([]byte, []int) {
return fileDescriptor_4fee6d65e34a64b6, []int{1}
}
func (m *Nested) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Nested.Unmarshal(m, b)
}
func (m *Nested) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Nested.Marshal(b, m, deterministic)
}
func (m *Nested) XXX_Merge(src proto.Message) {
xxx_messageInfo_Nested.Merge(m, src)
}
func (m *Nested) XXX_Size() int {
return xxx_messageInfo_Nested.Size(m)
}
func (m *Nested) XXX_DiscardUnknown() {
xxx_messageInfo_Nested.DiscardUnknown(m)
}
var xxx_messageInfo_Nested proto.InternalMessageInfo
func (m *Nested) GetBunny() string {
if m != nil {
return m.Bunny
}
return ""
}
func (m *Nested) GetCute() bool {
if m != nil {
return m.Cute
}
return false
}
type MessageWithMap struct {
ByteMapping map[bool][]byte `protobuf:"bytes,1,rep,name=byte_mapping,json=byteMapping,proto3" json:"byte_mapping,omitempty" pg:"byte_mapping" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
XXX_NoUnkeyedLiteral struct{} `json:"-" pg:"-"`
XXX_unrecognized []byte `json:"-" pg:"-"`
XXX_sizecache int32 `json:"-" pg:"-"`
}
func (m *MessageWithMap) Reset() { *m = MessageWithMap{} }
func (m *MessageWithMap) String() string { return proto.CompactTextString(m) }
func (*MessageWithMap) ProtoMessage() {}
func (*MessageWithMap) Descriptor() ([]byte, []int) {
return fileDescriptor_4fee6d65e34a64b6, []int{2}
}
func (m *MessageWithMap) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_MessageWithMap.Unmarshal(m, b)
}
func (m *MessageWithMap) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_MessageWithMap.Marshal(b, m, deterministic)
}
func (m *MessageWithMap) XXX_Merge(src proto.Message) {
xxx_messageInfo_MessageWithMap.Merge(m, src)
}
func (m *MessageWithMap) XXX_Size() int {
return xxx_messageInfo_MessageWithMap.Size(m)
}
func (m *MessageWithMap) XXX_DiscardUnknown() {
xxx_messageInfo_MessageWithMap.DiscardUnknown(m)
}
var xxx_messageInfo_MessageWithMap proto.InternalMessageInfo
func (m *MessageWithMap) GetByteMapping() map[bool][]byte {
if m != nil {
return m.ByteMapping
}
return nil
}
type IntMap struct {
Rtt map[int32]int32 `protobuf:"bytes,1,rep,name=rtt,proto3" json:"rtt,omitempty" pg:"rtt" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"`
XXX_NoUnkeyedLiteral struct{} `json:"-" pg:"-"`
XXX_unrecognized []byte `json:"-" pg:"-"`
XXX_sizecache int32 `json:"-" pg:"-"`
}
func (m *IntMap) Reset() { *m = IntMap{} }
func (m *IntMap) String() string { return proto.CompactTextString(m) }
func (*IntMap) ProtoMessage() {}
func (*IntMap) Descriptor() ([]byte, []int) {
return fileDescriptor_4fee6d65e34a64b6, []int{3}
}
func (m *IntMap) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_IntMap.Unmarshal(m, b)
}
func (m *IntMap) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_IntMap.Marshal(b, m, deterministic)
}
func (m *IntMap) XXX_Merge(src proto.Message) {
xxx_messageInfo_IntMap.Merge(m, src)
}
func (m *IntMap) XXX_Size() int {
return xxx_messageInfo_IntMap.Size(m)
}
func (m *IntMap) XXX_DiscardUnknown() {
xxx_messageInfo_IntMap.DiscardUnknown(m)
}
var xxx_messageInfo_IntMap proto.InternalMessageInfo
func (m *IntMap) GetRtt() map[int32]int32 {
if m != nil {
return m.Rtt
}
return nil
}
type IntMaps struct {
Maps []*IntMap `protobuf:"bytes,1,rep,name=maps,proto3" json:"maps,omitempty" pg:"maps"`
XXX_NoUnkeyedLiteral struct{} `json:"-" pg:"-"`
XXX_unrecognized []byte `json:"-" pg:"-"`
XXX_sizecache int32 `json:"-" pg:"-"`
}
func (m *IntMaps) Reset() { *m = IntMaps{} }
func (m *IntMaps) String() string { return proto.CompactTextString(m) }
func (*IntMaps) ProtoMessage() {}
func (*IntMaps) Descriptor() ([]byte, []int) {
return fileDescriptor_4fee6d65e34a64b6, []int{4}
}
func (m *IntMaps) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_IntMaps.Unmarshal(m, b)
}
func (m *IntMaps) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_IntMaps.Marshal(b, m, deterministic)
}
func (m *IntMaps) XXX_Merge(src proto.Message) {
xxx_messageInfo_IntMaps.Merge(m, src)
}
func (m *IntMaps) XXX_Size() int {
return xxx_messageInfo_IntMaps.Size(m)
}
func (m *IntMaps) XXX_DiscardUnknown() {
xxx_messageInfo_IntMaps.DiscardUnknown(m)
}
var xxx_messageInfo_IntMaps proto.InternalMessageInfo
func (m *IntMaps) GetMaps() []*IntMap {
if m != nil {
return m.Maps
}
return nil
}
type TestUTF8 struct {
Scalar string `protobuf:"bytes,1,opt,name=scalar,proto3" json:"scalar,omitempty" pg:"scalar"`
Vector []string `protobuf:"bytes,2,rep,name=vector,proto3" json:"vector,omitempty" pg:"vector"`
// Types that are valid to be assigned to Oneof:
// *TestUTF8_Field
Oneof isTestUTF8_Oneof `protobuf_oneof:"oneof"`
MapKey map[string]int64 `protobuf:"bytes,4,rep,name=map_key,json=mapKey,proto3" json:"map_key,omitempty" pg:"map_key" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"`
MapValue map[int64]string `protobuf:"bytes,5,rep,name=map_value,json=mapValue,proto3" json:"map_value,omitempty" pg:"map_value" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
XXX_NoUnkeyedLiteral struct{} `json:"-" pg:"-"`
XXX_unrecognized []byte `json:"-" pg:"-"`
XXX_sizecache int32 `json:"-" pg:"-"`
}
func (m *TestUTF8) Reset() { *m = TestUTF8{} }
func (m *TestUTF8) String() string { return proto.CompactTextString(m) }
func (*TestUTF8) ProtoMessage() {}
func (*TestUTF8) Descriptor() ([]byte, []int) {
return fileDescriptor_4fee6d65e34a64b6, []int{5}
}
func (m *TestUTF8) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_TestUTF8.Unmarshal(m, b)
}
func (m *TestUTF8) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_TestUTF8.Marshal(b, m, deterministic)
}
func (m *TestUTF8) XXX_Merge(src proto.Message) {
xxx_messageInfo_TestUTF8.Merge(m, src)
}
func (m *TestUTF8) XXX_Size() int {
return xxx_messageInfo_TestUTF8.Size(m)
}
func (m *TestUTF8) XXX_DiscardUnknown() {
xxx_messageInfo_TestUTF8.DiscardUnknown(m)
}
var xxx_messageInfo_TestUTF8 proto.InternalMessageInfo
type isTestUTF8_Oneof interface {
isTestUTF8_Oneof()
}
type TestUTF8_Field struct {
Field string `protobuf:"bytes,3,opt,name=field,proto3,oneof" json:"field,omitempty" pg:"field"`
}
func (*TestUTF8_Field) isTestUTF8_Oneof() {}
func (m *TestUTF8) GetOneof() isTestUTF8_Oneof {
if m != nil {
return m.Oneof
}
return nil
}
func (m *TestUTF8) GetScalar() string {
if m != nil {
return m.Scalar
}
return ""
}
func (m *TestUTF8) GetVector() []string {
if m != nil {
return m.Vector
}
return nil
}
func (m *TestUTF8) GetField() string {
if x, ok := m.GetOneof().(*TestUTF8_Field); ok {
return x.Field
}
return ""
}
func (m *TestUTF8) GetMapKey() map[string]int64 {
if m != nil {
return m.MapKey
}
return nil
}
func (m *TestUTF8) GetMapValue() map[int64]string {
if m != nil {
return m.MapValue
}
return nil
}
// XXX_OneofWrappers is for the internal use of the proto package.
func (*TestUTF8) XXX_OneofWrappers() []interface{} {
return []interface{}{
(*TestUTF8_Field)(nil),
}
}
func init() {
proto.RegisterEnum("proto3_proto.Message_Humour", Message_Humour_name, Message_Humour_value)
proto.RegisterType((*Message)(nil), "proto3_proto.Message")
proto.RegisterMapType((map[string]*test_proto.SubDefaults)(nil), "proto3_proto.Message.Proto2ValueEntry")
proto.RegisterMapType((map[string]string)(nil), "proto3_proto.Message.StringMapEntry")
proto.RegisterMapType((map[string]*Nested)(nil), "proto3_proto.Message.TerrainEntry")
proto.RegisterType((*Nested)(nil), "proto3_proto.Nested")
proto.RegisterType((*MessageWithMap)(nil), "proto3_proto.MessageWithMap")
proto.RegisterMapType((map[bool][]byte)(nil), "proto3_proto.MessageWithMap.ByteMappingEntry")
proto.RegisterType((*IntMap)(nil), "proto3_proto.IntMap")
proto.RegisterMapType((map[int32]int32)(nil), "proto3_proto.IntMap.RttEntry")
proto.RegisterType((*IntMaps)(nil), "proto3_proto.IntMaps")
proto.RegisterType((*TestUTF8)(nil), "proto3_proto.TestUTF8")
proto.RegisterMapType((map[string]int64)(nil), "proto3_proto.TestUTF8.MapKeyEntry")
proto.RegisterMapType((map[int64]string)(nil), "proto3_proto.TestUTF8.MapValueEntry")
}
func init() { proto.RegisterFile("proto3.proto", fileDescriptor_4fee6d65e34a64b6) }
var fileDescriptor_4fee6d65e34a64b6 = []byte{
// 891 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x54, 0xff, 0x8e, 0xdb, 0x44,
0x10, 0xae, 0xe3, 0xfc, 0x70, 0x26, 0xc9, 0x35, 0x2c, 0x69, 0x59, 0x02, 0x48, 0x26, 0x20, 0x64,
0x21, 0xea, 0x83, 0x54, 0x87, 0x8e, 0xb6, 0x02, 0xdd, 0x1d, 0x3d, 0x35, 0xba, 0x4b, 0x88, 0x36,
0x39, 0x4e, 0xfc, 0x65, 0x6d, 0x72, 0x9b, 0xc4, 0x22, 0x5e, 0x07, 0xef, 0xba, 0x92, 0x5f, 0x80,
0x07, 0xe1, 0x95, 0x78, 0x21, 0xb4, 0xbb, 0xce, 0xd5, 0xa9, 0x5c, 0xee, 0x2f, 0xef, 0x7c, 0xfe,
0x66, 0xbe, 0xd9, 0x99, 0xd9, 0x81, 0xf6, 0x2e, 0x89, 0x65, 0xfc, 0xdc, 0xd7, 0x1f, 0x94, 0x5b,
0x81, 0xfe, 0xf4, 0x3f, 0x5d, 0xc7, 0xf1, 0x7a, 0xcb, 0x8e, 0xb5, 0xb5, 0x48, 0x57, 0xc7, 0x94,
0x67, 0x86, 0xd8, 0x7f, 0x22, 0x99, 0x90, 0x86, 0x76, 0xac, 0x8e, 0x06, 0x1e, 0xfc, 0xdd, 0x84,
0xc6, 0x98, 0x09, 0x41, 0xd7, 0x0c, 0x21, 0xa8, 0x72, 0x1a, 0x31, 0x6c, 0xb9, 0x96, 0xd7, 0x24,
0xfa, 0x8c, 0x4e, 0xc1, 0xd9, 0x84, 0x5b, 0x9a, 0x84, 0x32, 0xc3, 0x15, 0xd7, 0xf2, 0x8e, 0x86,
0x9f, 0xfb, 0x45, 0x49, 0x3f, 0x77, 0xf6, 0xdf, 0xa4, 0x51, 0x9c, 0x26, 0xe4, 0x9e, 0x8d, 0x5c,
0x68, 0x6f, 0x58, 0xb8, 0xde, 0xc8, 0x20, 0xe4, 0xc1, 0x32, 0xc2, 0xb6, 0x6b, 0x79, 0x1d, 0x02,
0x06, 0x1b, 0xf1, 0x8b, 0x48, 0xe9, 0xdd, 0x51, 0x49, 0x71, 0xd5, 0xb5, 0xbc, 0x36, 0xd1, 0x67,
0xf4, 0x25, 0xb4, 0x13, 0x26, 0xd2, 0xad, 0x0c, 0x96, 0x71, 0xca, 0x25, 0x6e, 0xb8, 0x96, 0x67,
0x93, 0x96, 0xc1, 0x2e, 0x14, 0x84, 0xbe, 0x82, 0x8e, 0x4c, 0x52, 0x16, 0x88, 0x65, 0x2c, 0x45,
0x44, 0x39, 0x76, 0x5c, 0xcb, 0x73, 0x48, 0x5b, 0x81, 0xb3, 0x1c, 0x43, 0x3d, 0xa8, 0x89, 0x65,
0x9c, 0x30, 0xdc, 0x74, 0x2d, 0xaf, 0x42, 0x8c, 0x81, 0xba, 0x60, 0xff, 0xc9, 0x32, 0x5c, 0x73,
0x6d, 0xaf, 0x4a, 0xd4, 0x11, 0x7d, 0x06, 0x4d, 0xb1, 0x89, 0x13, 0x19, 0x28, 0xfc, 0x63, 0xd7,
0xf6, 0x6a, 0xc4, 0xd1, 0xc0, 0x15, 0xcb, 0xd0, 0x77, 0x50, 0xe7, 0x4c, 0x48, 0x76, 0x87, 0xeb,
0xae, 0xe5, 0xb5, 0x86, 0xbd, 0xc3, 0xab, 0x4f, 0xf4, 0x3f, 0x92, 0x73, 0xd0, 0x09, 0x34, 0x92,
0x60, 0x95, 0x72, 0x9e, 0xe1, 0xae, 0x6b, 0x3f, 0x58, 0xa9, 0x7a, 0x72, 0xa9, 0xb8, 0xe8, 0x15,
0x34, 0x24, 0x4b, 0x12, 0x1a, 0x72, 0x0c, 0xae, 0xed, 0xb5, 0x86, 0x83, 0x72, 0xb7, 0xb9, 0x21,
0xbd, 0xe6, 0x32, 0xc9, 0xc8, 0xde, 0x05, 0xbd, 0xc8, 0xe7, 0x61, 0x18, 0xac, 0x42, 0xb6, 0xbd,
0xc3, 0x2d, 0x9d, 0xe8, 0x27, 0xfe, 0xbb, 0x6e, 0xfb, 0xb3, 0x74, 0xf1, 0x2b, 0x5b, 0xd1, 0x74,
0x2b, 0x05, 0x69, 0x19, 0xf2, 0xa5, 0xe2, 0xa2, 0xd1, 0xbd, 0xef, 0x5b, 0xba, 0x4d, 0x19, 0xee,
0x68, 0xf9, 0x6f, 0xca, 0xe5, 0xa7, 0x9a, 0xf9, 0xbb, 0x22, 0x9a, 0x14, 0xf2, 0x50, 0x1a, 0x41,
0xdf, 0x83, 0x43, 0x79, 0x26, 0x37, 0x21, 0x5f, 0xe3, 0xa3, 0xbc, 0x56, 0x66, 0x16, 0xfd, 0xfd,
0x2c, 0xfa, 0x67, 0x3c, 0x23, 0xf7, 0x2c, 0x74, 0x02, 0xad, 0x88, 0xf2, 0x2c, 0xd0, 0x96, 0xc0,
0x8f, 0xb5, 0x76, 0xb9, 0x13, 0x28, 0xe2, 0x5c, 0xf3, 0xd0, 0x09, 0x80, 0x48, 0x17, 0x91, 0x49,
0x0a, 0x7f, 0xa4, 0xa5, 0x9e, 0x94, 0x66, 0x4c, 0x0a, 0x44, 0xf4, 0x03, 0x38, 0xcb, 0x4d, 0xb8,
0xbd, 0x4b, 0x18, 0xc7, 0x48, 0x4b, 0x7d, 0xc0, 0xe9, 0x9e, 0x86, 0x2e, 0x00, 0x84, 0x4c, 0x42,
0xbe, 0x0e, 0x22, 0xba, 0xc3, 0x3d, 0xed, 0xf4, 0x75, 0x79, 0x6d, 0x66, 0x9a, 0x37, 0xa6, 0x3b,
0x53, 0x99, 0xa6, 0xd8, 0xdb, 0xfd, 0x29, 0xb4, 0x8b, 0x7d, 0xdb, 0x0f, 0xa0, 0x79, 0x61, 0x7a,
0x00, 0xbf, 0x85, 0x9a, 0xa9, 0x7e, 0xe5, 0x7f, 0x46, 0xcc, 0x50, 0x5e, 0x54, 0x4e, 0xad, 0xfe,
0x2d, 0x74, 0xdf, 0x6f, 0x45, 0x49, 0xd4, 0x67, 0x87, 0x51, 0x3f, 0x38, 0x0f, 0x85, 0xc0, 0xaf,
0xe0, 0xe8, 0xf0, 0x1e, 0x25, 0x61, 0x7b, 0xc5, 0xb0, 0xcd, 0x82, 0xf7, 0xe0, 0x17, 0xa8, 0x9b,
0xb9, 0x46, 0x2d, 0x68, 0xdc, 0x4c, 0xae, 0x26, 0xbf, 0xdd, 0x4e, 0xba, 0x8f, 0x90, 0x03, 0xd5,
0xe9, 0xcd, 0x64, 0xd6, 0xb5, 0x50, 0x07, 0x9a, 0xb3, 0xeb, 0xb3, 0xe9, 0x6c, 0x3e, 0xba, 0xb8,
0xea, 0x56, 0xd0, 0x63, 0x68, 0x9d, 0x8f, 0xae, 0xaf, 0x83, 0xf3, 0xb3, 0xd1, 0xf5, 0xeb, 0x3f,
0xba, 0xf6, 0x60, 0x08, 0x75, 0x73, 0x59, 0x25, 0xb2, 0xd0, 0xaf, 0xc8, 0x08, 0x1b, 0x43, 0x2d,
0x8b, 0x65, 0x2a, 0x8d, 0xb2, 0x43, 0xf4, 0x79, 0xf0, 0x8f, 0x05, 0x47, 0x79, 0x0f, 0x6e, 0x43,
0xb9, 0x19, 0xd3, 0x1d, 0x9a, 0x42, 0x7b, 0x91, 0x49, 0xa6, 0x7a, 0xb6, 0x53, 0xc3, 0x68, 0xe9,
0xbe, 0x3d, 0x2b, 0xed, 0x5b, 0xee, 0xe3, 0x9f, 0x67, 0x92, 0x8d, 0x0d, 0x3f, 0x1f, 0xed, 0xc5,
0x3b, 0xa4, 0xff, 0x33, 0x74, 0xdf, 0x27, 0x14, 0x2b, 0xe3, 0x94, 0x54, 0xa6, 0x5d, 0xac, 0xcc,
0x5f, 0x50, 0x1f, 0x71, 0xa9, 0x72, 0x3b, 0x06, 0x3b, 0x91, 0x32, 0x4f, 0xe9, 0x8b, 0xc3, 0x94,
0x0c, 0xc5, 0x27, 0x52, 0x9a, 0x14, 0x14, 0xb3, 0xff, 0x23, 0x38, 0x7b, 0xa0, 0x28, 0x59, 0x2b,
0x91, 0xac, 0x15, 0x25, 0x9f, 0x43, 0xc3, 0xc4, 0x13, 0xc8, 0x83, 0x6a, 0x44, 0x77, 0x22, 0x17,
0xed, 0x95, 0x89, 0x12, 0xcd, 0x18, 0xfc, 0x5b, 0x01, 0x67, 0xce, 0x84, 0xbc, 0x99, 0x5f, 0x9e,
0xa2, 0xa7, 0x50, 0x17, 0x4b, 0xba, 0xa5, 0x49, 0xde, 0x84, 0xdc, 0x52, 0xf8, 0x5b, 0xb6, 0x94,
0x71, 0x82, 0x2b, 0xae, 0xad, 0x70, 0x63, 0xa1, 0xa7, 0x50, 0x33, 0xfb, 0x47, 0x6d, 0xf9, 0xe6,
0x9b, 0x47, 0xc4, 0x98, 0xe8, 0x25, 0x34, 0x22, 0xba, 0xd3, 0xcb, 0xb5, 0x5a, 0xb6, 0xdc, 0xf6,
0x82, 0xfe, 0x98, 0xee, 0xae, 0x58, 0x66, 0xee, 0x5e, 0x8f, 0xb4, 0x81, 0xce, 0xa0, 0xa9, 0x9c,
0xcd, 0x25, 0x6b, 0x65, 0x0f, 0xb0, 0xe8, 0x5e, 0x58, 0x4d, 0x4e, 0x94, 0x9b, 0xfd, 0x9f, 0xa0,
0x55, 0x88, 0xfc, 0xd0, 0x44, 0xdb, 0xc5, 0xf7, 0xf0, 0x12, 0x3a, 0x07, 0x51, 0x8b, 0xce, 0xf6,
0x03, 0xcf, 0xe1, 0xbc, 0x01, 0xb5, 0x98, 0xb3, 0x78, 0xb5, 0xa8, 0x9b, 0x7c, 0xff, 0x0b, 0x00,
0x00, 0xff, 0xff, 0x0e, 0x22, 0xea, 0x15, 0xb6, 0x07, 0x00, 0x00,
}
| {
return m.StringMap
} |
login2.rs | use druid::{
widget::{Button, Controller, Flex, Label, TextBox},
AppLauncher, Data, Env, Event, EventCtx, Lens, PlatformError, Selector, Widget, WidgetExt,
WindowDesc,
};
use druid_enums::Matcher;
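// Commands submitted by the login screen to switch the app to the main screen;
// the payload carries the state for the screen being switched to.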
const LOGGED: Selector<LoggedMainState> = Selector::new("druid-enums.basic.logged");
const UNLOGGED: Selector<UnloggedMainState> = Selector::new("druid-enums.basic.unlogged");
#[derive(Clone, Data, Matcher)]
#[matcher(matcher_name = App)] // defaults to AppStateMatcher
enum AppState {
Login(LoginState),
Main(MainState),
}
#[derive(Clone, Data, Lens, Default)]
struct LoginState {
user: String,
}
#[derive(Clone, Data, Matcher)]
enum MainState {
LoggedMainState(LoggedMainState),
UnloggedMainState(UnloggedMainState),
}
#[derive(Clone, Data, Lens)]
struct UnloggedMainState {
count: u32,
}
#[derive(Clone, Data, Lens)]
struct LoggedMainState {
user: String,
count: u32,
}
fn main() -> Result<(), PlatformError> {
let window = WindowDesc::new(ui).title("Druid Enums");
let state = AppState::Login(LoginState::default());
AppLauncher::with_window(window)
.use_simple_logger()
.launch(state)
}
fn ui() -> impl Widget<AppState> {
    // AppState::matcher() would work here as well:
App::new()
.login(login_ui())
.main(main_ui())
.controller(LoginController)
}
fn login_ui() -> impl Widget<LoginState> {
fn login(ctx: &mut EventCtx, state: &mut LoginState, _: &Env) {
if state.user.is_empty() {
ctx.submit_command(UNLOGGED.with(UnloggedMainState::from(state.clone())), None)
} else {
ctx.submit_command(LOGGED.with(LoggedMainState::from(state.clone())), None)
}
}
Flex::row()
.with_child(TextBox::new().lens(LoginState::user))
.with_spacer(5.0)
.with_child(Button::new("Login").on_click(login))
.center()
}
fn main_ui() -> impl Widget<MainState> {
MainStateMatcher::new()
.logged_main_state(logged_main_ui())
.unlogged_main_state(unlogged_main_ui())
.controller(MainController)
}
fn logged_main_ui() -> impl Widget<LoggedMainState> {
Flex::column()
.with_child(Label::dynamic(LoggedMainState::welcome_label))
.with_spacer(5.0)
.with_child(
Button::dynamic(LoggedMainState::count_label)
.on_click(|_, state: &mut LoggedMainState, _| state.count += 1),
)
.center()
}
fn | () -> impl Widget<UnloggedMainState> {
Flex::column()
.with_child(
Button::dynamic(UnloggedMainState::count_label)
.on_click(|_, state: &mut UnloggedMainState, _| state.count += 1),
)
.center()
}
struct LoginController;
impl Controller<AppState, App> for LoginController {
fn event(
&mut self,
child: &mut App,
ctx: &mut EventCtx,
event: &Event,
data: &mut AppState,
env: &Env,
) {
match event {
Event::Command(cmd) if cmd.is(LOGGED) => {
let main_state = cmd.get_unchecked(LOGGED).clone();
*data = AppState::Main(MainState::LoggedMainState(main_state));
}
Event::Command(cmd) if cmd.is(UNLOGGED) => {
let main_state = cmd.get_unchecked(UNLOGGED).clone();
*data = AppState::Main(MainState::UnloggedMainState(main_state));
}
_ => {}
}
child.event(ctx, event, data, env)
}
}
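// MainController simply forwards events to the wrapped MainStateMatcher.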
struct MainController;
impl Controller<MainState, MainStateMatcher> for MainController {
fn event(
&mut self,
child: &mut MainStateMatcher,
ctx: &mut EventCtx,
event: &Event,
data: &mut MainState,
env: &Env,
) {
child.event(ctx, event, data, env)
}
}
impl LoggedMainState {
pub fn welcome_label(&self, _: &Env) -> String {
format!("Welcome {}!", self.user)
}
pub fn count_label(&self, _: &Env) -> String {
format!("clicked {} times", self.count)
}
}
impl UnloggedMainState {
pub fn count_label(&self, _: &Env) -> String {
format!("clicked {} times", self.count)
}
}
impl From<LoginState> for LoggedMainState {
fn from(login: LoginState) -> Self {
Self {
user: login.user,
count: 0,
}
}
}
impl From<LoginState> for UnloggedMainState {
fn from(_login: LoginState) -> Self {
Self { count: 0 }
}
}
| unlogged_main_ui |
helpers.go | /*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package api
import (
"crypto/md5"
"encoding/json"
"fmt"
"reflect"
"strings"
"time"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/conversion"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/selection"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/sets"
"github.com/davecgh/go-spew/spew"
)
// ConversionError conveniently packages up errors in conversions.
type ConversionError struct {
In, Out interface{}
Message string
}
// Return a helpful string about the error
func (c *ConversionError) Error() string {
return spew.Sprintf(
"Conversion error: %s. (in: %v(%+v) out: %v)",
c.Message, reflect.TypeOf(c.In), c.In, reflect.TypeOf(c.Out),
)
}
// Semantic can do semantic deep equality checks for api objects.
// Example: api.Semantic.DeepEqual(aPod, aPodWithNonNilButEmptyMaps) == true
var Semantic = conversion.EqualitiesOrDie(
func(a, b resource.Quantity) bool {
// Ignore formatting, only care that numeric value stayed the same.
// TODO: if we decide it's important, it should be safe to start comparing the format.
//
// Uninitialized quantities are equivalent to 0 quantities.
return a.Cmp(b) == 0
},
func(a, b unversioned.Time) bool {
return a.UTC() == b.UTC()
},
func(a, b labels.Selector) bool {
return a.String() == b.String()
},
func(a, b fields.Selector) bool {
return a.String() == b.String()
},
)
var standardResourceQuotaScopes = sets.NewString(
string(ResourceQuotaScopeTerminating),
string(ResourceQuotaScopeNotTerminating),
string(ResourceQuotaScopeBestEffort),
string(ResourceQuotaScopeNotBestEffort),
)
// IsStandardResourceQuotaScope returns true if the scope is a standard value
func IsStandardResourceQuotaScope(str string) bool {
return standardResourceQuotaScopes.Has(str)
}
var podObjectCountQuotaResources = sets.NewString(
string(ResourcePods),
)
var podComputeQuotaResources = sets.NewString(
string(ResourceCPU),
string(ResourceMemory),
string(ResourceLimitsCPU),
string(ResourceLimitsMemory),
string(ResourceRequestsCPU),
string(ResourceRequestsMemory),
)
// IsResourceQuotaScopeValidForResource returns true if the resource applies to the specified scope
func IsResourceQuotaScopeValidForResource(scope ResourceQuotaScope, resource string) bool {
switch scope {
case ResourceQuotaScopeTerminating, ResourceQuotaScopeNotTerminating, ResourceQuotaScopeNotBestEffort:
return podObjectCountQuotaResources.Has(resource) || podComputeQuotaResources.Has(resource)
case ResourceQuotaScopeBestEffort:
return podObjectCountQuotaResources.Has(resource)
default:
return true
}
}
var standardContainerResources = sets.NewString(
string(ResourceCPU),
string(ResourceMemory),
)
// IsStandardContainerResourceName returns true if the container can make a resource request
// for the specified resource
func IsStandardContainerResourceName(str string) bool {
return standardContainerResources.Has(str)
}
var standardLimitRangeTypes = sets.NewString(
string(LimitTypePod),
string(LimitTypeContainer),
)
// IsStandardLimitRangeType returns true if the type is Pod or Container
func IsStandardLimitRangeType(str string) bool {
return standardLimitRangeTypes.Has(str)
}
var standardQuotaResources = sets.NewString(
string(ResourceCPU),
string(ResourceMemory),
string(ResourceRequestsCPU),
string(ResourceRequestsMemory),
string(ResourceRequestsStorage),
string(ResourceLimitsCPU),
string(ResourceLimitsMemory),
string(ResourcePods),
string(ResourceQuotas),
string(ResourceServices),
string(ResourceReplicationControllers),
string(ResourceSecrets),
string(ResourcePersistentVolumeClaims),
string(ResourceConfigMaps),
string(ResourceServicesNodePorts),
string(ResourceServicesLoadBalancers),
)
// IsStandardQuotaResourceName returns true if the resource is known to
// the quota tracking system
func IsStandardQuotaResourceName(str string) bool {
return standardQuotaResources.Has(str)
}
var standardResources = sets.NewString(
string(ResourceCPU),
string(ResourceMemory),
string(ResourceRequestsCPU),
string(ResourceRequestsMemory),
string(ResourceLimitsCPU),
string(ResourceLimitsMemory),
string(ResourcePods),
string(ResourceQuotas),
string(ResourceServices),
string(ResourceReplicationControllers),
string(ResourceSecrets),
string(ResourceConfigMaps),
string(ResourcePersistentVolumeClaims),
string(ResourceStorage),
)
// IsStandardResourceName returns true if the resource is known to the system
func IsStandardResourceName(str string) bool {
return standardResources.Has(str)
}
var integerResources = sets.NewString(
string(ResourcePods),
string(ResourceQuotas),
string(ResourceServices),
string(ResourceReplicationControllers),
string(ResourceSecrets),
string(ResourceConfigMaps),
string(ResourcePersistentVolumeClaims),
string(ResourceServicesNodePorts),
string(ResourceServicesLoadBalancers),
)
// IsIntegerResourceName returns true if the resource is measured in integer values
func IsIntegerResourceName(str string) bool {
return integerResources.Has(str)
}
// NewDeleteOptions returns a DeleteOptions indicating the resource should
// be deleted within the specified grace period. Use zero to indicate
// immediate deletion. If you would prefer to use the default grace period,
// use &api.DeleteOptions{} directly.
func NewDeleteOptions(grace int64) *DeleteOptions {
return &DeleteOptions{GracePeriodSeconds: &grace}
}
// NewPreconditionDeleteOptions returns a DeleteOptions with a UID precondition set.
func NewPreconditionDeleteOptions(uid string) *DeleteOptions {
u := types.UID(uid)
p := Preconditions{UID: &u}
return &DeleteOptions{Preconditions: &p}
}
// NewUIDPreconditions returns a Preconditions with UID set.
func NewUIDPreconditions(uid string) *Preconditions {
u := types.UID(uid)
return &Preconditions{UID: &u}
}
// this function aims to check if the service's ClusterIP is set or not
// the objective is not to perform validation here
func IsServiceIPSet(service *Service) bool {
return service.Spec.ClusterIP != ClusterIPNone && service.Spec.ClusterIP != ""
}
// this function aims to check if the service's cluster IP is requested or not
func IsServiceIPRequested(service *Service) bool {
// ExternalName services are CNAME aliases to external ones. Ignore the IP.
if service.Spec.Type == ServiceTypeExternalName {
return false
}
return service.Spec.ClusterIP == ""
}
var standardFinalizers = sets.NewString(
string(FinalizerKubernetes),
FinalizerOrphan,
)
func IsStandardFinalizerName(str string) bool {
return standardFinalizers.Has(str)
}
// SingleObject returns a ListOptions for watching a single object.
func SingleObject(meta ObjectMeta) ListOptions {
return ListOptions{
FieldSelector: fields.OneTermEqualSelector("metadata.name", meta.Name),
ResourceVersion: meta.ResourceVersion,
}
}
// AddToNodeAddresses appends the NodeAddresses to the passed-by-pointer slice,
// only if they do not already exist
func AddToNodeAddresses(addresses *[]NodeAddress, addAddresses ...NodeAddress) {
for _, add := range addAddresses {
exists := false
for _, existing := range *addresses {
if existing.Address == add.Address && existing.Type == add.Type {
exists = true
break
}
}
if !exists {
*addresses = append(*addresses, add)
}
}
}
func HashObject(obj runtime.Object, codec runtime.Codec) (string, error) {
data, err := runtime.Encode(codec, obj)
if err != nil {
return "", err
}
return fmt.Sprintf("%x", md5.Sum(data)), nil
}
// TODO: make method on LoadBalancerStatus?
func LoadBalancerStatusEqual(l, r *LoadBalancerStatus) bool {
return ingressSliceEqual(l.Ingress, r.Ingress)
}
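// ingressSliceEqual reports whether two LoadBalancerIngress slices are equal element by element.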
func ingressSliceEqual(lhs, rhs []LoadBalancerIngress) bool {
if len(lhs) != len(rhs) {
return false
}
for i := range lhs {
if !ingressEqual(&lhs[i], &rhs[i]) {
return false
}
}
return true
}
func ingressEqual(lhs, rhs *LoadBalancerIngress) bool {
if lhs.IP != rhs.IP {
return false
}
if lhs.Hostname != rhs.Hostname {
return false
}
return true
}
// TODO: make method on LoadBalancerStatus?
func LoadBalancerStatusDeepCopy(lb *LoadBalancerStatus) *LoadBalancerStatus {
c := &LoadBalancerStatus{}
c.Ingress = make([]LoadBalancerIngress, len(lb.Ingress))
for i := range lb.Ingress {
c.Ingress[i] = lb.Ingress[i]
}
return c
}
// GetAccessModesAsString returns a string representation of an array of access modes.
// modes, when present, are always in the same order: RWO,ROX,RWX.
func GetAccessModesAsString(modes []PersistentVolumeAccessMode) string {
modes = removeDuplicateAccessModes(modes)
modesStr := []string{}
if containsAccessMode(modes, ReadWriteOnce) {
modesStr = append(modesStr, "RWO")
}
if containsAccessMode(modes, ReadOnlyMany) {
modesStr = append(modesStr, "ROX")
}
if containsAccessMode(modes, ReadWriteMany) {
modesStr = append(modesStr, "RWX")
}
return strings.Join(modesStr, ",")
}
// GetAccessModesFromString returns an array of AccessModes from a string created by GetAccessModesAsString
func GetAccessModesFromString(modes string) []PersistentVolumeAccessMode {
strmodes := strings.Split(modes, ",")
accessModes := []PersistentVolumeAccessMode{}
for _, s := range strmodes {
s = strings.Trim(s, " ")
switch {
case s == "RWO":
accessModes = append(accessModes, ReadWriteOnce)
case s == "ROX":
accessModes = append(accessModes, ReadOnlyMany)
case s == "RWX":
accessModes = append(accessModes, ReadWriteMany)
}
}
return accessModes
}
// removeDuplicateAccessModes returns an array of access modes without any duplicates
func removeDuplicateAccessModes(modes []PersistentVolumeAccessMode) []PersistentVolumeAccessMode {
accessModes := []PersistentVolumeAccessMode{}
for _, m := range modes {
if !containsAccessMode(accessModes, m) {
accessModes = append(accessModes, m)
}
}
return accessModes
}
func containsAccessMode(modes []PersistentVolumeAccessMode, mode PersistentVolumeAccessMode) bool {
for _, m := range modes {
if m == mode {
return true
}
}
return false
}
// ParseRFC3339 parses an RFC3339 date in either RFC3339Nano or RFC3339 format.
func ParseRFC3339(s string, nowFn func() unversioned.Time) (unversioned.Time, error) {
if t, timeErr := time.Parse(time.RFC3339Nano, s); timeErr == nil {
return unversioned.Time{Time: t}, nil
}
t, err := time.Parse(time.RFC3339, s)
if err != nil {
return unversioned.Time{}, err
}
return unversioned.Time{Time: t}, nil
}
// NodeSelectorRequirementsAsSelector converts the []NodeSelectorRequirement api type into a struct that implements
// labels.Selector.
func NodeSelectorRequirementsAsSelector(nsm []NodeSelectorRequirement) (labels.Selector, error) {
if len(nsm) == 0 {
return labels.Nothing(), nil
}
selector := labels.NewSelector()
for _, expr := range nsm {
var op selection.Operator
switch expr.Operator {
case NodeSelectorOpIn:
op = selection.In
case NodeSelectorOpNotIn:
op = selection.NotIn
case NodeSelectorOpExists:
op = selection.Exists
case NodeSelectorOpDoesNotExist:
op = selection.DoesNotExist
case NodeSelectorOpGt:
op = selection.GreaterThan
case NodeSelectorOpLt:
op = selection.LessThan
default:
return nil, fmt.Errorf("%q is not a valid node selector operator", expr.Operator)
}
r, err := labels.NewRequirement(expr.Key, op, sets.NewString(expr.Values...))
if err != nil {
return nil, err
}
selector = selector.Add(*r)
}
return selector, nil
}
const (
// AffinityAnnotationKey represents the key of affinity data (json serialized)
// in the Annotations of a Pod.
AffinityAnnotationKey string = "scheduler.alpha.kubernetes.io/affinity"
// TolerationsAnnotationKey represents the key of tolerations data (json serialized)
// in the Annotations of a Pod.
TolerationsAnnotationKey string = "scheduler.alpha.kubernetes.io/tolerations"
// TaintsAnnotationKey represents the key of taints data (json serialized)
// in the Annotations of a Node.
TaintsAnnotationKey string = "scheduler.alpha.kubernetes.io/taints"
// SeccompPodAnnotationKey represents the key of a seccomp profile applied
// to all containers of a pod.
SeccompPodAnnotationKey string = "seccomp.security.alpha.kubernetes.io/pod"
// SeccompContainerAnnotationKeyPrefix represents the key of a seccomp profile applied
// to one container of a pod.
SeccompContainerAnnotationKeyPrefix string = "container.seccomp.security.alpha.kubernetes.io/"
// CreatedByAnnotation represents the key used to store the spec(json)
// used to create the resource.
CreatedByAnnotation = "kubernetes.io/created-by"
// PreferAvoidPodsAnnotationKey represents the key of preferAvoidPods data (json serialized)
// in the Annotations of a Node.
PreferAvoidPodsAnnotationKey string = "scheduler.alpha.kubernetes.io/preferAvoidPods"
// SysctlsPodAnnotationKey represents the key of sysctls which are set for the infrastructure
// container of a pod. The annotation value is a comma separated list of sysctl_name=value
// key-value pairs. Only a limited set of whitelisted and isolated sysctls is supported by
// the kubelet. Pods with other sysctls will fail to launch.
SysctlsPodAnnotationKey string = "security.alpha.kubernetes.io/sysctls"
// UnsafeSysctlsPodAnnotationKey represents the key of sysctls which are set for the infrastructure
// container of a pod. The annotation value is a comma separated list of sysctl_name=value
// key-value pairs. Unsafe sysctls must be explicitly enabled for a kubelet. They are properly
// namespaced to a pod or a container, but their isolation is usually unclear or weak. Their use
// is at-your-own-risk. Pods that attempt to set an unsafe sysctl that is not enabled for a kubelet
// will fail to launch.
UnsafeSysctlsPodAnnotationKey string = "security.alpha.kubernetes.io/unsafe-sysctls"
)
// GetAffinityFromPodAnnotations gets the json serialized affinity data from Pod.Annotations
// and converts it to the Affinity type in api.
func GetAffinityFromPodAnnotations(annotations map[string]string) (*Affinity, error) {
if len(annotations) > 0 && annotations[AffinityAnnotationKey] != "" {
var affinity Affinity
err := json.Unmarshal([]byte(annotations[AffinityAnnotationKey]), &affinity)
if err != nil {
return nil, err
}
return &affinity, nil
}
return nil, nil
}
// GetTolerationsFromPodAnnotations gets the json serialized tolerations data from Pod.Annotations
// and converts it to the []Toleration type in api.
func GetTolerationsFromPodAnnotations(annotations map[string]string) ([]Toleration, error) {
var tolerations []Toleration
if len(annotations) > 0 && annotations[TolerationsAnnotationKey] != "" {
err := json.Unmarshal([]byte(annotations[TolerationsAnnotationKey]), &tolerations)
if err != nil {
return tolerations, err
}
}
return tolerations, nil
}
// GetTaintsFromNodeAnnotations gets the json serialized taints data from Node.Annotations
// and converts it to the []Taint type in api.
func GetTaintsFromNodeAnnotations(annotations map[string]string) ([]Taint, error) {
var taints []Taint
if len(annotations) > 0 && annotations[TaintsAnnotationKey] != "" {
err := json.Unmarshal([]byte(annotations[TaintsAnnotationKey]), &taints)
if err != nil {
return []Taint{}, err
}
}
return taints, nil
}
// TolerationToleratesTaint checks if the toleration tolerates the taint.
func TolerationToleratesTaint(toleration *Toleration, taint *Taint) bool {
if len(toleration.Effect) != 0 && toleration.Effect != taint.Effect {
return false
}
if toleration.Key != taint.Key {
return false
}
// TODO: Use proper defaulting when Toleration becomes a field of PodSpec
if (len(toleration.Operator) == 0 || toleration.Operator == TolerationOpEqual) && toleration.Value == taint.Value {
return true
}
if toleration.Operator == TolerationOpExists {
return true
}
return false
}
// TaintToleratedByTolerations checks if taint is tolerated by any of the tolerations.
func TaintToleratedByTolerations(taint *Taint, tolerations []Toleration) bool {
tolerated := false
for i := range tolerations {
if TolerationToleratesTaint(&tolerations[i], taint) {
tolerated = true
break
}
}
return tolerated
}
// MatchTaint checks if the taint matches taintToMatch. Taints are unique by key:effect;
// if two taints have the same key:effect, they are regarded as matching.
func (t *Taint) MatchTaint(taintToMatch Taint) bool {
return t.Key == taintToMatch.Key && t.Effect == taintToMatch.Effect
}
// taint.ToString() converts taint struct to string in format key=value:effect or key:effect.
func (t *Taint) ToString() string {
if len(t.Value) == 0 {
return fmt.Sprintf("%v:%v", t.Key, t.Effect)
}
return fmt.Sprintf("%v=%v:%v", t.Key, t.Value, t.Effect)
}
func GetAvoidPodsFromNodeAnnotations(annotations map[string]string) (AvoidPods, error) {
var avoidPods AvoidPods
if len(annotations) > 0 && annotations[PreferAvoidPodsAnnotationKey] != "" {
err := json.Unmarshal([]byte(annotations[PreferAvoidPodsAnnotationKey]), &avoidPods)
if err != nil {
return avoidPods, err
}
}
return avoidPods, nil
}
// SysctlsFromPodAnnotations parses the sysctl annotations into a slice of safe Sysctls
// and a slice of unsafe Sysctls. This is only a convenience wrapper around
// SysctlsFromPodAnnotation.
func SysctlsFromPodAnnotations(a map[string]string) ([]Sysctl, []Sysctl, error) {
safe, err := SysctlsFromPodAnnotation(a[SysctlsPodAnnotationKey])
if err != nil {
return nil, nil, err
}
unsafe, err := SysctlsFromPodAnnotation(a[UnsafeSysctlsPodAnnotationKey])
if err != nil {
return nil, nil, err
}
return safe, unsafe, nil
}
// SysctlsFromPodAnnotation parses an annotation value into a slice of Sysctls.
func SysctlsFromPodAnnotation(annotation string) ([]Sysctl, error) {
if len(annotation) == 0 {
return nil, nil
}
kvs := strings.Split(annotation, ",")
sysctls := make([]Sysctl, len(kvs))
for i, kv := range kvs {
cs := strings.Split(kv, "=")
if len(cs) != 2 || len(cs[0]) == 0 {
return nil, fmt.Errorf("sysctl %q not of the format sysctl_name=value", kv)
}
sysctls[i].Name = cs[0]
sysctls[i].Value = cs[1]
}
return sysctls, nil
}
// PodAnnotationsFromSysctls creates an annotation value for a slice of Sysctls.
func | (sysctls []Sysctl) string {
if len(sysctls) == 0 {
return ""
}
kvs := make([]string, len(sysctls))
for i := range sysctls {
kvs[i] = fmt.Sprintf("%s=%s", sysctls[i].Name, sysctls[i].Value)
}
return strings.Join(kvs, ",")
}
| PodAnnotationsFromSysctls |
list_search.py | # https://stackoverflow.com/questions/16974047/efficient-way-to-find-missing-elements-in-an-integer-sequence/16974075#16974075 | from itertools import islice, chain
def window(seq, n=2):
"Returns a sliding window (of width n) over data from the iterable"
" s -> (s0,s1,...s[n-1]), (s1,s2,...,sn), ... "
it = iter(seq)
result = tuple(islice(it, n))
if len(result) == n:
yield result
for elem in it:
result = result[1:] + (elem,)
yield result
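# missing_elements returns the integers absent from a sorted sequence, built from the
# gaps between consecutive elements.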
def missing_elements(L):
missing = chain.from_iterable(range(x + 1, y) for x, y in window(L) if (y - x) > 1)
return list(missing) | |
networkpolicy.go | // Copyright 2019 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by client-gen. DO NOT EDIT.
package v1beta1
import (
"time"
v1beta1 "github.com/vmware-tanzu/antrea/pkg/apis/networkpolicy/v1beta1"
scheme "github.com/vmware-tanzu/antrea/pkg/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
watch "k8s.io/apimachinery/pkg/watch"
rest "k8s.io/client-go/rest"
)
// NetworkPoliciesGetter has a method to return a NetworkPolicyInterface.
// A group's client should implement this interface.
type NetworkPoliciesGetter interface {
NetworkPolicies(namespace string) NetworkPolicyInterface
}
// NetworkPolicyInterface has methods to work with NetworkPolicy resources.
type NetworkPolicyInterface interface {
Get(name string, options v1.GetOptions) (*v1beta1.NetworkPolicy, error)
List(opts v1.ListOptions) (*v1beta1.NetworkPolicyList, error)
Watch(opts v1.ListOptions) (watch.Interface, error)
NetworkPolicyExpansion
}
// networkPolicies implements NetworkPolicyInterface
type networkPolicies struct {
client rest.Interface
ns string
}
// newNetworkPolicies returns a NetworkPolicies
func newNetworkPolicies(c *NetworkpolicyV1beta1Client, namespace string) *networkPolicies |
// Get takes name of the networkPolicy, and returns the corresponding networkPolicy object, and an error if there is any.
func (c *networkPolicies) Get(name string, options v1.GetOptions) (result *v1beta1.NetworkPolicy, err error) {
result = &v1beta1.NetworkPolicy{}
err = c.client.Get().
Namespace(c.ns).
Resource("networkpolicies").
Name(name).
VersionedParams(&options, scheme.ParameterCodec).
Do().
Into(result)
return
}
// List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors.
func (c *networkPolicies) List(opts v1.ListOptions) (result *v1beta1.NetworkPolicyList, err error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
result = &v1beta1.NetworkPolicyList{}
err = c.client.Get().
Namespace(c.ns).
Resource("networkpolicies").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Do().
Into(result)
return
}
// Watch returns a watch.Interface that watches the requested networkPolicies.
func (c *networkPolicies) Watch(opts v1.ListOptions) (watch.Interface, error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
opts.Watch = true
return c.client.Get().
Namespace(c.ns).
Resource("networkpolicies").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Watch()
}
| {
return &networkPolicies{
client: c.RESTClient(),
ns: namespace,
}
} |
s3_test.go | package storage
import (
"testing"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/golang/mock/gomock"
"github.com/mailchain/mailchain/internal/envelope"
"github.com/mailchain/mailchain/internal/mail"
"github.com/mailchain/mailchain/internal/encoding/encodingtest"
"github.com/mailchain/mailchain/stores"
"github.com/mailchain/mailchain/stores/storestest"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
)
func Test_createS3Client(t *testing.T) {
assert := assert.New(t)
type args struct {
region string
id string
secret string
}
tests := []struct {
name string
args args
wantNil bool
wantErr bool
}{
{
"success-without-credentials",
args{
"region",
"",
"",
},
false,
false,
},
{
"success-with-credentials",
args{
"region",
"id",
"secret",
},
false,
false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := createS3Client(tt.args.region, tt.args.id, tt.args.secret)
if (err != nil) != tt.wantErr {
t.Errorf("createSession() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !assert.Equal(tt.wantNil, got == nil) {
t.Errorf("createSession() = %v, want %v", got, tt.wantNil)
}
})
}
}
func TestNewSentStore(t *testing.T) {
assert := assert.New(t)
type args struct {
region string
bucket string
id string
secret string
}
tests := []struct {
name string
args args
wantNil bool
wantErr bool
}{
{
"err-region-empty",
args{
"",
"",
"",
"",
},
true,
true,
},
{
"err-bucket-empty",
args{
"us-east-1",
"",
"",
"",
},
true,
true,
},
{
"success",
args{
"us-east-1",
"bucket",
"",
"",
},
false,
false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := NewSentStore(tt.args.region, tt.args.bucket, tt.args.id, tt.args.secret)
if (err != nil) != tt.wantErr {
t.Errorf("NewSentStore() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !assert.Equal(tt.wantNil, got == nil) {
t.Errorf("NewSentStore() = %v, want %v", got, tt.wantNil)
}
})
}
}
func TestS3Store_Put(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
type fields struct {
headObjectFunc func(input *s3.HeadObjectInput) (*s3.HeadObjectOutput, error)
sent stores.Sent
bucket string
}
type args struct {
messageID mail.ID
contentsHash []byte
integrityHash []byte
contents []byte
}
tests := []struct {
name string
fields fields
args args
wantAddress string
wantResource string
wantMLI uint64
wantErr bool
}{
{
"success",
fields{
nil,
func() stores.Sent {
sent := storestest.NewMockSent(mockCtrl)
var id mail.ID
id = encodingtest.MustDecodeHex("5602ea95540bee46d03ba335eed6f49d117eab95c8ab8b71bae2cdd1e564a761")
sent.EXPECT().Key(id, []byte("contents-hash"), []byte("body")).Return("hashkey")
sent.EXPECT().PutMessage(id, []byte("contents-hash"), []byte("body"), nil).Return("https://s3bucket/hashkey", "hashkey", envelope.MLIMailchain, nil)
return sent
}(),
"bucket",
},
args{
encodingtest.MustDecodeHex("5602ea95540bee46d03ba335eed6f49d117eab95c8ab8b71bae2cdd1e564a761"),
[]byte("contents-hash"),
[]byte("integrity-hash"),
[]byte("body"),
},
"https://s3bucket/hashkey",
"hashkey",
1,
false,
},
{
"err-put-message",
fields{
nil,
func() stores.Sent {
sent := storestest.NewMockSent(mockCtrl)
var id mail.ID
id = encodingtest.MustDecodeHex("5602ea95540bee46d03ba335eed6f49d117eab95c8ab8b71bae2cdd1e564a761")
sent.EXPECT().Key(id, []byte("contents-hash"), []byte("body")).Return("messageID-hash")
sent.EXPECT().PutMessage(id, []byte("contents-hash"), []byte("body"), nil).Return("", "", envelope.MLIMailchain, errors.Errorf("put failed"))
return sent
}(),
"bucket",
},
args{
encodingtest.MustDecodeHex("5602ea95540bee46d03ba335eed6f49d117eab95c8ab8b71bae2cdd1e564a761"),
[]byte("contents-hash"),
[]byte("integrity-hash"),
[]byte("body"),
},
"",
"",
1,
true,
},
{
"err-empty-key",
fields{
nil,
func() stores.Sent {
sent := storestest.NewMockSent(mockCtrl)
var id mail.ID
id = encodingtest.MustDecodeHex("5602ea95540bee46d03ba335eed6f49d117eab95c8ab8b71bae2cdd1e564a761")
sent.EXPECT().Key(id, []byte("contents-hash"), []byte("body")).Return("")
sent.EXPECT().PutMessage(id, []byte("contents-hash"), []byte("body"), nil).Return("https://s3bucket/hashkey", "hashkey", envelope.MLIMailchain, nil)
return sent
}(),
"bucket",
},
args{
encodingtest.MustDecodeHex("5602ea95540bee46d03ba335eed6f49d117eab95c8ab8b71bae2cdd1e564a761"),
[]byte("contents-hash"),
[]byte("integrity-hash"),
[]byte("body"),
},
"",
"",
1,
true,
},
{
"err-inconsistent-key",
fields{
nil,
func() stores.Sent {
sent := storestest.NewMockSent(mockCtrl)
var id mail.ID
id = encodingtest.MustDecodeHex("5602ea95540bee46d03ba335eed6f49d117eab95c8ab8b71bae2cdd1e564a761")
sent.EXPECT().Key(id, []byte("contents-hash"), []byte("body")).Return("messageIDother-hashother")
sent.EXPECT().PutMessage(id, []byte("contents-hash"), []byte("body"), nil).Return("https://s3bucket/hashkey", "hashkey", envelope.MLIMailchain, nil)
return sent
}(),
"bucket",
},
args{
encodingtest.MustDecodeHex("5602ea95540bee46d03ba335eed6f49d117eab95c8ab8b71bae2cdd1e564a761"),
[]byte("contents-hash"),
[]byte("integrity-hash"),
[]byte("body"),
},
"",
"",
1,
true,
},
{
"err-inconsistent-resource",
fields{
nil,
func() stores.Sent {
sent := storestest.NewMockSent(mockCtrl)
var id mail.ID
id = encodingtest.MustDecodeHex("5602ea95540bee46d03ba335eed6f49d117eab95c8ab8b71bae2cdd1e564a761")
sent.EXPECT().Key(id, []byte("contents-hash"), []byte("body")).Return("hashkey")
sent.EXPECT().PutMessage(id, []byte("contents-hash"), []byte("body"), nil).Return("https://s3bucket/hashkey", "inconsistent-resource", envelope.MLIMailchain, nil)
return sent
}(),
"bucket",
},
args{
encodingtest.MustDecodeHex("5602ea95540bee46d03ba335eed6f49d117eab95c8ab8b71bae2cdd1e564a761"),
[]byte("contents-hash"),
[]byte("integrity-hash"),
[]byte("body"),
},
"",
"",
1,
true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := S3Store{
headObjectFunc: tt.fields.headObjectFunc,
sent: tt.fields.sent,
bucket: tt.fields.bucket,
}
gotAddress, gotResource, gotMLI, err := s.Put(tt.args.messageID, tt.args.contentsHash, tt.args.integrityHash, tt.args.contents)
if (err != nil) != tt.wantErr {
t.Errorf("S3Store.Put() error = %v, wantErr %v", err, tt.wantErr)
return
}
if gotAddress != tt.wantAddress {
t.Errorf("S3Store.Put() Address = %v, wantAddress %v", gotAddress, tt.wantAddress)
}
if gotResource != tt.wantResource {
t.Errorf("S3Store.Put() Resource = %v, wantResource %v", gotResource, tt.wantResource)
}
if gotMLI != tt.wantMLI {
t.Errorf("S3Store.Put() MLI = %v, want %v", gotMLI, tt.wantMLI)
}
})
}
}
func TestS3Store_Exists(t *testing.T) {
mockCtrl := gomock.NewController(t)
defer mockCtrl.Finish()
type fields struct {
headObjectFunc func(input *s3.HeadObjectInput) (*s3.HeadObjectOutput, error)
sent stores.Sent
bucket string
}
type args struct {
messageID mail.ID
contentsHash []byte
integrityHash []byte
contents []byte
}
tests := []struct {
name string
fields fields
args args
wantErr bool
}{
{
"success",
fields{
func(input *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
return nil, awserr.New("NotFound", "test error", nil)
},
func() stores.Sent {
sent := storestest.NewMockSent(mockCtrl)
var id mail.ID
id = encodingtest.MustDecodeHex("5602ea95540bee46d03ba335eed6f49d117eab95c8ab8b71bae2cdd1e564a761")
sent.EXPECT().Key(id, []byte("contents-hash"), []byte("body")).Return("messageID-hash")
return sent
}(),
"bucket",
},
args{
encodingtest.MustDecodeHex("5602ea95540bee46d03ba335eed6f49d117eab95c8ab8b71bae2cdd1e564a761"),
[]byte("contents-hash"),
[]byte("integrity-hash"),
[]byte("body"),
},
false, | },
{
"err-non-aws-err",
fields{
func(input *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
return nil, errors.Errorf("other error")
},
func() stores.Sent {
sent := storestest.NewMockSent(mockCtrl)
var id mail.ID
id = encodingtest.MustDecodeHex("5602ea95540bee46d03ba335eed6f49d117eab95c8ab8b71bae2cdd1e564a761")
sent.EXPECT().Key(id, []byte("contents-hash"), []byte("body")).Return("messageID-hash")
return sent
}(),
"bucket",
},
args{
encodingtest.MustDecodeHex("5602ea95540bee46d03ba335eed6f49d117eab95c8ab8b71bae2cdd1e564a761"),
[]byte("contents-hash"),
[]byte("integrity-hash"),
[]byte("body"),
},
true,
},
{
"err-exists",
fields{
func(input *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
return &s3.HeadObjectOutput{}, nil
},
func() stores.Sent {
sent := storestest.NewMockSent(mockCtrl)
var id mail.ID
id = encodingtest.MustDecodeHex("5602ea95540bee46d03ba335eed6f49d117eab95c8ab8b71bae2cdd1e564a761")
sent.EXPECT().Key(id, []byte("contents-hash"), []byte("body")).Return("messageID-hash")
return sent
}(),
"bucket",
},
args{
encodingtest.MustDecodeHex("5602ea95540bee46d03ba335eed6f49d117eab95c8ab8b71bae2cdd1e564a761"),
[]byte("contents-hash"),
[]byte("integrity-hash"),
[]byte("body"),
},
true,
},
{
"err-different-aws-err",
fields{
func(input *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) {
return nil, awserr.New("Forbidden", "test error", nil)
},
func() stores.Sent {
sent := storestest.NewMockSent(mockCtrl)
var id mail.ID
id = encodingtest.MustDecodeHex("5602ea95540bee46d03ba335eed6f49d117eab95c8ab8b71bae2cdd1e564a761")
sent.EXPECT().Key(id, []byte("contents-hash"), []byte("body")).Return("messageID-hash")
return sent
}(),
"bucket",
},
args{
encodingtest.MustDecodeHex("5602ea95540bee46d03ba335eed6f49d117eab95c8ab8b71bae2cdd1e564a761"),
[]byte("contents-hash"),
[]byte("integrity-hash"),
[]byte("body"),
},
true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := S3Store{
headObjectFunc: tt.fields.headObjectFunc,
sent: tt.fields.sent,
bucket: tt.fields.bucket,
}
if err := s.Exists(tt.args.messageID, tt.args.contentsHash, tt.args.integrityHash, tt.args.contents); (err != nil) != tt.wantErr {
t.Errorf("S3Store.Exists() error = %v, wantErr %v", err, tt.wantErr)
}
})
}
} | |
online_backtranslation.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import json
import logging
import math
import os
from argparse import Namespace
from collections import OrderedDict, defaultdict
from pathlib import Path
from typing import Dict, Sequence, Tuple
from argparse import ArgumentError
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import fairseq
from fairseq import metrics, options, utils
from fairseq.data import (
FairseqDataset,
LanguagePairDataset,
NoisingDataset,
PrependTokenDataset,
RoundRobinZipDatasets,
TransformEosLangPairDataset,
data_utils,
encoders,
)
from fairseq.sequence_generator_rl import SequenceGenerator
from fairseq.tasks import register_task
from fairseq.tasks.translation import TranslationTask, load_langpair_dataset
logger = logging.getLogger(__name__)
class PiecewiseLinearFn:
"""Piecewise linear function. Can be configured with a string."""
def __init__(self, pieces: Sequence[Tuple[int, float]]):
assert pieces == sorted(
pieces
), f"PiecewiseLinearFn configuration should be sorted, received: {pieces}"
self.pieces = pieces
def __call__(self, x: int) -> float:
for i, (x_a, y_a) in enumerate(self.pieces[:-1]):
x_b, y_b = self.pieces[i + 1]
if x_a <= x <= x_b:
return y_a + (x - x_a) * (y_b - y_a) / (x_b - x_a)
return self.pieces[-1][1]
@staticmethod
def from_string(configuration: str) -> "PiecewiseLinearFn":
"""
Parse the configuration of lambda coefficient (for scheduling).
x = "3" # lambda will be a constant equal to x
x = "0:1,1000:0" # lambda will start from 1 and linearly decrease
# to 0 during the first 1000 iterations
x = "0:0,1000:0,2000:1" # lambda will be equal to 0 for the first 1000
# iterations, then will linearly increase to 1 until iteration 2000
"""
if isinstance(configuration, float):
return PiecewiseLinearFn([(0, configuration)])
try:
parts = configuration.split(",")
if len(parts) == 1:
v = float(configuration)
return PiecewiseLinearFn([(0, v)])
split = [s.split(":") for s in parts]
pieces = [(int(t), float(v)) for t, v in split]
return PiecewiseLinearFn(pieces)
except Exception:
raise ValueError(
f"Invalid PiecewiseLinearFn configuration: {configuration!r}"
)
@staticmethod
def one() -> "PiecewiseLinearFn":
return PiecewiseLinearFn([(0, 1.0)])
@register_task("online_backtranslation")
class OnlineBackTranslationTask(TranslationTask):
|
@torch.no_grad()
def extend_embedding(
emb: nn.Module, new_vocab_size: int, copy_from_token_id: int
) -> None:
old_emb_data = emb.weight.data
(old_vocab_size, dim) = old_emb_data.shape
assert new_vocab_size >= old_vocab_size
if new_vocab_size > old_vocab_size:
emb.weight.data = torch.zeros((new_vocab_size, dim))
emb.weight.data[:old_vocab_size, :] = old_emb_data
# initialize new embeddings
emb.weight.data[old_vocab_size:, :] = old_emb_data[copy_from_token_id]
if hasattr(emb, "num_embeddings"):
emb.num_embeddings = new_vocab_size
if hasattr(emb, "out_features"):
emb.out_features = new_vocab_size
if getattr(emb, "bias", None) is None:
return
# Fix the bias.
# Bias shape can be different from the previous vocab size
    # if the weight matrix was shared and already extended but not the bias.
(old_vocab_size,) = emb.bias.shape
assert new_vocab_size >= old_vocab_size
if new_vocab_size > old_vocab_size:
old_bias = emb.bias.data
new_bias = torch.zeros(
(new_vocab_size,), dtype=old_bias.dtype, device=old_bias.device
)
new_bias[:old_vocab_size] = old_bias
emb.bias.data = new_bias
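# Adds a "<mask>" token plus one token per monolingual language to the dictionary,
# then grows the encoder/decoder embeddings (and LM head) to the new vocabulary size.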
def add_secial_tokens_to_dict_and_model(
dictionary: "fairseq.data.Dictionary",
model: nn.Module,
mono_langs: Sequence[str],
) -> None:
embs = model.encoder.embed_tokens
vocab_size, embedding_dim = embs.weight.shape
# The model may or may not have a '<mask>' embedding yet
assert (
len(dictionary) <= vocab_size <= len(dictionary) + 1
), f"Dictionary len ({len(dictionary)}) doesn't match embs shape ({embs.weight.shape})"
# TODO: we should reuse the pretrained model dict which already has <mask>
dictionary.add_symbol("<mask>")
for lang in mono_langs:
lang_token = _lang_token(lang)
dictionary.add_symbol(lang_token)
logger.info(
f"dictionary: {len(dictionary)} -> {vocab_size} tokens "
f"after adding {len(mono_langs)} lang tokens."
)
if len(dictionary) <= vocab_size:
return
extend_embedding(embs, len(dictionary), dictionary.bos())
dec_embs = model.decoder.embed_tokens
extend_embedding(dec_embs, len(dictionary), dictionary.bos())
lm_head = model.decoder.output_projection
extend_embedding(lm_head, len(dictionary), dictionary.bos())
assert lm_head.weight.shape == (len(dictionary), embedding_dim)
def _lang_token(lang: str) -> str:
return f"__{lang}__"
def _lang_token_index(dictionary, lang: str) -> int:
return dictionary.index(_lang_token(lang))
@contextlib.contextmanager
def assert_weights_have_changed(model: nn.Module):
def checksum(model: nn.Module) -> float:
return sum(p.sum().item() for p in model.parameters())
initial_checksum = checksum(model)
yield model
final_checksum = checksum(model)
logger.info(
f"initial_checksum={initial_checksum} -> final_checksum={final_checksum}"
)
assert initial_checksum != final_checksum, "Model hasn't changed !"
| @staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
# Generic translation args
parser.add_argument('data', help='colon separated path to data directories list, \
will be iterated upon during epochs in round-robin manner; \
however, valid and test data are always in the first directory to \
avoid the need for repeating them in all directories')
parser.add_argument('--mono-langs', metavar='MONO_LANGS',
help='monolingual languages for training')
parser.add_argument('--valid-lang-pairs', default=None, metavar='VALID_LANG_PAIRS',
help='language pairs for validation')
parser.add_argument('--load-alignments', action='store_true',
help='load the binarized alignments')
parser.add_argument('--left-pad-source', default='False', type=str, metavar='BOOL',
help='pad the source on the left')
parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL',
help='pad the target on the left')
parser.add_argument('--upsample-primary', default=1, type=int,
help='amount to upsample primary dataset')
try:
parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the source sequence')
parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the target sequence')
except ArgumentError:
# this might have already been defined. Once we transition this to hydra it should be fine to add it here.
pass
parser.add_argument('--truncate-source', action='store_true', default=False,
help='truncate source to max-source-positions')
parser.add_argument('--num-batch-buckets', default=0, type=int, metavar='N',
help='if >0, then bucket source and target lengths into N '
'buckets and pad accordingly; this is useful on TPUs '
'to minimize the number of compilations')
# Denoising args
parser.add_argument('--max-word-shuffle-distance', default=3.0, type=float, metavar='N',
help='maximum word shuffle distance for denoising autoencoding data generation')
parser.add_argument('--word-dropout-prob', default=0.1, type=float, metavar='N',
help='word dropout probability for denoising autoencoding data generation')
parser.add_argument('--word-blanking-prob', default=0.2, type=float, metavar='N',
help='word blanking probability for denoising autoencoding data generation')
# Backtranslation args
parser.add_argument('--lambda-bt', default="1.0", type=str, metavar='N',
help='back-translation weight')
parser.add_argument('--lambda-dae', default="1.0", type=str, metavar='N',
help='denoising auto-encoder weight')
# Evaluation args
parser.add_argument('--generate-one-by-one', action='store_true',
help='generate one sentence at a time for backtranslation')
parser.add_argument('--eval-bleu', action='store_true',
help='evaluation with BLEU scores')
parser.add_argument('--eval-bleu-detok', type=str, default="space",
help='detokenize before computing BLEU (e.g., "moses"); '
'required if using --eval-bleu; use "space" to '
'disable detokenization; see fairseq.data.encoders '
'for other options')
parser.add_argument('--eval-bleu-detok-args', type=str, metavar='JSON',
help='args for building the tokenizer, if needed')
parser.add_argument('--eval-tokenized-bleu', action='store_true', default=False,
help='compute tokenized BLEU instead of sacrebleu')
parser.add_argument('--eval-bleu-remove-bpe', nargs='?', const='@@ ', default=None,
help='remove BPE before computing BLEU')
parser.add_argument('--eval-bleu-args', type=str, metavar='JSON',
help='generation args for BLEU scoring, '
'e.g., \'{"beam": 4, "lenpen": 0.6}\'')
parser.add_argument('--eval-bleu-print-samples', action='store_true',
help='print sample generations during validation')
# fmt: on
def __init__(self, args, common_dict, mono_langs, valid_lang_pairs):
super().__init__(args, common_dict, common_dict)
self.common_dict = common_dict
self.mono_langs = mono_langs
self.valid_lang_pairs = valid_lang_pairs
self.SHOW_SAMPLES_INTERVAL = 1000
# Start by showing samples
self._show_samples_ctr = self.SHOW_SAMPLES_INTERVAL
self.SHOW_SAMPLES_NUMBER = 5
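# lambda_bt / lambda_dae may be constants or piecewise-linear schedules over
# the update number; they are evaluated as self.lambda_bt(update_num) in train_step.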
self.lambda_bt = PiecewiseLinearFn.from_string(args.lambda_bt)
self.lambda_dae = PiecewiseLinearFn.from_string(args.lambda_dae)
self.args = args
self.data = utils.split_paths(self.args.data)
if len(self.data) == 1:
shards = list(Path(self.data[0]).glob("shard*"))
if len(shards) > 0:
# keep this as strings, since it can also be a manifold path
old_data = self.data
self.data = [str(shard) for shard in shards]
logging.warning(f"Expanded data directory {old_data} to {self.data}")
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
args (argparse.Namespace): parsed command-line arguments
"""
args.left_pad_source = options.eval_bool(args.left_pad_source)
args.left_pad_target = options.eval_bool(args.left_pad_target)
paths = utils.split_paths(args.data)
assert len(paths) > 0
assert args.mono_langs is not None
mono_langs = args.mono_langs.split(",")
valid_lang_pairs = args.valid_lang_pairs.split(",")
# load dictionary
dict_path = os.path.join(paths[0], "dict.txt")
common_dict = cls.load_dictionary(dict_path)
return cls(args, common_dict, mono_langs, valid_lang_pairs)
def load_dataset(self, split, epoch=1, combine=False, **kwargs) -> FairseqDataset:
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
if split == "train":
data_path = self.data[(epoch - 1) % len(self.data)]
dataset = self.load_train_dataset(data_path)
else:
# valid/test should always be the same.
dataset = self.load_translation_dataset(split, self.data[0])
self.datasets[split] = dataset
return dataset
def load_train_dataset(self, data_path: str) -> FairseqDataset:
"""The training dataset is made of backtranslation dataset and denoising dataset."""
data = []
for lang in self.mono_langs:
train_path = os.path.join(data_path, lang, "train")
# TODO: could we do the BT using the denoising sample?
# this would halve the data loading work
data.append((f"{lang}-BT", self.load_bt_dataset(train_path, lang)))
data.append(
(f"{lang}-DENOISE", self.load_denoise_dataset(train_path, lang))
)
return RoundRobinZipDatasets(OrderedDict(data))
def _langpair_dataset(
self, src: FairseqDataset, tgt: FairseqDataset
) -> LanguagePairDataset:
return LanguagePairDataset(
src,
src.sizes,
self.dictionary,
tgt=tgt,
tgt_sizes=tgt.sizes,
tgt_dict=self.dictionary,
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
# TODO: should we shuffle? we are already sorting batches by size, so is it needed?
# shuffle=True,
)
def _prepend_lang_bos_to_target(
self, dataset: LanguagePairDataset, lang: str
) -> LanguagePairDataset:
bos = _lang_token_index(self.dictionary, lang)
return TransformEosLangPairDataset(
dataset,
src_eos=self.dictionary.eos(),
new_src_eos=self.dictionary.eos(),
tgt_bos=self.dictionary.eos(),
new_tgt_bos=bos,
)
def load_bt_dataset(self, data_path: str, lang: str) -> FairseqDataset:
"""The BT dataset is generated with (tgt, tgt) pairs.
The actual translation to a (generated_src, tgt) pair
is done on the fly during training.
"""
mono_dataset = data_utils.load_indexed_dataset(
data_path, self.common_dict, self.args.dataset_impl
)
assert mono_dataset is not None, f"No dataset found for {lang}"
mono_dataset_src = PrependTokenDataset(
mono_dataset, _lang_token_index(self.dictionary, lang)
)
mono_dataset_bt = self._langpair_dataset(mono_dataset_src, mono_dataset)
logger.info(
f"mono_lang = {lang} "
f"lang token index = {_lang_token_index(self.dictionary, lang)} "
f"lang token = {_lang_token(lang)}"
)
mono_dataset_bt = self._prepend_lang_bos_to_target(mono_dataset_bt, lang)
return mono_dataset_bt
def load_denoise_dataset(self, data_path: str, lang: str) -> FairseqDataset:
"""Classic denoising dataset"""
dataset = data_utils.load_indexed_dataset(
data_path, self.common_dict, self.args.dataset_impl
)
noisy_dataset = NoisingDataset(
dataset,
self.dictionary,
seed=1,
max_word_shuffle_distance=self.args.max_word_shuffle_distance,
word_dropout_prob=self.args.word_dropout_prob,
word_blanking_prob=self.args.word_blanking_prob,
)
noisy_dataset = PrependTokenDataset(
noisy_dataset, _lang_token_index(self.dictionary, lang)
)
clean_dataset = data_utils.load_indexed_dataset(
data_path, self.common_dict, self.args.dataset_impl
)
denoising_dataset = self._langpair_dataset(noisy_dataset, clean_dataset)
denoising_dataset = self._prepend_lang_bos_to_target(denoising_dataset, lang)
return denoising_dataset
def load_translation_dataset(
self, split: str, data_path: str, combine: bool = False
):
# only evaluating with one language pair for the moment,
# since ConcatDataset doesn't work as expected
assert len(self.valid_lang_pairs) == 1, "For now..."
valid_lang_pair = self.valid_lang_pairs[0]
src, tgt = valid_lang_pair.split("-")
# use the same function as TranslationTask
src_tgt_dt = load_langpair_dataset(
data_path,
split,
src,
self.common_dict,
tgt,
self.common_dict,
combine=combine,
dataset_impl=self.args.dataset_impl,
upsample_primary=self.args.upsample_primary,
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
max_source_positions=self.args.max_source_positions,
max_target_positions=self.args.max_target_positions,
load_alignments=self.args.load_alignments,
truncate_source=self.args.truncate_source,
num_buckets=self.args.num_batch_buckets,
shuffle=(split != "test"),
prepend_bos_src=_lang_token_index(self.dictionary, src),
)
src_tgt_eos_dt = self._prepend_lang_bos_to_target(src_tgt_dt, tgt)
src_tgt_eos_dt.args = self.args
return src_tgt_eos_dt
def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
raise NotImplementedError
def build_model(self, args, from_checkpoint=False):
# torch.autograd.set_detect_anomaly(True)
model = super().build_model(args, from_checkpoint)
add_secial_tokens_to_dict_and_model(self.common_dict, model, self.mono_langs)
self.sequence_generators = {}
for mono_lang in self.mono_langs:
self.sequence_generators[mono_lang] = SequenceGenerator(
[model],
tgt_dict=self.dictionary,
beam_size=1,
max_len_a=1.3,
max_len_b=5,
min_len=5,
# keep 1 to be able to prepend bos
max_len=model.max_decoder_positions() - 1,
)
if getattr(args, "eval_bleu", False):
assert getattr(args, "eval_bleu_detok", None) is not None, (
"--eval-bleu-detok is required if using --eval-bleu; "
"try --eval-bleu-detok=moses (or --eval-bleu-detok=space "
"to disable detokenization, e.g., when using sentencepiece)"
)
detok_args = json.loads(getattr(args, "eval_bleu_detok_args", "{}") or "{}")
self.tokenizer = encoders.build_tokenizer(
Namespace(
tokenizer=getattr(args, "eval_bleu_detok", None), **detok_args
)
)
gen_args = json.loads(getattr(args, "eval_bleu_args", "{}") or "{}")
self.bleu_sequence_generator = self.build_generator(
[model], Namespace(**gen_args)
)
return model
def max_positions(self):
"""Return the max sentence length allowed by the task."""
return (self.args.max_source_positions, self.args.max_target_positions)
@property
def dictionary(self):
"""Return the source :class:`~fairseq.data.Dictionary`."""
return self.common_dict
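# Log a handful of (generated source, original target) pairs every
# SHOW_SAMPLES_INTERVAL calls, to eyeball back-translation quality.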
def display_samples_once_in_a_while(self, smp, mono_lang, other_lang):
self._show_samples_ctr += 1
if self._show_samples_ctr < self.SHOW_SAMPLES_INTERVAL:
return
self._show_samples_ctr = 0
ln = smp["net_input"]["src_tokens"].shape[0]
logger.info(
f"(r:{self.args.distributed_rank}) : "
f"{other_lang} ---> {mono_lang} "
f"({other_lang} was generated by back-translation.) {ln} samples"
)
for i in range(min(ln, self.SHOW_SAMPLES_NUMBER)):
src_tokens = smp["net_input"]["src_tokens"][i]
tgt_tokens = smp["target"][i]
src_str = self.dictionary.string(src_tokens, "sentencepiece")
tgt_str = self.dictionary.string(tgt_tokens, "sentencepiece")
logger.info(
f"\n{i}\t\t[{other_lang} generated] {src_str}\n"
f"\t\t[{mono_lang} original ] {tgt_str}\n"
f"\t\t[ src tokens] {src_tokens}\n"
)
def backtranslate_sample(self, smp, orig_lang, other_lang) -> None:
"""
* WARNING: smp is modified in place.
* At the start of this function, `smp` has the same input and target:
|--------------------------------------------------------|
| smp['net_input']['src_tokens'] | smp['target'] |
| (from data) __en__ hello world | __en__ hello world |
|--------------------------------------------------------|
* We call generator.generate(smp, bos_token = token("ro")),
and copy the result as input
* At the end, `smp` has the translation to other language.
|--------------------------------------------------------|
| smp['net_input']['src_tokens'] | smp['target'] |
| (generated) __ro__ salut lume | __en__ hello world |
|--------------------------------------------------------|
"""
bos_token = _lang_token_index(self.dictionary, other_lang)
generated = self.sequence_generators[orig_lang].generate(
models=[], sample=smp, bos_token=bos_token
)
max_lngth = max([gn[0]["tokens"].size(0) for gn in generated])
net_input = smp["net_input"]
n_src_tokens = torch.empty(
size=(len(generated), max_lngth + 1), dtype=net_input["src_tokens"].dtype
)
n_src_lengths = torch.empty(
len(generated), dtype=net_input["src_lengths"].dtype
)
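# Prepend the generated language's token (`bos_token`) to every hypothesis and
# right-pad each one to the longest hypothesis in the batch.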
for i, gn in enumerate(generated):
tokens = gn[0]["tokens"]
tokens_size = tokens.size(0)
padding_needed = max_lngth - tokens_size
tokens = torch.cat([tokens.new([bos_token]), tokens])
tokens = F.pad(tokens, (0, padding_needed), value=self.dictionary.pad())
n_src_tokens[i] = tokens
n_src_lengths[i] = tokens_size + 1
device = net_input["src_tokens"].device
# This seems to be important
del net_input["src_tokens"]
del net_input["src_lengths"]
net_input["src_tokens"] = n_src_tokens.to(device)
net_input["src_lengths"] = n_src_lengths.to(device)
def generate(self, smp, model):
model.eval()
orig_lang = (
self.dictionary[smp["net_input"]["src_tokens"][0][0]]
.replace(" ", "")
.replace("_", "")
)
bos_token = smp["net_input"]["prev_output_tokens"][0][0]
with torch.no_grad():
generated = self.sequence_generators[orig_lang].generate(
models=[model], sample=smp, bos_token=bos_token
)
return generated
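# Map any non-primary language back to the primary one; for the primary
# language pick one of the others (at random when there are more than two).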
def get_other_lang(self, lang):
# TODO: allow more complex mapping
if lang != self.mono_langs[0]:
return self.mono_langs[0]
if len(self.mono_langs) == 2:
return self.mono_langs[1]
return self.mono_langs[np.random.randint(1, len(self.mono_langs))]
def train_step(
self, sample, model, criterion, optimizer, update_num, ignore_grad=False
):
model.train()
model.set_num_updates(update_num)
agg_loss, agg_sample_size = 0.0, 0.0
agg_logging_output: Dict[str, float] = defaultdict(float)
dataset_keys = self.datasets["train"].datasets.keys()
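# Dataset keys look like "{lang}-BT" / "{lang}-DENOISE" (see load_train_dataset);
# each sub-task is weighted by its (possibly scheduled) lambda below.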
weights = {
"BT": self.lambda_bt(update_num),
"DENOISE": self.lambda_dae(update_num),
}
log_keys = {"BT": "bt_", "DENOISE": "dae_"}
for dataset_key in dataset_keys:
smp = sample[dataset_key]
mono_lang, task_subtype = dataset_key.split("-")
if weights[task_subtype] == 0:
continue
if task_subtype == "BT":
with torch.autograd.profiler.record_function("backtranslation"):
model.eval()
# TODO: Could we translate to several languages at once?
# This would allow sharing encoder_out and maximizing GPU usage.
other_lang = self.get_other_lang(mono_lang)
self.backtranslate_sample(smp, mono_lang, other_lang)
self.display_samples_once_in_a_while(smp, mono_lang, other_lang)
model.train()
# Like in FairseqTask.train_step
with torch.autograd.profiler.record_function("forward"):
loss, sample_size, logging_output = criterion(model, smp)
loss *= weights[task_subtype]
if ignore_grad:
loss *= 0
with torch.autograd.profiler.record_function("backward"):
optimizer.backward(loss)
agg_loss += loss.item()
agg_sample_size += sample_size
for k in logging_output:
agg_logging_output[log_keys[task_subtype] + k] += logging_output[k]
agg_logging_output[k] += logging_output[k]
return agg_loss, agg_sample_size, agg_logging_output
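# Assumes every sentence in the batch starts with the same language token:
# torch.unique(...).item() below raises if the batch mixes source languages.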
def get_bos_token_from_sample(self, sample):
net_input = sample["net_input"]
source_lang_token_id = torch.unique(net_input["src_tokens"][:, 0]).item()
source_lang_token = self.dictionary[source_lang_token_id].replace("_", "")
target_lang_token_id = _lang_token_index(
self.dictionary, self.get_other_lang(source_lang_token)
)
return target_lang_token_id
def reduce_metrics(self, logging_outputs, criterion):
super().reduce_metrics(logging_outputs, criterion)
bt_sample_size = sum(x.get("bt_sample_size", 0) for x in logging_outputs)
if bt_sample_size:
bt_loss_sum = sum(x.get("bt_loss", 0) for x in logging_outputs)
bt_loss_sum *= 1 / bt_sample_size / math.log(2)
metrics.log_scalar("bt_loss", bt_loss_sum, bt_sample_size, round=3)
bt_nll_loss_sum = sum(x.get("bt_nll_loss", 0) for x in logging_outputs)
bt_ntokens = sum(x.get("bt_ntokens", 0) for x in logging_outputs)
bt_nll_loss_sum *= 1 / bt_ntokens / math.log(2)
metrics.log_scalar("bt_nll_loss", bt_nll_loss_sum, bt_ntokens, round=3)
metrics.log_derived(
"bt_ppl", lambda meters: utils.get_perplexity(meters["bt_nll_loss"].avg)
)
dae_sample_size = sum(x.get("dae_sample_size", 0) for x in logging_outputs)
if dae_sample_size:
dae_loss_sum = sum(x.get("dae_loss", 0) for x in logging_outputs)
dae_loss_sum *= 1 / dae_sample_size / math.log(2)
metrics.log_scalar("dae_loss", dae_loss_sum, dae_sample_size, round=3)
dae_nll_loss_sum = sum(x.get("dae_nll_loss", 0) for x in logging_outputs)
dae_ntokens = sum(x.get("dae_ntokens", 0) for x in logging_outputs)
dae_nll_loss_sum *= 1 / dae_ntokens / math.log(2)
metrics.log_scalar("dae_nll_loss", dae_nll_loss_sum, dae_ntokens, round=3)
metrics.log_derived(
"dae_ppl",
lambda meters: utils.get_perplexity(meters["dae_nll_loss"].avg),
) |
main.rs | use crossterm::{
event::{self, Event as CEvent, KeyCode},
execute,
terminal::{disable_raw_mode, enable_raw_mode, EnterAlternateScreen, LeaveAlternateScreen},
};
// use futures::{stream, StreamExt};
use decom_core::{docker_compose, LogCollector};
use std::{
error::Error,
io,
result::Result,
sync::mpsc,
thread,
time::{Duration, Instant},
};
use tui::{
backend::CrosstermBackend,
text::Spans,
widgets::{Block, Borders, Paragraph, Wrap},
Terminal,
};
enum Event<I> {
Input(I),
Tick,
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn Error>> {
enable_raw_mode()?;
let mut stdout = io::stdout();
execute!(stdout, EnterAlternateScreen)?;
let backend = CrosstermBackend::new(stdout);
let mut terminal = Terminal::new(backend)?;
terminal.clear()?;
let (tx, rx) = mpsc::channel();
let tick_rate = Duration::from_millis(250);
thread::spawn(move || {
let mut last_tick = Instant::now();
loop {
// poll for the tick rate duration; if no events arrive, send a tick event.
let timeout = tick_rate
.checked_sub(last_tick.elapsed())
.unwrap_or_else(|| Duration::from_secs(0));
if event::poll(timeout).unwrap() {
if let CEvent::Key(key) = event::read().unwrap() {
tx.send(Event::Input(key)).unwrap();
}
}
if last_tick.elapsed() >= tick_rate {
tx.send(Event::Tick).unwrap();
last_tick = Instant::now();
}
}
});
let services = docker_compose::services().await?;
// println!("main: services: {:?}", services);
let mut collectors = vec![];
services.iter().for_each(|s| {
let mut collector = LogCollector::new(&s.service_name, &s.container_name);
collector.start();
collectors.push(collector);
// println!("main: collector: '{}' started", s.service_name);
});
// let collectors = stream::select_all(collectors);
/*
thread::spawn(|| {
while let Some((service_name, total, diff)) = collectors.next().await {
println!("main: {}: {} [+{}]", service_name, total, diff);
}
});
*/
let max = (services.len() - 1) as i32;
let mut current = 0;
loop {
let _ = terminal.draw(|f| {
let service = services.get(current as usize).unwrap();
let size = f.size();
let block = Block::default()
.title(service.service_name.clone())
.borders(Borders::ALL);
let collector = collectors.get(current as usize).unwrap();
let text: Vec<Spans> = collector
.slice()
.into_iter()
.map(|line| Spans::from(line))
.collect(); | match rx.recv() {
Ok(Event::Input(event)) => match event.code {
KeyCode::Char('h') => {
current = clamp(current, -1, max);
}
KeyCode::Char('l') => {
current = clamp(current, 1, max);
}
KeyCode::Char('q') => {
let _ = disable_raw_mode();
let _ = execute!(terminal.backend_mut(), LeaveAlternateScreen);
let _ = terminal.show_cursor();
break;
}
_ => {}
},
_ => (),
}
}
Ok(())
}
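// Move the selection by `d` and clamp the result to the [0, max] range.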
fn clamp(n: i32, d: i32, max: i32) -> i32 {
let mut n = n + d;
if n < 0 {
n = 0;
} else if n > max {
n = max;
}
n
} | let paragraph = Paragraph::new(text).block(block).wrap(Wrap { trim: true });
f.render_widget(paragraph, size);
});
|
cmd.go | // Copyright (c) 2019, RetailNext, Inc. | // of RetailNext, Inc.
// All rights reserved.
package restore
import "gopkg.in/alecthomas/kingpin.v2"
var (
Cmd = kingpin.Command("restore", "")
HostCmd = Cmd.Command("host", "Restore this host from backup")
ClusterCmd = Cmd.Command("cluster", "Download from multiple hosts' backups")
hostCmdDryRun = HostCmd.Flag("dry-run", "Don't actually download files").Bool()
hostCmdAllowChangedFiles = HostCmd.Flag("allow-changed", "Allow restoration of files that changed between manifests").Bool()
hostCmdNotBefore = HostCmd.Flag("not-before", "Ignore manifests before this time (unix seconds)").Int64()
hostCmdNotAfter = HostCmd.Flag("not-after", "Ignore manifests after this time (unix seconds)").Int64()
hostCmdCluster = HostCmd.Flag("cluster", "Use a different cluster name when selecting a backup to restore.").String()
hostCmdHostname = HostCmd.Flag("hostname", "Use a specific hostname when selecting a backup to restore.").String()
hostCmdHostnamePattern = HostCmd.Flag("hostname-pattern", "Use a prefix pattern when selecting a backup to restore.").String()
clusterCmdDryRun = ClusterCmd.Flag("dry-run", "Don't actually download files").Bool()
clusterCmdTargetDirectory = ClusterCmd.Flag("target", "A subdirectory will be created under this for each host.").Required().String()
clusterCmdNotBefore = ClusterCmd.Flag("not-before", "Ignore manifests before this time (unix seconds)").Int64()
clusterCmdNotAfter = ClusterCmd.Flag("not-after", "Ignore manifests after this time (unix seconds)").Int64()
clusterCmdCluster = ClusterCmd.Flag("cluster", "Download files for hosts in this cluster").Required().String()
clusterCmdHostnamePattern = ClusterCmd.Flag("hostname-pattern", "Download for hosts matching this prefix.").Required().String()
clusterCmdTables = ClusterCmd.Flag("table", "Download files for these tables (keyspace.table)").Required().Strings()
clusterCmdSkipIndexes = ClusterCmd.Flag("skip-indexes", "Skip downloading indexes").Default("True").Bool()
) | // This material contains trade secrets and confidential information of
// RetailNext, Inc. Any use, reproduction, disclosure or dissemination
// is strictly prohibited without the explicit written permission |
cliente.service.js | import * as tslib_1 from "tslib";
import { Injectable } from '@angular/core';
import { HttpHeaders, HttpClient } from '@angular/common/http';
//set up the connection to the server, loading the database driver
var API_URL = 'http://localhost:3000';
var httpOptions = {
headers: new HttpHeaders({ 'Content-Type': 'application/json; charset=utf-8' })
};
var ClienteService = /** @class */ (function () {
function ClienteService(http) {
this.http = http;
}
//crud
//insert client data | //look up a single client by email
ClienteService.prototype.getCliente = function (email) {
return this.http.get(API_URL + "/cliente?emailCliente=" + email, httpOptions);
};
//Get all clients from the database
ClienteService.prototype.getAllCliente = function () {
return this.http.get(API_URL + "/cliente", httpOptions);
};
//Update the client's data
ClienteService.prototype.updateCliente = function (cliente) {
return this.http.post(API_URL + "/cliente", cliente, httpOptions);
};
//Delete record
ClienteService.prototype.deletarCliente = function (email) {
return this.http.delete(API_URL + "/cliente/" + email, httpOptions);
};
ClienteService = tslib_1.__decorate([
Injectable({
providedIn: 'root'
})
// configure handling of data received from the form
,
tslib_1.__metadata("design:paramtypes", [HttpClient])
], ClienteService);
return ClienteService;
}());
export { ClienteService };
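// Illustrative usage from an Angular component (TypeScript; names are examples only):
//   constructor(private clienteService: ClienteService) {}
//   this.clienteService.getAllCliente().subscribe(function (clientes) { /* ... */ });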
//# sourceMappingURL=cliente.service.js.map | ClienteService.prototype.addCliente = function (cliente) {
return this.http.post(API_URL + "/cliente", cliente, httpOptions);
}; |
cc_pipeline_integration_test.go | //go:build integration
// +build integration
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package cloudformation_test
import (
"fmt"
"os"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
awsCF "github.com/aws/aws-sdk-go/service/cloudformation"
"github.com/stretchr/testify/require"
"github.com/aws/copilot-cli/internal/pkg/aws/identity"
"github.com/aws/copilot-cli/internal/pkg/aws/partitions"
"github.com/aws/copilot-cli/internal/pkg/aws/s3"
"github.com/aws/copilot-cli/internal/pkg/config"
"github.com/aws/copilot-cli/internal/pkg/deploy"
"github.com/aws/copilot-cli/internal/pkg/deploy/cloudformation"
"github.com/aws/copilot-cli/internal/pkg/manifest"
"github.com/aws/copilot-cli/internal/pkg/template"
)
func TestCCPipelineCreation(t *testing.T) | {
appSess, err := testSession(nil)
require.NoError(t, err)
appId := identity.New(appSess)
appCallerInfo, err := appId.Get()
require.NoError(t, err)
appDeployer := cloudformation.New(appSess)
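// Test flow: create the app stack set, register an environment in a second
// region, deploy that environment, provision pipeline resources, create the
// pipeline stack, and assert it exists; the deferred block tears everything
// down in reverse order.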
t.Run("creates a cross-region pipeline in a region with no environment", func(t *testing.T) {
appCfClient := awsCF.New(appSess)
app := config.Application{
Name: randStringBytes(10),
AccountID: appCallerInfo.Account,
}
pipelineStackName := app.Name + "-pipepiper"
appRoleStackName := fmt.Sprintf("%s-infrastructure-roles", app.Name)
appStackSetName := fmt.Sprintf("%s-infrastructure", app.Name)
// find another region (different from the application region,
// i.e. *sess.Config.Region) for us to deploy an environment in.
envRegion, err := findUnusedRegion("us-west", *appSess.Config.Region)
require.NoError(t, err)
envSess, err := testSession(aws.String(envRegion.ID()))
require.NoError(t, err)
envCfClient := awsCF.New(envSess)
envId := identity.New(envSess)
envCallerInfo, err := envId.Get()
require.NoError(t, err)
envDeployer := cloudformation.New(envSess)
s3Client := s3.New(envSess)
uploader := template.New()
var envBucketName string
var appBucketName string
environmentToDeploy := deploy.CreateEnvironmentInput{
Name: randStringBytes(10),
App: deploy.AppInformation{
AccountPrincipalARN: envCallerInfo.RootUserARN,
Name: app.Name,
},
Version: deploy.LatestEnvTemplateVersion,
}
envStackName := fmt.Sprintf("%s-%s",
environmentToDeploy.App.Name,
environmentToDeploy.Name)
// Make sure we delete the stacks after the test is done
defer func() {
// delete the pipeline first because it relies on stackset
_, err := appCfClient.DeleteStack(&awsCF.DeleteStackInput{
StackName: aws.String(pipelineStackName),
})
require.NoError(t, err)
err = appCfClient.WaitUntilStackDeleteComplete(&awsCF.DescribeStacksInput{
StackName: aws.String(pipelineStackName),
})
require.NoError(t, err)
// Clean up any StackInstances we may have created.
stackInstances, err := appCfClient.ListStackInstances(&awsCF.ListStackInstancesInput{
StackSetName: aws.String(appStackSetName),
})
require.NoError(t, err)
require.Equal(t, 2, len(stackInstances.Summaries))
err = s3Client.EmptyBucket(envBucketName)
require.NoError(t, err)
appS3Client := s3.New(appSess)
err = appS3Client.EmptyBucket(appBucketName)
require.NoError(t, err)
_, err = appCfClient.DeleteStackInstances(&awsCF.DeleteStackInstancesInput{
Accounts: []*string{stackInstances.Summaries[0].Account},
Regions: []*string{aws.String(envRegion.ID()), appSess.Config.Region},
RetainStacks: aws.Bool(false),
StackSetName: aws.String(appStackSetName),
})
require.NoError(t, err)
for _, summary := range stackInstances.Summaries {
sess, err := testSession(summary.Region)
require.NoError(t, err)
client := awsCF.New(sess)
err = client.WaitUntilStackDeleteComplete(&awsCF.DescribeStacksInput{
StackName: summary.StackId,
})
require.NoError(t, err)
}
// Delete the StackSet once all the StackInstances are cleaned up. There can be a delay where the
// stack instances are all deleted but DeleteStackSet still returns an OperationInProgressException error.
retry := 0
for ; retry < maxDeleteStackSetRetryNum; retry++ {
if _, err = appCfClient.DeleteStackSet(&awsCF.DeleteStackSetInput{
StackSetName: aws.String(appStackSetName),
}); isOperationInProgress(err) {
time.Sleep(deleteStackSetRetryInterval)
continue
}
require.NoError(t, err)
break
}
require.NotEqual(t, retry, maxDeleteStackSetRetryNum)
_, err = appCfClient.DeleteStack(&awsCF.DeleteStackInput{
StackName: aws.String(appRoleStackName),
})
require.NoError(t, err)
err = appCfClient.WaitUntilStackDeleteComplete(&awsCF.DescribeStacksInput{
StackName: aws.String(appRoleStackName),
})
require.NoError(t, err)
// delete the environment stack once we are done
_, err = envCfClient.DeleteStack(&awsCF.DeleteStackInput{
StackName: aws.String(envStackName),
})
require.NoError(t, err)
err = envCfClient.WaitUntilStackDeleteComplete(&awsCF.DescribeStacksInput{
StackName: aws.String(envStackName),
})
require.NoError(t, err)
}()
// Given both the application stack and env we are deploying to do not
// exist
assertStackDoesNotExist(t, appCfClient, appRoleStackName)
assertStackDoesNotExist(t, envCfClient, envStackName)
// create a stackset
err = appDeployer.DeployApp(&deploy.CreateAppInput{
Name: app.Name,
AccountID: app.AccountID,
Version: deploy.LatestAppTemplateVersion,
})
require.NoError(t, err)
err = appDeployer.AddEnvToApp(&cloudformation.AddEnvToAppOpts{
App: &app,
EnvName: environmentToDeploy.Name,
EnvRegion: envRegion.ID(),
EnvAccountID: envCallerInfo.Account,
})
require.NoError(t, err)
regionalResource, err := appDeployer.GetAppResourcesByRegion(&app, envRegion.ID())
require.NoError(t, err)
envBucketName = regionalResource.S3Bucket
urls, err := uploader.UploadEnvironmentCustomResources(s3.CompressAndUploadFunc(func(key string, objects ...s3.NamedBinary) (string, error) {
return s3Client.ZipAndUpload(envBucketName, key, objects...)
}))
require.NoError(t, err)
environmentToDeploy.CustomResourcesURLs = urls
partition, err := partitions.Region(envRegion.ID()).Partition()
require.NoError(t, err)
environmentToDeploy.ArtifactBucketARN = s3.FormatARN(partition.ID(), envBucketName)
environmentToDeploy.ArtifactBucketKeyARN = regionalResource.KMSKeyARN
// Deploy the environment in the same tools account but in different
// region and wait for it to be complete
require.NoError(t, envDeployer.DeployAndRenderEnvironment(os.Stderr, &environmentToDeploy))
// Ensure that the newly created env stack exists
assertStackExists(t, envCfClient, envStackName)
// Provision resources needed to support a pipeline in a region with
// no existing copilot environment.
err = appDeployer.AddPipelineResourcesToApp(
&app,
*appSess.Config.Region)
require.NoError(t, err)
stackInstances, err := appCfClient.ListStackInstances(&awsCF.ListStackInstancesInput{
StackSetName: aws.String(appStackSetName),
})
require.NoError(t, err)
require.Equal(t, 2, len(stackInstances.Summaries),
"application stack instance should exist")
resources, err := appDeployer.GetRegionalAppResources(&app)
require.NoError(t, err)
artifactBuckets := regionalResourcesToArtifactBuckets(t, resources)
pipelineInput := &deploy.CreatePipelineInput{
AppName: app.Name,
Name: pipelineStackName,
Source: &deploy.CodeCommitSource{
ProviderName: manifest.CodeCommitProviderName,
Branch: "main",
RepositoryURL: "https://us-west-2.console.aws.amazon.com/codesuite/codecommit/repositories/repo-name/browse",
},
Build: deploy.PipelineBuildFromManifest(nil, fmt.Sprintf("copilot/pipelines/%s/", app.Name)),
Stages: []deploy.PipelineStage{
{
AssociatedEnvironment: &deploy.AssociatedEnvironment{
Name: environmentToDeploy.Name,
Region: *appSess.Config.Region,
AccountID: app.AccountID,
},
LocalWorkloads: []string{"frontend", "backend"},
},
},
ArtifactBuckets: artifactBuckets,
}
appRegionResources, err := appDeployer.GetAppResourcesByRegion(&app, *appSess.Config.Region)
require.NoError(t, err)
appBucketName = appRegionResources.S3Bucket
require.NoError(t, appDeployer.CreatePipeline(pipelineInput, appBucketName))
// Ensure that the new stack exists
assertStackExists(t, appCfClient, pipelineStackName)
})
} |
|
query.go | package sqlite
import (
"context"
"database/sql"
"encoding/json"
"fmt"
"strings"
_ "github.com/mattn/go-sqlite3"
"github.com/operator-framework/operator-registry/pkg/api"
"github.com/operator-framework/operator-registry/pkg/registry"
)
type SQLQuerier struct {
db *sql.DB
}
var _ registry.Query = &SQLQuerier{}
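// Compile-time assertion that SQLQuerier implements registry.Query.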
func | (dbFilename string) (*SQLQuerier, error) {
db, err := sql.Open("sqlite3", "file:"+dbFilename+"?immutable=true")
if err != nil {
return nil, err
}
return &SQLQuerier{db}, nil
}
func NewSQLLiteQuerierFromDb(db *sql.DB) *SQLQuerier {
return &SQLQuerier{db}
}
func (s *SQLQuerier) ListTables(ctx context.Context) ([]string, error) {
query := "SELECT name FROM sqlite_master WHERE type='table' ORDER BY name;"
rows, err := s.db.QueryContext(ctx, query)
if err != nil {
return nil, err
}
defer rows.Close()
tables := []string{}
for rows.Next() {
var tableName sql.NullString
if err := rows.Scan(&tableName); err != nil {
return nil, err
}
if tableName.Valid {
tables = append(tables, tableName.String)
}
}
return tables, nil
}
// ListPackages returns a list of package names as strings
func (s *SQLQuerier) ListPackages(ctx context.Context) ([]string, error) {
query := "SELECT DISTINCT name FROM package"
rows, err := s.db.QueryContext(ctx, query)
if err != nil {
return nil, err
}
defer rows.Close()
packages := []string{}
for rows.Next() {
var pkgName sql.NullString
if err := rows.Scan(&pkgName); err != nil {
return nil, err
}
if pkgName.Valid {
packages = append(packages, pkgName.String)
}
}
return packages, nil
}
func (s *SQLQuerier) GetPackage(ctx context.Context, name string) (*registry.PackageManifest, error) {
query := `SELECT DISTINCT package.name, default_channel, channel.name, channel.head_operatorbundle_name
FROM package INNER JOIN channel ON channel.package_name=package.name
WHERE package.name=?`
rows, err := s.db.QueryContext(ctx, query, name)
if err != nil {
return nil, err
}
defer rows.Close()
var pkgName sql.NullString
var defaultChannel sql.NullString
var channelName sql.NullString
var bundleName sql.NullString
if !rows.Next() {
return nil, fmt.Errorf("package %s not found", name)
}
if err := rows.Scan(&pkgName, &defaultChannel, &channelName, &bundleName); err != nil {
return nil, err
}
pkg := ®istry.PackageManifest{
PackageName: pkgName.String,
DefaultChannelName: defaultChannel.String,
Channels: []registry.PackageChannel{
{
Name: channelName.String,
CurrentCSVName: bundleName.String,
},
},
}
for rows.Next() {
if err := rows.Scan(&pkgName, &defaultChannel, &channelName, &bundleName); err != nil {
return nil, err
}
pkg.Channels = append(pkg.Channels, registry.PackageChannel{Name: channelName.String, CurrentCSVName: bundleName.String})
}
return pkg, nil
}
func (s *SQLQuerier) GetDefaultPackage(ctx context.Context, name string) (string, error) {
query := `SELECT default_channel
FROM package WHERE package.name=?`
rows, err := s.db.QueryContext(ctx, query, name)
if err != nil {
return "", err
}
defer rows.Close()
var defaultChannel sql.NullString
if !rows.Next() {
return "", fmt.Errorf("package %s not found", name)
}
if err := rows.Scan(&defaultChannel); err != nil {
return "", err
}
if !defaultChannel.Valid {
return "", fmt.Errorf("default channel not valid")
}
return defaultChannel.String, nil
}
func (s *SQLQuerier) GetChannelEntriesFromPackage(ctx context.Context, packageName string) ([]registry.ChannelEntryAnnotated, error) {
query := `SELECT channel_entry.package_name, channel_entry.channel_name, channel_entry.operatorbundle_name, op_bundle.version, op_bundle.bundlepath, replaces.operatorbundle_name, replacesbundle.version, replacesbundle.bundlepath
FROM channel_entry
LEFT JOIN channel_entry replaces ON channel_entry.replaces = replaces.entry_id
LEFT JOIN operatorbundle op_bundle ON channel_entry.operatorbundle_name = op_bundle.name
LEFT JOIN operatorbundle replacesbundle ON replaces.operatorbundle_name = replacesbundle.name
WHERE channel_entry.package_name = ?;`
var entries []registry.ChannelEntryAnnotated
rows, err := s.db.QueryContext(ctx, query, packageName)
if err != nil {
return nil, err
}
defer rows.Close()
var pkgName sql.NullString
var channelName sql.NullString
var bundleName sql.NullString
var replaces sql.NullString
var version sql.NullString
var bundlePath sql.NullString
var replacesVersion sql.NullString
var replacesBundlePath sql.NullString
for rows.Next() {
if err := rows.Scan(&pkgName, &channelName, &bundleName, &version, &bundlePath, &replaces, &replacesVersion, &replacesBundlePath); err != nil {
return nil, err
}
channelEntryNode := registry.ChannelEntryAnnotated{
PackageName: pkgName.String,
ChannelName: channelName.String,
BundleName: bundleName.String,
Version: version.String,
BundlePath: bundlePath.String,
Replaces: replaces.String,
ReplacesVersion: replacesVersion.String,
ReplacesBundlePath: replacesBundlePath.String,
}
entries = append(entries, channelEntryNode)
}
return entries, nil
}
func (s *SQLQuerier) GetBundle(ctx context.Context, pkgName, channelName, csvName string) (*api.Bundle, error) {
query := `SELECT DISTINCT channel_entry.entry_id, operatorbundle.name, operatorbundle.bundle, operatorbundle.bundlepath, operatorbundle.version, operatorbundle.skiprange
FROM operatorbundle INNER JOIN channel_entry ON operatorbundle.name=channel_entry.operatorbundle_name
WHERE channel_entry.package_name=? AND channel_entry.channel_name=? AND operatorbundle_name=? LIMIT 1`
rows, err := s.db.QueryContext(ctx, query, pkgName, channelName, csvName)
if err != nil {
return nil, err
}
defer rows.Close()
if !rows.Next() {
return nil, fmt.Errorf("no entry found for %s %s %s", pkgName, channelName, csvName)
}
var entryId sql.NullInt64
var name sql.NullString
var bundle sql.NullString
var bundlePath sql.NullString
var version sql.NullString
var skipRange sql.NullString
if err := rows.Scan(&entryId, &name, &bundle, &bundlePath, &version, &skipRange); err != nil {
return nil, err
}
out := &api.Bundle{}
if bundle.Valid && bundle.String != "" {
out, err = registry.BundleStringToAPIBundle(bundle.String)
if err != nil {
return nil, err
}
}
out.CsvName = name.String
out.PackageName = pkgName
out.ChannelName = channelName
out.BundlePath = bundlePath.String
out.Version = version.String
out.SkipRange = skipRange.String
provided, required, err := s.GetApisForEntry(ctx, entryId.Int64)
if err != nil {
return nil, err
}
out.ProvidedApis = provided
out.RequiredApis = required
dependencies, err := s.GetDependenciesForBundle(ctx, name.String, version.String, bundlePath.String)
if err != nil {
return nil, err
}
out.Dependencies = dependencies
properties, err := s.GetPropertiesForBundle(ctx, name.String, version.String, bundlePath.String)
if err != nil {
return nil, err
}
out.Properties = properties
return out, nil
}
func (s *SQLQuerier) GetBundleForChannel(ctx context.Context, pkgName string, channelName string) (*api.Bundle, error) {
query := `SELECT DISTINCT channel_entry.entry_id, operatorbundle.name, operatorbundle.bundle, operatorbundle.bundlepath, operatorbundle.version, operatorbundle.skiprange FROM channel
INNER JOIN operatorbundle ON channel.head_operatorbundle_name=operatorbundle.name
INNER JOIN channel_entry ON (channel_entry.channel_name = channel.name and channel_entry.package_name=channel.package_name and channel_entry.operatorbundle_name=operatorbundle.name)
WHERE channel.package_name=? AND channel.name=? LIMIT 1`
rows, err := s.db.QueryContext(ctx, query, pkgName, channelName)
if err != nil {
return nil, err
}
defer rows.Close()
if !rows.Next() {
return nil, fmt.Errorf("no entry found for %s %s", pkgName, channelName)
}
var entryId sql.NullInt64
var name sql.NullString
var bundle sql.NullString
var bundlePath sql.NullString
var version sql.NullString
var skipRange sql.NullString
if err := rows.Scan(&entryId, &name, &bundle, &bundlePath, &version, &skipRange); err != nil {
return nil, err
}
out := &api.Bundle{}
if bundle.Valid && bundle.String != "" {
out, err = registry.BundleStringToAPIBundle(bundle.String)
if err != nil {
return nil, err
}
}
out.CsvName = name.String
out.PackageName = pkgName
out.ChannelName = channelName
out.BundlePath = bundlePath.String
out.Version = version.String
out.SkipRange = skipRange.String
provided, required, err := s.GetApisForEntry(ctx, entryId.Int64)
if err != nil {
return nil, err
}
out.ProvidedApis = provided
out.RequiredApis = required
dependencies, err := s.GetDependenciesForBundle(ctx, name.String, version.String, bundlePath.String)
if err != nil {
return nil, err
}
out.Dependencies = dependencies
properties, err := s.GetPropertiesForBundle(ctx, name.String, version.String, bundlePath.String)
if err != nil {
return nil, err
}
out.Properties = properties
return out, nil
}
func (s *SQLQuerier) GetChannelEntriesThatReplace(ctx context.Context, name string) (entries []*registry.ChannelEntry, err error) {
query := `SELECT DISTINCT channel_entry.package_name, channel_entry.channel_name, channel_entry.operatorbundle_name
FROM channel_entry
LEFT OUTER JOIN channel_entry replaces ON channel_entry.replaces = replaces.entry_id
WHERE replaces.operatorbundle_name = ?`
rows, err := s.db.QueryContext(ctx, query, name)
if err != nil {
return
}
defer rows.Close()
entries = []*registry.ChannelEntry{}
for rows.Next() {
var pkgNameSQL sql.NullString
var channelNameSQL sql.NullString
var bundleNameSQL sql.NullString
if err = rows.Scan(&pkgNameSQL, &channelNameSQL, &bundleNameSQL); err != nil {
return
}
entries = append(entries, ®istry.ChannelEntry{
PackageName: pkgNameSQL.String,
ChannelName: channelNameSQL.String,
BundleName: bundleNameSQL.String,
Replaces: name,
})
}
if len(entries) == 0 {
err = fmt.Errorf("no channel entries found that replace %s", name)
return
}
return
}
func (s *SQLQuerier) GetBundleThatReplaces(ctx context.Context, name, pkgName, channelName string) (*api.Bundle, error) {
query := `SELECT DISTINCT replaces.entry_id, operatorbundle.name, operatorbundle.bundle, operatorbundle.bundlepath, operatorbundle.version, operatorbundle.skiprange
FROM channel_entry
LEFT OUTER JOIN channel_entry replaces ON replaces.replaces = channel_entry.entry_id
INNER JOIN operatorbundle ON replaces.operatorbundle_name = operatorbundle.name
WHERE channel_entry.operatorbundle_name = ? AND channel_entry.package_name = ? AND channel_entry.channel_name = ? LIMIT 1`
rows, err := s.db.QueryContext(ctx, query, name, pkgName, channelName)
if err != nil {
return nil, err
}
defer rows.Close()
if !rows.Next() {
return nil, fmt.Errorf("no entry found for %s %s", pkgName, channelName)
}
var entryId sql.NullInt64
var outName sql.NullString
var bundle sql.NullString
var bundlePath sql.NullString
var version sql.NullString
var skipRange sql.NullString
if err := rows.Scan(&entryId, &outName, &bundle, &bundlePath, &version, &skipRange); err != nil {
return nil, err
}
out := &api.Bundle{}
if bundle.Valid && bundle.String != "" {
out, err = registry.BundleStringToAPIBundle(bundle.String)
if err != nil {
return nil, err
}
}
out.CsvName = outName.String
out.PackageName = pkgName
out.ChannelName = channelName
out.BundlePath = bundlePath.String
out.Version = version.String
out.SkipRange = skipRange.String
provided, required, err := s.GetApisForEntry(ctx, entryId.Int64)
if err != nil {
return nil, err
}
out.ProvidedApis = provided
out.RequiredApis = required
dependencies, err := s.GetDependenciesForBundle(ctx, outName.String, version.String, bundlePath.String)
if err != nil {
return nil, err
}
out.Dependencies = dependencies
properties, err := s.GetPropertiesForBundle(ctx, outName.String, version.String, bundlePath.String)
if err != nil {
return nil, err
}
out.Properties = properties
return out, nil
}
func (s *SQLQuerier) GetChannelEntriesThatProvide(ctx context.Context, group, version, kind string) (entries []*registry.ChannelEntry, err error) {
// TODO: join on full fk, not just operatorbundlename
query := `SELECT DISTINCT channel_entry.package_name, channel_entry.channel_name, channel_entry.operatorbundle_name, replaces.operatorbundle_name
FROM channel_entry
INNER JOIN properties ON channel_entry.operatorbundle_name = properties.operatorbundle_name
LEFT OUTER JOIN channel_entry replaces ON channel_entry.replaces = replaces.entry_id
WHERE properties.type=? AND properties.value=?`
value, err := json.Marshal(map[string]string{
"group": group,
"version": version,
"kind": kind,
})
if err != nil {
return nil, err
}
rows, err := s.db.QueryContext(ctx, query, registry.GVKType, string(value))
if err != nil {
return nil, err
}
defer rows.Close()
entries = []*registry.ChannelEntry{}
for rows.Next() {
var pkgNameSQL sql.NullString
var channelNameSQL sql.NullString
var bundleNameSQL sql.NullString
var replacesSQL sql.NullString
if err = rows.Scan(&pkgNameSQL, &channelNameSQL, &bundleNameSQL, &replacesSQL); err != nil {
return
}
entries = append(entries, ®istry.ChannelEntry{
PackageName: pkgNameSQL.String,
ChannelName: channelNameSQL.String,
BundleName: bundleNameSQL.String,
Replaces: replacesSQL.String,
})
}
if len(entries) == 0 {
err = fmt.Errorf("no channel entries found that provide %s %s %s", group, version, kind)
return
}
return
}
// Get latest channel entries that provide an api
func (s *SQLQuerier) GetLatestChannelEntriesThatProvide(ctx context.Context, group, version, kind string) (entries []*registry.ChannelEntry, err error) {
query := `SELECT DISTINCT channel_entry.package_name, channel_entry.channel_name, channel_entry.operatorbundle_name, replaces.operatorbundle_name, MIN(channel_entry.depth)
FROM channel_entry
INNER JOIN properties ON channel_entry.operatorbundle_name = properties.operatorbundle_name
LEFT OUTER JOIN channel_entry replaces ON channel_entry.replaces = replaces.entry_id
WHERE properties.type = ? AND properties.value = ?
GROUP BY channel_entry.package_name, channel_entry.channel_name`
value, err := json.Marshal(map[string]string{
"group": group,
"version": version,
"kind": kind,
})
if err != nil {
return nil, err
}
rows, err := s.db.QueryContext(ctx, query, registry.GVKType, string(value))
if err != nil {
return nil, err
}
defer rows.Close()
entries = []*registry.ChannelEntry{}
for rows.Next() {
var pkgNameSQL sql.NullString
var channelNameSQL sql.NullString
var bundleNameSQL sql.NullString
var replacesSQL sql.NullString
var min_depth sql.NullInt64
if err = rows.Scan(&pkgNameSQL, &channelNameSQL, &bundleNameSQL, &replacesSQL, &min_depth); err != nil {
return nil, err
}
entries = append(entries, ®istry.ChannelEntry{
PackageName: pkgNameSQL.String,
ChannelName: channelNameSQL.String,
BundleName: bundleNameSQL.String,
Replaces: replacesSQL.String,
})
}
if len(entries) == 0 {
err = fmt.Errorf("no channel entries found that provide %s %s %s", group, version, kind)
return nil, err
}
return entries, nil
}
// Get the latest bundle that provides the API in a default channel; error unless there is ONLY one
func (s *SQLQuerier) GetBundleThatProvides(ctx context.Context, group, apiVersion, kind string) (*api.Bundle, error) {
query := `SELECT DISTINCT channel_entry.entry_id, operatorbundle.bundle, operatorbundle.bundlepath, MIN(channel_entry.depth), channel_entry.operatorbundle_name, channel_entry.package_name, channel_entry.channel_name, channel_entry.replaces, operatorbundle.version, operatorbundle.skiprange
FROM channel_entry
INNER JOIN operatorbundle ON operatorbundle.name = channel_entry.operatorbundle_name
INNER JOIN properties ON channel_entry.operatorbundle_name = properties.operatorbundle_name
INNER JOIN package ON package.name = channel_entry.package_name
WHERE properties.type = ? AND properties.value = ? AND package.default_channel = channel_entry.channel_name
GROUP BY channel_entry.package_name, channel_entry.channel_name`
value, err := json.Marshal(map[string]string{
"group": group,
"version": apiVersion,
"kind": kind,
})
if err != nil {
return nil, err
}
rows, err := s.db.QueryContext(ctx, query, registry.GVKType, string(value))
if err != nil {
return nil, err
}
defer rows.Close()
if !rows.Next() {
return nil, fmt.Errorf("no entry found that provides %s %s %s", group, apiVersion, kind)
}
var entryId sql.NullInt64
var bundle sql.NullString
var bundlePath sql.NullString
var min_depth sql.NullInt64
var bundleName sql.NullString
var pkgName sql.NullString
var channelName sql.NullString
var replaces sql.NullString
var version sql.NullString
var skipRange sql.NullString
if err := rows.Scan(&entryId, &bundle, &bundlePath, &min_depth, &bundleName, &pkgName, &channelName, &replaces, &version, &skipRange); err != nil {
return nil, err
}
if !bundle.Valid {
return nil, fmt.Errorf("no entry found that provides %s %s %s", group, apiVersion, kind)
}
out := &api.Bundle{}
if bundle.Valid && bundle.String != "" {
out, err = registry.BundleStringToAPIBundle(bundle.String)
if err != nil {
return nil, err
}
}
out.CsvName = bundleName.String
out.PackageName = pkgName.String
out.ChannelName = channelName.String
out.BundlePath = bundlePath.String
out.Version = version.String
out.SkipRange = skipRange.String
provided, required, err := s.GetApisForEntry(ctx, entryId.Int64)
if err != nil {
return nil, err
}
out.ProvidedApis = provided
out.RequiredApis = required
dependencies, err := s.GetDependenciesForBundle(ctx, bundleName.String, version.String, bundlePath.String)
if err != nil {
return nil, err
}
out.Dependencies = dependencies
properties, err := s.GetPropertiesForBundle(ctx, bundleName.String, version.String, bundlePath.String)
if err != nil {
return nil, err
}
out.Properties = properties
return out, nil
}
func (s *SQLQuerier) ListImages(ctx context.Context) ([]string, error) {
query := "SELECT DISTINCT image FROM related_image"
rows, err := s.db.QueryContext(ctx, query)
if err != nil {
return nil, err
}
defer rows.Close()
images := []string{}
for rows.Next() {
var imgName sql.NullString
if err := rows.Scan(&imgName); err != nil {
return nil, err
}
if imgName.Valid {
images = append(images, imgName.String)
}
}
return images, nil
}
func (s *SQLQuerier) GetImagesForBundle(ctx context.Context, csvName string) ([]string, error) {
query := "SELECT DISTINCT image FROM related_image WHERE operatorbundle_name=?"
rows, err := s.db.QueryContext(ctx, query, csvName)
if err != nil {
return nil, err
}
defer rows.Close()
images := []string{}
for rows.Next() {
var imgName sql.NullString
if err := rows.Scan(&imgName); err != nil {
return nil, err
}
if imgName.Valid {
images = append(images, imgName.String)
}
}
return images, nil
}
func (s *SQLQuerier) GetApisForEntry(ctx context.Context, entryID int64) (provided []*api.GroupVersionKind, required []*api.GroupVersionKind, err error) {
groups := map[string]struct{}{}
kinds := map[string]struct{}{}
versions := map[string]struct{}{}
providedQuery := `SELECT properties.value FROM properties
INNER JOIN channel_entry ON channel_entry.operatorbundle_name = properties.operatorbundle_name
WHERE properties.type=? AND channel_entry.entry_id=?`
providedRows, err := s.db.QueryContext(ctx, providedQuery, registry.GVKType, entryID)
if err != nil {
return nil, nil, err
}
defer providedRows.Close()
provided = []*api.GroupVersionKind{}
for providedRows.Next() {
var value sql.NullString
if err := providedRows.Scan(&value); err != nil {
return nil, nil, err
}
if !value.Valid {
continue
}
prop := registry.GVKProperty{}
if err := json.Unmarshal([]byte(value.String), &prop); err != nil {
continue
}
provided = append(provided, &api.GroupVersionKind{
Group: prop.Group,
Version: prop.Version,
Kind: prop.Kind,
})
groups[prop.Group] = struct{}{}
versions[prop.Version] = struct{}{}
kinds[prop.Kind] = struct{}{}
}
requiredQuery := `SELECT DISTINCT dependencies.value FROM dependencies
INNER JOIN channel_entry ON channel_entry.operatorbundle_name = dependencies.operatorbundle_name
WHERE dependencies.type=? AND channel_entry.entry_id=?`
requiredRows, err := s.db.QueryContext(ctx, requiredQuery, registry.GVKType, entryID)
if err != nil {
return nil, nil, err
}
defer requiredRows.Close()
required = []*api.GroupVersionKind{}
for requiredRows.Next() {
var value sql.NullString
if err := requiredRows.Scan(&value); err != nil {
return nil, nil, err
}
if !value.Valid {
continue
}
dep := registry.GVKDependency{}
if err := json.Unmarshal([]byte(value.String), &dep); err != nil {
continue
}
required = append(required, &api.GroupVersionKind{
Group: dep.Group,
Version: dep.Version,
Kind: dep.Kind,
})
groups[dep.Group] = struct{}{}
versions[dep.Version] = struct{}{}
kinds[dep.Kind] = struct{}{}
}
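// argsFor renders a quoted SQL IN-list such as ("g1","g2") from a set of names;
// it is used below to resolve plural names for every referenced GVK in one query.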
argsFor := func(s map[string]struct{}) string {
l := []string{}
for v := range s {
l = append(l, "\""+v+"\"")
}
return "(" + strings.Join(l, ",") + ")"
}
pluralQuery := `SELECT * FROM api` +
` WHERE api.group_name IN ` + argsFor(groups) +
` AND api.version IN ` + argsFor(versions) +
` AND api.kind IN ` + argsFor(kinds)
pluralRows, err := s.db.QueryContext(ctx, pluralQuery)
if err != nil {
return nil, nil, err
}
defer pluralRows.Close()
gvkToPlural := map[registry.GVKProperty]string{}
for pluralRows.Next() {
var groupName sql.NullString
var versionName sql.NullString
var kindName sql.NullString
var pluralName sql.NullString
if err := pluralRows.Scan(&groupName, &versionName, &kindName, &pluralName); err != nil {
continue
}
if !groupName.Valid || !versionName.Valid || !kindName.Valid || !pluralName.Valid {
continue
}
gvkToPlural[registry.GVKProperty{
Group: groupName.String,
Version: versionName.String,
Kind: kindName.String,
}] = pluralName.String
}
for i, p := range provided {
if p.Plural != "" {
continue
}
plural, ok := gvkToPlural[registry.GVKProperty{
Group: p.Group,
Version: p.Version,
Kind: p.Kind,
}]
if !ok {
continue
}
provided[i].Plural = plural
}
for i, r := range required {
if r.Plural != "" {
continue
}
plural, ok := gvkToPlural[registry.GVKProperty{
Group: r.Group,
Version: r.Version,
Kind: r.Kind,
}]
if !ok {
continue
}
required[i].Plural = plural
}
return
}
func (s *SQLQuerier) GetBundleVersion(ctx context.Context, image string) (string, error) {
query := `SELECT version FROM operatorbundle WHERE bundlepath=? LIMIT 1`
rows, err := s.db.QueryContext(ctx, query, image)
if err != nil {
return "", err
}
defer rows.Close()
var version sql.NullString
if rows.Next() {
if err := rows.Scan(&version); err != nil {
return "", err
}
}
if version.Valid {
return version.String, nil
}
return "", fmt.Errorf("bundle %s not found", image)
}
func (s *SQLQuerier) GetBundlePathsForPackage(ctx context.Context, pkgName string) ([]string, error) {
query := `SELECT DISTINCT bundlepath FROM operatorbundle
INNER JOIN channel_entry ON operatorbundle.name=channel_entry.operatorbundle_name
WHERE channel_entry.package_name=?`
rows, err := s.db.QueryContext(ctx, query, pkgName)
if err != nil {
return nil, err
}
defer rows.Close()
images := []string{}
for rows.Next() {
var imgName sql.NullString
if err := rows.Scan(&imgName); err != nil {
return nil, err
}
if imgName.Valid && imgName.String != "" {
images = append(images, imgName.String)
} else {
return nil, fmt.Errorf("Index malformed: cannot find paths to bundle images")
}
}
return images, nil
}
func (s *SQLQuerier) GetBundlesForPackage(ctx context.Context, pkgName string) (map[registry.BundleKey]struct{}, error) {
query := `SELECT DISTINCT name, bundlepath, version FROM operatorbundle
INNER JOIN channel_entry ON operatorbundle.name=channel_entry.operatorbundle_name
WHERE channel_entry.package_name=?`
rows, err := s.db.QueryContext(ctx, query, pkgName)
if err != nil {
return nil, err
}
defer rows.Close()
bundles := map[registry.BundleKey]struct{}{}
for rows.Next() {
var name sql.NullString
var bundlepath sql.NullString
var version sql.NullString
if err := rows.Scan(&name, &bundlepath, &version); err != nil {
return nil, err
}
key := registry.BundleKey{}
if name.Valid && name.String != "" {
key.CsvName = name.String
}
if bundlepath.Valid && bundlepath.String != "" {
key.BundlePath = bundlepath.String
}
if version.Valid && version.String != "" {
key.Version = version.String
}
if key.IsEmpty() {
return nil, fmt.Errorf("Index malformed: cannot find identifier for bundle in package %s", pkgName)
}
bundles[key] = struct{}{}
}
return bundles, nil
}
func (s *SQLQuerier) GetDefaultChannelForPackage(ctx context.Context, pkgName string) (string, error) {
query := `SELECT DISTINCT default_channel FROM package WHERE name=? LIMIT 1`
rows, err := s.db.QueryContext(ctx, query, pkgName)
if err != nil {
return "", err
}
defer rows.Close()
var defaultChannel sql.NullString
if rows.Next() {
if err := rows.Scan(&defaultChannel); err != nil {
return "", err
}
}
if defaultChannel.Valid {
return defaultChannel.String, nil
}
return "", nil
}
func (s *SQLQuerier) ListChannels(ctx context.Context, pkgName string) ([]string, error) {
query := `SELECT DISTINCT name FROM channel WHERE channel.package_name=?`
rows, err := s.db.QueryContext(ctx, query, pkgName)
if err != nil {
return nil, err
}
defer rows.Close()
channels := []string{}
for rows.Next() {
var chName sql.NullString
if err := rows.Scan(&chName); err != nil {
return nil, err
}
if chName.Valid {
channels = append(channels, chName.String)
}
}
return channels, nil
}
func (s *SQLQuerier) GetCurrentCSVNameForChannel(ctx context.Context, pkgName, channel string) (string, error) {
query := `SELECT DISTINCT head_operatorbundle_name FROM channel WHERE channel.package_name=? AND channel.name=?`
rows, err := s.db.QueryContext(ctx, query, pkgName, channel)
if err != nil {
return "", err
}
defer rows.Close()
var csvName sql.NullString
if rows.Next() {
if err := rows.Scan(&csvName); err != nil {
return "", err
}
}
if csvName.Valid {
return csvName.String, nil
}
return "", nil
}
func (s *SQLQuerier) ListBundles(ctx context.Context) (bundles []*api.Bundle, err error) {
query := `SELECT DISTINCT channel_entry.entry_id, operatorbundle.bundle, operatorbundle.bundlepath,
channel_entry.operatorbundle_name, channel_entry.package_name, channel_entry.channel_name, operatorbundle.replaces, operatorbundle.skips,
operatorbundle.version, operatorbundle.skiprange,
dependencies.type, dependencies.value,
properties.type, properties.value
FROM channel_entry
INNER JOIN operatorbundle ON operatorbundle.name = channel_entry.operatorbundle_name
LEFT OUTER JOIN dependencies ON dependencies.operatorbundle_name = channel_entry.operatorbundle_name
LEFT OUTER JOIN properties ON properties.operatorbundle_name = channel_entry.operatorbundle_name
INNER JOIN package ON package.name = channel_entry.package_name`
rows, err := s.db.QueryContext(ctx, query)
if err != nil {
return nil, err
}
defer rows.Close()
bundles = []*api.Bundle{}
bundlesMap := map[string]*api.Bundle{}
for rows.Next() {
var entryID sql.NullInt64
var bundle sql.NullString
var bundlePath sql.NullString
var bundleName sql.NullString
var pkgName sql.NullString
var channelName sql.NullString
var replaces sql.NullString
var skips sql.NullString
var version sql.NullString
var skipRange sql.NullString
var depType sql.NullString
var depValue sql.NullString
var propType sql.NullString
var propValue sql.NullString
if err := rows.Scan(&entryID, &bundle, &bundlePath, &bundleName, &pkgName, &channelName, &replaces, &skips, &version, &skipRange, &depType, &depValue, &propType, &propValue); err != nil {
return nil, err
}
bundleKey := fmt.Sprintf("%s/%s/%s", bundleName.String, version.String, bundlePath.String)
bundleItem, ok := bundlesMap[bundleKey]
if ok {
// Create new dependency object
dep := &api.Dependency{}
if !depType.Valid || !depValue.Valid {
continue
}
dep.Type = depType.String
dep.Value = depValue.String
// Add new dependency to the existing list
existingDeps := bundleItem.Dependencies
existingDeps = append(existingDeps, dep)
bundleItem.Dependencies = existingDeps
// Create new property object
prop := &api.Property{}
if !propType.Valid || !propValue.Valid {
continue
}
prop.Type = propType.String
prop.Value = propValue.String
// Add new property to the existing list
existingProps := bundleItem.Properties
existingProps = append(existingProps, prop)
bundleItem.Properties = existingProps
} else {
// Create new bundle
out := &api.Bundle{}
if bundle.Valid && bundle.String != "" {
out, err = registry.BundleStringToAPIBundle(bundle.String)
if err != nil {
return nil, err
}
}
out.CsvName = bundleName.String
out.PackageName = pkgName.String
out.ChannelName = channelName.String
out.BundlePath = bundlePath.String
out.Version = version.String
out.SkipRange = skipRange.String
out.Replaces = replaces.String
out.Skips = strings.Split(skips.String, ",")
provided, required, err := s.GetApisForEntry(ctx, entryID.Int64)
if err != nil {
return nil, err
}
out.ProvidedApis = provided
out.RequiredApis = required
// Create new dependency and dependency list
dep := &api.Dependency{}
dependencies := []*api.Dependency{}
dep.Type = depType.String
dep.Value = depValue.String
dependencies = append(dependencies, dep)
out.Dependencies = dependencies
// Create new property and property list
prop := &api.Property{}
properties := []*api.Property{}
prop.Type = propType.String
prop.Value = propValue.String
properties = append(properties, prop)
out.Properties = properties
bundlesMap[bundleKey] = out
}
}
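// Dependencies and properties accumulate once per joined row, so de-duplicate them before returning.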
for _, v := range bundlesMap {
if len(v.Dependencies) > 1 {
newDeps := unique(v.Dependencies)
v.Dependencies = newDeps
}
if len(v.Properties) > 1 {
newProps := uniqueProps(v.Properties)
v.Properties = newProps
}
bundles = append(bundles, v)
}
return
}
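// unique removes duplicate dependencies, keyed by type/value, keeping the first occurrence.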
func unique(deps []*api.Dependency) []*api.Dependency {
keys := make(map[string]bool)
list := []*api.Dependency{}
for _, entry := range deps {
depKey := fmt.Sprintf("%s/%s", entry.Type, entry.Value)
if _, value := keys[depKey]; !value {
keys[depKey] = true
list = append(list, entry)
}
}
return list
}
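// uniqueProps removes duplicate properties, keyed by type/value, keeping the first occurrence.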
func uniqueProps(props []*api.Property) []*api.Property {
keys := make(map[string]bool)
list := []*api.Property{}
for _, entry := range props {
propKey := fmt.Sprintf("%s/%s", entry.Type, entry.Value)
if _, value := keys[propKey]; !value {
keys[propKey] = true
list = append(list, entry)
}
}
return list
}
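// GetDependenciesForBundle returns the distinct dependencies recorded for the bundle identified by name, version, and bundle image path.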
func (s *SQLQuerier) GetDependenciesForBundle(ctx context.Context, name, version, path string) (dependencies []*api.Dependency, err error) {
depQuery := `SELECT DISTINCT type, value FROM dependencies
WHERE operatorbundle_name=?
AND (operatorbundle_version=? OR operatorbundle_version is NULL)
AND (operatorbundle_path=? OR operatorbundle_path is NULL)`
rows, err := s.db.QueryContext(ctx, depQuery, name, version, path)
if err != nil {
return nil, err
}
defer rows.Close()
dependencies = []*api.Dependency{}
for rows.Next() {
var typeName sql.NullString
var value sql.NullString
if err := rows.Scan(&typeName, &value); err != nil {
return nil, err
}
if !typeName.Valid || !value.Valid {
return nil, err
}
dependencies = append(dependencies, &api.Dependency{
Type: typeName.String,
Value: value.String,
})
}
return
}
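// GetPropertiesForBundle returns the distinct properties recorded for the bundle identified by name, version, and bundle image path.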
func (s *SQLQuerier) GetPropertiesForBundle(ctx context.Context, name, version, path string) (properties []*api.Property, err error) {
propQuery := `SELECT DISTINCT type, value FROM properties
WHERE operatorbundle_name=?
AND (operatorbundle_version=? OR operatorbundle_version is NULL)
AND (operatorbundle_path=? OR operatorbundle_path is NULL)`
rows, err := s.db.QueryContext(ctx, propQuery, name, version, path)
if err != nil {
return nil, err
}
defer rows.Close()
properties = []*api.Property{}
for rows.Next() {
var typeName sql.NullString
var value sql.NullString
if err := rows.Scan(&typeName, &value); err != nil {
return nil, err
}
if !typeName.Valid || !value.Valid {
return nil, err
}
properties = append(properties, &api.Property{
Type: typeName.String,
Value: value.String,
})
}
return
}
| NewSQLLiteQuerier |
controller.go | /*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package composite
import (
"fmt"
"reflect"
"sync"
"time"
"github.com/golang/glog"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
"metacontroller.io/apis/metacontroller/v1alpha1"
mcclientset "metacontroller.io/client/generated/clientset/internalclientset"
mclisters "metacontroller.io/client/generated/lister/metacontroller/v1alpha1"
"metacontroller.io/controller/common"
"metacontroller.io/controller/common/finalizer"
dynamicclientset "metacontroller.io/dynamic/clientset"
dynamiccontrollerref "metacontroller.io/dynamic/controllerref"
dynamicdiscovery "metacontroller.io/dynamic/discovery"
dynamicinformer "metacontroller.io/dynamic/informer"
k8s "metacontroller.io/third_party/kubernetes"
)
type parentController struct {
cc *v1alpha1.CompositeController
resources *dynamicdiscovery.ResourceMap
parentResource *dynamicdiscovery.APIResource
mcClient mcclientset.Interface
dynClient *dynamicclientset.Clientset
parentClient *dynamicclientset.ResourceClient
parentInformer *dynamicinformer.ResourceInformer
revisionLister mclisters.ControllerRevisionLister
stopCh, doneCh chan struct{}
queue workqueue.RateLimitingInterface
updateStrategy updateStrategyMap
childInformers common.InformerMap
finalizer *finalizer.Manager
}
func | (resources *dynamicdiscovery.ResourceMap, dynClient *dynamicclientset.Clientset, dynInformers *dynamicinformer.SharedInformerFactory, mcClient mcclientset.Interface, revisionLister mclisters.ControllerRevisionLister, cc *v1alpha1.CompositeController) (pc *parentController, newErr error) {
// Make a dynamic client for the parent resource.
parentClient, err := dynClient.Resource(cc.Spec.ParentResource.APIVersion, cc.Spec.ParentResource.Resource)
if err != nil {
return nil, err
}
parentResource := parentClient.APIResource
updateStrategy, err := makeUpdateStrategyMap(resources, cc)
if err != nil {
return nil, err
}
// Create informer for the parent resource.
parentInformer, err := dynInformers.Resource(cc.Spec.ParentResource.APIVersion, cc.Spec.ParentResource.Resource)
if err != nil {
return nil, fmt.Errorf("can't create informer for parent resource: %v", err)
}
// Create informers for all child resources.
childInformers := make(common.InformerMap)
defer func() {
if newErr != nil {
// If newParentController fails, Close() any informers we created
// since Stop() will never be called.
for _, childInformer := range childInformers {
childInformer.Close()
}
parentInformer.Close()
}
}()
for _, child := range cc.Spec.ChildResources {
childInformer, err := dynInformers.Resource(child.APIVersion, child.Resource)
if err != nil {
return nil, fmt.Errorf("can't create informer for child resource: %v", err)
}
childInformers.Set(child.APIVersion, child.Resource, childInformer)
}
pc = &parentController{
cc: cc,
resources: resources,
mcClient: mcClient,
dynClient: dynClient,
childInformers: childInformers,
parentClient: parentClient,
parentInformer: parentInformer,
parentResource: parentResource,
revisionLister: revisionLister,
updateStrategy: updateStrategy,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "CompositeController-"+cc.Name),
finalizer: &finalizer.Manager{
Name: "metacontroller.io/compositecontroller-" + cc.Name,
Enabled: cc.Spec.Hooks.Finalize != nil,
},
}
return pc, nil
}
func (pc *parentController) Start() {
pc.stopCh = make(chan struct{})
pc.doneCh = make(chan struct{})
// Install event handlers. CompositeControllers can be created at any time,
// so we have to assume the shared informers are already running. We can't
// add event handlers in newParentController() since pc might be incomplete.
parentHandlers := cache.ResourceEventHandlerFuncs{
AddFunc: pc.enqueueParentObject,
UpdateFunc: pc.updateParentObject,
DeleteFunc: pc.enqueueParentObject,
}
if pc.cc.Spec.ResyncPeriodSeconds != nil {
// Use a custom resync period if requested. This only applies to the parent.
resyncPeriod := time.Duration(*pc.cc.Spec.ResyncPeriodSeconds) * time.Second
// Put a reasonable limit on it.
if resyncPeriod < time.Second {
resyncPeriod = time.Second
}
pc.parentInformer.Informer().AddEventHandlerWithResyncPeriod(parentHandlers, resyncPeriod)
} else {
pc.parentInformer.Informer().AddEventHandler(parentHandlers)
}
for _, childInformer := range pc.childInformers {
childInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: pc.onChildAdd,
UpdateFunc: pc.onChildUpdate,
DeleteFunc: pc.onChildDelete,
})
}
go func() {
defer close(pc.doneCh)
defer utilruntime.HandleCrash()
glog.Infof("Starting %v CompositeController", pc.parentResource.Kind)
defer glog.Infof("Shutting down %v CompositeController", pc.parentResource.Kind)
// Wait for dynamic client and all informers.
glog.Infof("Waiting for %v CompositeController caches to sync", pc.parentResource.Kind)
syncFuncs := make([]cache.InformerSynced, 0, 2+len(pc.cc.Spec.ChildResources))
syncFuncs = append(syncFuncs, pc.dynClient.HasSynced, pc.parentInformer.Informer().HasSynced)
for _, childInformer := range pc.childInformers {
syncFuncs = append(syncFuncs, childInformer.Informer().HasSynced)
}
if !k8s.WaitForCacheSync(pc.parentResource.Kind, pc.stopCh, syncFuncs...) {
// We wait forever unless Stop() is called, so this isn't an error.
glog.Warningf("%v CompositeController cache sync never finished", pc.parentResource.Kind)
return
}
// 5 workers ought to be enough for anyone.
var wg sync.WaitGroup
for i := 0; i < 5; i++ {
wg.Add(1)
go func() {
defer wg.Done()
wait.Until(pc.worker, time.Second, pc.stopCh)
}()
}
wg.Wait()
}()
}
func (pc *parentController) Stop() {
close(pc.stopCh)
pc.queue.ShutDown()
<-pc.doneCh
// Remove event handlers and close informers for all child resources.
for _, informer := range pc.childInformers {
informer.Informer().RemoveEventHandlers()
informer.Close()
}
// Remove event handlers and close informer for the parent resource.
pc.parentInformer.Informer().RemoveEventHandlers()
pc.parentInformer.Close()
}
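// worker processes items from the work queue until the queue is shut down.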
func (pc *parentController) worker() {
for pc.processNextWorkItem() {
}
}
func (pc *parentController) processNextWorkItem() bool {
key, quit := pc.queue.Get()
if quit {
return false
}
defer pc.queue.Done(key)
err := pc.sync(key.(string))
if err != nil {
utilruntime.HandleError(fmt.Errorf("failed to sync %v %q: %v", pc.parentResource.Kind, key, err))
pc.queue.AddRateLimited(key)
return true
}
pc.queue.Forget(key)
return true
}
func (pc *parentController) enqueueParentObject(obj interface{}) {
key, err := common.KeyFunc(obj)
if err != nil {
utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", obj, err))
return
}
pc.queue.Add(key)
}
func (pc *parentController) enqueueParentObjectAfter(obj interface{}, delay time.Duration) {
key, err := common.KeyFunc(obj)
if err != nil {
utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", obj, err))
return
}
pc.queue.AddAfter(key, delay)
}
func (pc *parentController) updateParentObject(old, cur interface{}) {
// We used to ignore our own status updates, but we don't anymore.
// It's sometimes necessary for a hook to see its own status updates
// so it knows that the status was committed to storage.
// This could cause endless sync hot-loops if your hook always returns a
// different status (e.g. you have some incrementing counter).
// Doing that is an anti-pattern anyway because status generation should be
// idempotent if nothing meaningful has actually changed in the system.
pc.enqueueParentObject(cur)
}
// resolveControllerRef returns the controller referenced by a ControllerRef,
// or nil if the ControllerRef could not be resolved to a matching controller
// of the correct Kind.
func (pc *parentController) resolveControllerRef(childNamespace string, controllerRef *metav1.OwnerReference) *unstructured.Unstructured {
// We can't look up by UID, so look up by Name and then verify UID.
// Don't even try to look up by Name if it's the wrong APIGroup or Kind.
if apiGroup, _ := common.ParseAPIVersion(controllerRef.APIVersion); apiGroup != pc.parentResource.Group {
return nil
}
if controllerRef.Kind != pc.parentResource.Kind {
return nil
}
parentNamespace := ""
if pc.parentResource.Namespaced {
// If the parent is namespaced, it must be in the same namespace as the
// child because controllerRef does not support cross-namespace references
// (except for namespaced child -> cluster-scoped parent).
parentNamespace = childNamespace
}
parent, err := pc.parentInformer.Lister().Get(parentNamespace, controllerRef.Name)
if err != nil {
return nil
}
if parent.GetUID() != controllerRef.UID {
// The controller we found with this Name is not the same one that the
// ControllerRef points to.
return nil
}
return parent
}
func (pc *parentController) onChildAdd(obj interface{}) {
child := obj.(*unstructured.Unstructured)
if child.GetDeletionTimestamp() != nil {
pc.onChildDelete(child)
return
}
// If it has a ControllerRef, that's all that matters.
if controllerRef := metav1.GetControllerOf(child); controllerRef != nil {
parent := pc.resolveControllerRef(child.GetNamespace(), controllerRef)
if parent == nil {
// The controllerRef isn't a parent we know about.
return
}
glog.V(4).Infof("%v %v/%v: child %v %v created or updated", pc.parentResource.Kind, parent.GetNamespace(), parent.GetName(), child.GetKind(), child.GetName())
pc.enqueueParentObject(parent)
return
}
// Otherwise, it's an orphan. Get a list of all matching parents and sync
// them to see if anyone wants to adopt it.
parents := pc.findPotentialParents(child)
if len(parents) == 0 {
return
}
glog.V(4).Infof("%v: orphan child %v %s created or updated", pc.parentResource.Kind, child.GetKind(), child.GetName())
for _, parent := range parents {
pc.enqueueParentObject(parent)
}
}
func (pc *parentController) onChildUpdate(old, cur interface{}) {
oldChild := old.(*unstructured.Unstructured)
curChild := cur.(*unstructured.Unstructured)
// Don't sync if it's a no-op update (probably a relist/resync).
// We don't care about resyncs for children; we rely on the parent resync.
if oldChild.GetResourceVersion() == curChild.GetResourceVersion() {
return
}
// Other than that, we treat updates the same as creates.
// Level-triggered controllers shouldn't care what the old state was.
pc.onChildAdd(cur)
}
func (pc *parentController) onChildDelete(obj interface{}) {
child, ok := obj.(*unstructured.Unstructured)
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
utilruntime.HandleError(fmt.Errorf("couldn't get object from tombstone %+v", obj))
return
}
child, ok = tombstone.Obj.(*unstructured.Unstructured)
if !ok {
utilruntime.HandleError(fmt.Errorf("tombstone contained object that is not *unstructured.Unstructured %#v", obj))
return
}
}
// If it's an orphan, there's nothing to do because we never adopt orphans
// that are being deleted.
controllerRef := metav1.GetControllerOf(child)
if controllerRef == nil {
return
}
// Sync the parent of this child (if it's ours).
parent := pc.resolveControllerRef(child.GetNamespace(), controllerRef)
if parent == nil {
// The controllerRef isn't a parent we know about.
return
}
glog.V(4).Infof("%v %v/%v: child %v %v deleted", pc.parentResource.Kind, parent.GetNamespace(), parent.GetName(), child.GetKind(), child.GetName())
pc.enqueueParentObject(parent)
}
func (pc *parentController) findPotentialParents(child *unstructured.Unstructured) []*unstructured.Unstructured {
childLabels := labels.Set(child.GetLabels())
var parents []*unstructured.Unstructured
var err error
if pc.parentResource.Namespaced {
// If the parent is namespaced, it must be in the same namespace as the child.
parents, err = pc.parentInformer.Lister().ListNamespace(child.GetNamespace(), labels.Everything())
} else {
parents, err = pc.parentInformer.Lister().List(labels.Everything())
}
if err != nil {
return nil
}
var matchingParents []*unstructured.Unstructured
for _, parent := range parents {
selector, err := pc.makeSelector(parent, nil)
if err != nil || selector.Empty() {
continue
}
if selector.Matches(childLabels) {
matchingParents = append(matchingParents, parent)
}
}
return matchingParents
}
func (pc *parentController) sync(key string) error {
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
return err
}
glog.V(4).Infof("sync %v %v/%v", pc.parentResource.Kind, namespace, name)
parent, err := pc.parentInformer.Lister().Get(namespace, name)
if apierrors.IsNotFound(err) {
// Swallow the error since there's no point retrying if the parent is gone.
glog.V(4).Infof("%v %v/%v has been deleted", pc.parentResource.Kind, namespace, name)
return nil
}
if err != nil {
return err
}
return pc.syncParentObject(parent)
}
func (pc *parentController) syncParentObject(parent *unstructured.Unstructured) error {
// Before taking any other action, add our finalizer (if desired).
// This ensures we have a chance to clean up after any action we later take.
updatedParent, err := pc.finalizer.SyncObject(pc.parentClient, parent)
if err != nil {
// If we fail to do this, abort before doing anything else and requeue.
return fmt.Errorf("can't sync finalizer for %v %v/%v: %v", parent.GetKind(), parent.GetNamespace(), parent.GetName(), err)
}
parent = updatedParent
// Claim all matching child resources, including orphan/adopt as necessary.
observedChildren, err := pc.claimChildren(parent)
if err != nil {
return err
}
// Reconcile ControllerRevisions belonging to this parent.
// Call the sync hook for each revision, then compute the overall status and
// desired children, accounting for any rollout in progress.
syncResult, err := pc.syncRevisions(parent, observedChildren)
if err != nil {
return err
}
desiredChildren := common.MakeChildMap(parent, syncResult.Children)
// Enqueue a delayed resync, if requested.
if syncResult.ResyncAfterSeconds > 0 {
pc.enqueueParentObjectAfter(parent, time.Duration(syncResult.ResyncAfterSeconds*float64(time.Second)))
}
// If all revisions agree that they've finished finalizing,
// remove our finalizer.
if syncResult.Finalized {
updatedParent, err := pc.parentClient.Namespace(parent.GetNamespace()).RemoveFinalizer(parent, pc.finalizer.Name)
if err != nil {
return fmt.Errorf("can't remove finalizer for %v %v/%v: %v", parent.GetKind(), parent.GetNamespace(), parent.GetName(), err)
}
parent = updatedParent
}
// Enforce invariants between parent selector and child labels.
selector, err := pc.makeSelector(parent, nil)
if err != nil {
return err
}
for _, group := range desiredChildren {
for _, obj := range group {
// We don't use GetLabels() because that swallows conversion errors.
objLabels, _, err := unstructured.NestedStringMap(obj.UnstructuredContent(), "metadata", "labels")
if err != nil {
return fmt.Errorf("invalid labels on desired child %v %v/%v: %v", obj.GetKind(), obj.GetNamespace(), obj.GetName(), err)
}
// If selector generation is enabled, add the controller-uid label to all
// desired children so they match the generated selector.
if pc.cc.Spec.GenerateSelector != nil && *pc.cc.Spec.GenerateSelector {
if objLabels == nil {
objLabels = make(map[string]string, 1)
}
if _, ok := objLabels["controller-uid"]; !ok {
objLabels["controller-uid"] = string(parent.GetUID())
obj.SetLabels(objLabels)
}
}
// Make sure all desired children match the parent's selector.
// We consider it user error to try to create children that would be
// immediately orphaned.
if !selector.Matches(labels.Set(objLabels)) {
return fmt.Errorf("labels on desired child %v %v/%v don't match parent selector", obj.GetKind(), obj.GetNamespace(), obj.GetName())
}
}
}
// Reconcile child objects belonging to this parent.
// Remember manage error, but continue to update status regardless.
//
// We only manage children if the parent is "alive" (not pending deletion),
// or if it's pending deletion and we have a `finalize` hook.
var manageErr error
if parent.GetDeletionTimestamp() == nil || pc.finalizer.ShouldFinalize(parent) {
// Reconcile children.
if err := common.ManageChildren(pc.dynClient, pc.updateStrategy, parent, observedChildren, desiredChildren); err != nil {
manageErr = fmt.Errorf("can't reconcile children for %v %v/%v: %v", pc.parentResource.Kind, parent.GetNamespace(), parent.GetName(), err)
}
}
// Update parent status.
// We'll want to make sure this happens after manageChildren once we support observedGeneration.
if _, err := pc.updateParentStatus(parent, syncResult.Status); err != nil {
return fmt.Errorf("can't update status for %v %v/%v: %v", pc.parentResource.Kind, parent.GetNamespace(), parent.GetName(), err)
}
return manageErr
}
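// makeSelector builds the label selector used to claim children: either a generated
// controller-uid selector (when spec.generateSelector is set) or the parent's spec.selector,
// optionally extended with extraMatchLabels.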
func (pc *parentController) makeSelector(parent *unstructured.Unstructured, extraMatchLabels map[string]string) (labels.Selector, error) {
labelSelector := &metav1.LabelSelector{}
if pc.cc.Spec.GenerateSelector != nil && *pc.cc.Spec.GenerateSelector {
// Select by controller-uid, like Job does.
// Any selector on the parent is ignored in this case.
labelSelector = metav1.AddLabelToSelector(labelSelector, "controller-uid", string(parent.GetUID()))
} else {
// Get the parent's LabelSelector.
if err := k8s.GetNestedFieldInto(labelSelector, parent.UnstructuredContent(), "spec", "selector"); err != nil {
return nil, fmt.Errorf("can't get label selector from %v %v/%v", pc.parentResource.Kind, parent.GetNamespace(), parent.GetName())
}
// An empty selector doesn't make sense for a CompositeController parent.
// This is likely user error, and could be dangerous (selecting everything).
if len(labelSelector.MatchLabels) == 0 && len(labelSelector.MatchExpressions) == 0 {
return nil, fmt.Errorf(".spec.selector must have either matchLabels, matchExpressions, or both")
}
}
for key, value := range extraMatchLabels {
labelSelector = metav1.AddLabelToSelector(labelSelector, key, value)
}
selector, err := metav1.LabelSelectorAsSelector(labelSelector)
if err != nil {
return nil, fmt.Errorf("can't convert label selector (%#v): %v", labelSelector, err)
}
return selector, nil
}
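// canAdoptFunc returns a function that re-fetches the parent with an uncached read and
// verifies its UID hasn't changed before any adoption is attempted.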
func (pc *parentController) canAdoptFunc(parent *unstructured.Unstructured) func() error {
return k8s.RecheckDeletionTimestamp(func() (metav1.Object, error) {
// Make sure this is always an uncached read.
fresh, err := pc.parentClient.Namespace(parent.GetNamespace()).Get(parent.GetName(), metav1.GetOptions{})
if err != nil {
return nil, err
}
if fresh.GetUID() != parent.GetUID() {
return nil, fmt.Errorf("original %v %v/%v is gone: got uid %v, wanted %v", pc.parentResource.Kind, parent.GetNamespace(), parent.GetName(), fresh.GetUID(), parent.GetUID())
}
return fresh, nil
})
}
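// claimChildren lists every configured child resource in the parent's scope and
// adopts or orphans them based on ownership and the parent's selector.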
func (pc *parentController) claimChildren(parent *unstructured.Unstructured) (common.ChildMap, error) {
// Set up values common to all child types.
parentNamespace := parent.GetNamespace()
parentGVK := pc.parentResource.GroupVersionKind()
selector, err := pc.makeSelector(parent, nil)
if err != nil {
return nil, err
}
canAdoptFunc := pc.canAdoptFunc(parent)
// Claim all child types.
childMap := make(common.ChildMap)
for _, child := range pc.cc.Spec.ChildResources {
// List all objects of the child kind in the parent object's namespace,
// or in all namespaces if the parent is cluster-scoped.
childClient, err := pc.dynClient.Resource(child.APIVersion, child.Resource)
if err != nil {
return nil, err
}
informer := pc.childInformers.Get(child.APIVersion, child.Resource)
if informer == nil {
return nil, fmt.Errorf("no informer for resource %q in apiVersion %q", child.Resource, child.APIVersion)
}
var all []*unstructured.Unstructured
if pc.parentResource.Namespaced {
all, err = informer.Lister().ListNamespace(parentNamespace, labels.Everything())
} else {
all, err = informer.Lister().List(labels.Everything())
}
if err != nil {
return nil, fmt.Errorf("can't list %v children: %v", childClient.Kind, err)
}
// Always include the requested groups, even if there are no entries.
childMap.InitGroup(child.APIVersion, childClient.Kind)
// Handle orphan/adopt and filter by owner+selector.
crm := dynamiccontrollerref.NewUnstructuredManager(childClient, parent, selector, parentGVK, childClient.GroupVersionKind(), canAdoptFunc)
children, err := crm.ClaimChildren(all)
if err != nil {
return nil, fmt.Errorf("can't claim %v children: %v", childClient.Kind, err)
}
// Add children to map by name.
// Note that we limit each parent to only working within its own namespace.
for _, obj := range children {
childMap.Insert(parent, obj)
}
}
return childMap, nil
}
func (pc *parentController) updateParentStatus(parent *unstructured.Unstructured, status map[string]interface{}) (*unstructured.Unstructured, error) {
// Inject ObservedGeneration before comparing with old status,
// so we're comparing against the final form we desire.
if status == nil {
status = make(map[string]interface{})
}
status["observedGeneration"] = parent.GetGeneration()
// Overwrite .status field of parent object without touching other parts.
// We can't use Patch() because we need to ensure that the UID matches.
return pc.parentClient.Namespace(parent.GetNamespace()).AtomicStatusUpdate(parent, func(obj *unstructured.Unstructured) bool {
oldStatus := obj.UnstructuredContent()["status"]
if reflect.DeepEqual(oldStatus, status) {
// Nothing to do.
return false
}
obj.UnstructuredContent()["status"] = status
return true
})
}
| newParentController |
assign_cookie.go | package l
import "sort"
func findContentChildren(g []int, s []int) int {
sort.Ints(g)
sort.Ints(s)
gIndex := 0
sIndex := 0
for gIndex < len(g) && sIndex < len(s) {
if g[gIndex] < s[sIndex] {
gIndex++
}
sIndex++
}
return gIndex
}
func | (g []int, s []int) int {
sort.Ints(g)
sort.Ints(s)
gIndex := 0
sIndex := 0
for gIndex < len(g) && sIndex < len(s) {
if g[gIndex] <= s[sIndex] {
gIndex++
}
sIndex++
}
return gIndex
}
| IndContentChildren |
main.py | import string
import random
# --- Defining Variables ---
LOWER_ALPHABET = list(string.ascii_lowercase)
DIGITS = list(string.digits)
UPPER_ALPHABET = list(string.ascii_uppercase)
SYMBOLS = list(string.punctuation)
SYMBOLS_DELETE = ['"', "'", "(", ")", ",", ".", ":", ";", "[", "]", "|", "`", "{", "}"]
for x in SYMBOLS_DELETE:
SYMBOLS.remove(x)
CHAR_TYPES = [LOWER_ALPHABET, DIGITS] # characters used as default
# --- PROGRAM INTRO ---
print("""
#############################################################
# --- Password Generator --- #
#############################################################
# Language: Python #
#############################################################
# #
# This is my very first project with Python #
# Lowercase characters and digits are used as default #
# #
#############################################################
""")
# --- LENGTH QUESTION ---
while True:
print("Password Length (Min: 8 / Max: 48):")
pass_len = input()
try:
pass_len = int(pass_len)
if pass_len >= 8 and pass_len <= 48:
break
else:
print("\nYou should insert a number between 8 and 16.\n")
except ValueError:
# In case of the user insert a value that cannot be turned into a 'int' type
print("\nYou should insert a NUMBER between 8 and 16.\n")
# --- UPPERCASE AND SYMBOLS QUESTION FUNCTION ---
def question_checker(phrase, char_type):
"""Check if the user inserts a valid value on the upper case and symbols question.
Then append the specific char type list if he answer is "Yes"
"""
while True:
print("")
print(phrase)
answer = input().strip().capitalize()
if answer == "Yes" or answer == "No":
break
else:
print("\nInvalid Value.\n")
def char_assignment(char_check, char_type):
if char_check == "Yes":
return CHAR_TYPES.append(char_type)
else:
pass
char_assignment(answer, char_type)
# --- ASSIGNING UPPERCASE AND/OR SYMBOLS CHARACTERS INTO THE CHAR_TYPES LIST. ---
question_checker("Do you want uppercase letters? [Yes/No]", UPPER_ALPHABET)
question_checker("Do you want symbols? [Yes/No]", SYMBOLS)
# --- CREATE THE PASSWORD ---
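# Builds the password: one guaranteed character from each selected char type, the rest chosen at random, then shuffled.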
def create_password():
password_list = []
for x in range(len(CHAR_TYPES)):
password_list.append(CHAR_TYPES[x][random.randrange(len(CHAR_TYPES[x]))]) # guarantee at least one character from each selected char type
for x in range(pass_len - len(CHAR_TYPES)):
random_chartype = random.randrange(len(CHAR_TYPES))
password_list.append(CHAR_TYPES[random_chartype][random.randrange(len(CHAR_TYPES[random_chartype]))]) # the remaining positions are filled with random characters
random.shuffle(password_list)
password = "".join(password_list)
return password
# --- SHOW OUTPUT ---
def show_password():
|
show_password()
# --- REMAKE THE PASSWORD ---
while True:
print("Remake the password? [Yes/No]")
answer = input().strip().capitalize()
if answer == "Yes" or answer == "No":
if answer == "Yes":
show_password()
else:
print("\n")
break
else:
print("\nInvalid Value.\n") | print("\n")
print(f"Password: {create_password()} ")
print("\n") |
format.go | package log
import (
"bytes"
"encoding/json"
"fmt"
"reflect"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"unicode/utf8"
)
const (
timeFormat = "2006-01-02T15:04:05-0700"
termTimeFormat = "01-02|15:04:05"
floatFormat = 'f'
termMsgJust = 40
)
// locationTrims are trimmed for display to avoid unwieldy log lines.
var (
locationTrims = []string{
"github.com/ProtonFoundation/Proton/",
}
is_debug = false
)
func IsDebug() bool {
return is_debug
}
// PrintOrigins sets or unsets log location (file:line) printing for terminal
// format output.
func PrintOrigins(print bool) {
is_debug = print
if print {
atomic.StoreUint32(&locationEnabled, 1)
} else {
atomic.StoreUint32(&locationEnabled, 0)
}
}
// locationEnabled is an atomic flag controlling whether the terminal formatter
// should append the log locations too when printing entries.
var locationEnabled uint32
// locationLength is the maximum path length encountered, which all logs are
// padded to in order to aid alignment.
var locationLength uint32
// fieldPadding is a global map with maximum field value lengths seen until now
// to allow padding log contexts in a bit smarter way.
var fieldPadding = make(map[string]int)
// fieldPaddingLock is a global mutex protecting the field padding map.
var fieldPaddingLock sync.RWMutex
type Format interface {
Format(r *Record) []byte
}
// FormatFunc returns a new Format object which uses
// the given function to perform record formatting.
func FormatFunc(f func(*Record) []byte) Format {
return formatFunc(f)
}
type formatFunc func(*Record) []byte
func (f formatFunc) Format(r *Record) []byte {
return f(r)
}
// TerminalStringer is an analogous interface to the stdlib stringer, allowing
// own types to have custom shortened serialization formats when printed to the
// screen.
type TerminalStringer interface {
TerminalString() string
}
// TerminalFormat formats log records optimized for human readability on
// a terminal with color-coded level output and terser human friendly timestamp.
// This format should only be used for interactive programs or while developing.
//
// [TIME] [LEVEL] MESSAGE key=value key=value ...
//
// Example:
//
// [May 16 20:58:45] [DBUG] remove route ns=haproxy addr=127.0.0.1:50002
//
func TerminalFormat(usecolor bool) Format |
// LogfmtFormat prints records in logfmt format, an easy machine-parseable but human-readable
// format for key/value pairs.
//
// For more details see: http://godoc.org/github.com/kr/logfmt
//
func LogfmtFormat() Format {
return FormatFunc(func(r *Record) []byte {
common := []interface{}{r.KeyNames.Time, r.Time, r.KeyNames.Lvl, r.Lvl, r.KeyNames.Msg, r.Msg}
buf := &bytes.Buffer{}
logfmt(buf, append(common, r.Ctx...), 0, false)
return buf.Bytes()
})
}
func logfmt(buf *bytes.Buffer, ctx []interface{}, color int, term bool) {
for i := 0; i < len(ctx); i += 2 {
if i != 0 {
buf.WriteByte(' ')
}
k, ok := ctx[i].(string)
v := formatLogfmtValue(ctx[i+1], term)
if !ok {
k, v = errorKey, formatLogfmtValue(k, term)
}
// XXX: we should probably check that all the key bytes are valid
fieldPaddingLock.RLock()
padding := fieldPadding[k]
fieldPaddingLock.RUnlock()
length := utf8.RuneCountInString(v)
if padding < length {
padding = length
fieldPaddingLock.Lock()
fieldPadding[k] = padding
fieldPaddingLock.Unlock()
}
if color > 0 {
fmt.Fprintf(buf, "\x1b[%dm%s\x1b[0m=", color, k)
} else {
buf.WriteString(k)
buf.WriteByte('=')
}
buf.WriteString(v)
if i < len(ctx)-2 {
buf.Write(bytes.Repeat([]byte{' '}, padding-length))
}
}
buf.WriteByte('\n')
}
// JsonFormat formats log records as JSON objects separated by newlines.
// It is the equivalent of JsonFormatEx(false, true).
func JsonFormat() Format {
return JsonFormatEx(false, true)
}
// JsonFormatEx formats log records as JSON objects. If pretty is true,
// records will be pretty-printed. If lineSeparated is true, records
// will be logged with a new line between each record.
func JsonFormatEx(pretty, lineSeparated bool) Format {
jsonMarshal := json.Marshal
if pretty {
jsonMarshal = func(v interface{}) ([]byte, error) {
return json.MarshalIndent(v, "", " ")
}
}
return FormatFunc(func(r *Record) []byte {
props := make(map[string]interface{})
props[r.KeyNames.Time] = r.Time
props[r.KeyNames.Lvl] = r.Lvl.String()
props[r.KeyNames.Msg] = r.Msg
for i := 0; i < len(r.Ctx); i += 2 {
k, ok := r.Ctx[i].(string)
if !ok {
props[errorKey] = fmt.Sprintf("%+v is not a string key", r.Ctx[i])
}
props[k] = formatJsonValue(r.Ctx[i+1])
}
b, err := jsonMarshal(props)
if err != nil {
b, _ = jsonMarshal(map[string]string{
errorKey: err.Error(),
})
return b
}
if lineSeparated {
b = append(b, '\n')
}
return b
})
}
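// formatShared normalizes common value types (time.Time, error, fmt.Stringer) before
// formatting, and converts panics caused by nil pointer receivers into the string "nil".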
func formatShared(value interface{}) (result interface{}) {
defer func() {
if err := recover(); err != nil {
if v := reflect.ValueOf(value); v.Kind() == reflect.Ptr && v.IsNil() {
result = "nil"
} else {
panic(err)
}
}
}()
switch v := value.(type) {
case time.Time:
return v.Format(timeFormat)
case error:
return v.Error()
case fmt.Stringer:
return v.String()
default:
return v
}
}
func formatJsonValue(value interface{}) interface{} {
value = formatShared(value)
switch value.(type) {
case int, int8, int16, int32, int64, float32, float64, uint, uint8, uint16, uint32, uint64, string:
return value
default:
return fmt.Sprintf("%+v", value)
}
}
// formatValue formats a value for serialization
func formatLogfmtValue(value interface{}, term bool) string {
if value == nil {
return "nil"
}
if t, ok := value.(time.Time); ok {
// Performance optimization: No need for escaping since the provided
// timeFormat doesn't have any escape characters, and escaping is
// expensive.
return t.Format(timeFormat)
}
if term {
if s, ok := value.(TerminalStringer); ok {
// Custom terminal stringer provided, use that
return escapeString(s.TerminalString())
}
}
value = formatShared(value)
switch v := value.(type) {
case bool:
return strconv.FormatBool(v)
case float32:
return strconv.FormatFloat(float64(v), floatFormat, 3, 64)
case float64:
return strconv.FormatFloat(v, floatFormat, 3, 64)
case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64:
return fmt.Sprintf("%d", value)
case string:
return escapeString(v)
default:
return escapeString(fmt.Sprintf("%+v", value))
}
}
var stringBufPool = sync.Pool{
New: func() interface{} { return new(bytes.Buffer) },
}
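// escapeString escapes logfmt special characters and wraps the value in quotes only
// when it contains whitespace, control characters, '=' or '"'.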
func escapeString(s string) string {
needsQuotes := false
needsEscape := false
for _, r := range s {
if r <= ' ' || r == '=' || r == '"' {
needsQuotes = true
}
if r == '\\' || r == '"' || r == '\n' || r == '\r' || r == '\t' {
needsEscape = true
}
}
if !needsEscape && !needsQuotes {
return s
}
e := stringBufPool.Get().(*bytes.Buffer)
e.WriteByte('"')
for _, r := range s {
switch r {
case '\\', '"':
e.WriteByte('\\')
e.WriteByte(byte(r))
case '\n':
e.WriteString("\\n")
case '\r':
e.WriteString("\\r")
case '\t':
e.WriteString("\\t")
default:
e.WriteRune(r)
}
}
e.WriteByte('"')
var ret string
if needsQuotes {
ret = e.String()
} else {
ret = string(e.Bytes()[1: e.Len()-1])
}
e.Reset()
stringBufPool.Put(e)
return ret
}
| {
return FormatFunc(func(r *Record) []byte {
var color = 0
if usecolor {
switch r.Lvl {
case LvlCrit:
color = 35
case LvlError:
color = 31
case LvlWarn:
color = 33
case LvlInfo:
color = 32
case LvlDebug:
color = 36
case LvlTrace:
color = 34
}
}
b := &bytes.Buffer{}
lvl := r.Lvl.AlignedString()
if atomic.LoadUint32(&locationEnabled) != 0 {
// Log origin printing was requested, format the location path and line number
location := fmt.Sprintf("%+v", r.Call)
for _, prefix := range locationTrims {
location = strings.TrimPrefix(location, prefix)
}
// Maintain the maximum location length for fancier alignment
align := int(atomic.LoadUint32(&locationLength))
if align < len(location) {
align = len(location)
atomic.StoreUint32(&locationLength, uint32(align))
}
padding := strings.Repeat(" ", align-len(location))
// Assemble and print the log heading
if color > 0 {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s|%s]%s %s ", color, lvl, r.Time.Format(termTimeFormat), location, padding, r.Msg)
} else {
fmt.Fprintf(b, "%s[%s|%s]%s %s ", lvl, r.Time.Format(termTimeFormat), location, padding, r.Msg)
}
} else {
if color > 0 {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %s ", color, lvl, r.Time.Format(termTimeFormat), r.Msg)
} else {
fmt.Fprintf(b, "%s[%s] %s ", lvl, r.Time.Format(termTimeFormat), r.Msg)
}
}
// try to justify the log output for short messages
length := utf8.RuneCountInString(r.Msg)
if len(r.Ctx) > 0 && length < termMsgJust {
b.Write(bytes.Repeat([]byte{' '}, termMsgJust-length))
}
// print the keys logfmt style
logfmt(b, r.Ctx, color, true)
return b.Bytes()
})
} |
scripts.js | //business logic
function Contact(first, last) {
this.firstName = first;
this.lastName = last;
this.addresses = [];
}
function Address(street, city, state) {
this.street = street;
this.city = city;
this.state = state;
}
Contact.prototype.fullName = function() {
return this.firstName + " " + this.lastName;
}
Address.prototype.fullAddress = function() {
return this.street + ", " + this.city + ", " + this.state;
}
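// Clears all new-contact form inputs after a contact is submitted.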
function resetFields() {
$("input#new-first-name").val("");
$("input#new-last-name").val("");
$("input.new-street").val("");
$("input.new-city").val("");
$("input.new-state").val("");
} | $("#add-address").click(function() {
$("#new-addresses").append('<div class="new-address">' +
'<div class="form-group">' +
'<label for="new-street">Street</label>' +
'<input type="text" class="form-control new-street">' +
'</div>' +
'<div class="form-group">' +
'<label for="new-city">City</label>' +
'<input type="text" class="form-control new-city">' +
'</div>' +
'<div class="form-group">' +
'<label for="new-state">State</label>' +
'<input type="text" class="form-control new-state">' +
'</div>' +
'</div>');
});
$("form#new-contact").submit(function(event) {
event.preventDefault();
var inputtedFirstName = $("input#new-first-name").val();
var inputtedLastName = $("input#new-last-name").val();
var newContact = new Contact(inputtedFirstName, inputtedLastName);
$(".new-address").each(function() {
var inputtedStreet = $(this).find("input.new-street").val();
var inputtedCity = $(this).find("input.new-city").val();
var inputtedState = $(this).find("input.new-state").val();
var newAddress = new Address(inputtedStreet, inputtedCity, inputtedState);
newContact.addresses.push(newAddress);
});
$("ul#contacts").append("<li><span class='contact'>" + newContact.fullName() + "</span></li>");
$(".contact").last().click(function() {
$("#show-contact").show();
$("#show-contact h2").text(newContact.fullName());
$(".first-name").text(newContact.firstName);
$(".last-name").text(newContact.lastName);
$("ul#addresses").text("");
newContact.addresses.forEach(function(address) {
$("ul#addresses").append("<li>" + address.fullAddress() + "</li>");
});
});
resetFields();
});
}); |
// user interface logic
$(document).ready(function() {
|
update.rs | use query_engine_tests::*;
#[test_suite]
mod update {
use indoc::indoc;
use query_engine_tests::{assert_error, run_query, run_query_json, TROUBLE_CHARS};
fn schema_1() -> String {
let schema = indoc! {
r#"model TestModel {
#id(id, Int, @id)
optString String?
optInt Int?
optFloat Float?
optBoolean Boolean?
optDateTime DateTime?
}"#
};
schema.to_owned()
}
fn schema_2() -> String {
let schema = indoc! {
r#"model TestModel {
#id(id, Int, @id)
strField String
uniqField String? @unique
}"#
};
schema.to_owned()
}
fn schema_3() -> String {
let schema = indoc! {
r#"model TestModel {
#id(id, Int, @id)
optEnum MyEnum?
}
enum MyEnum {
A
ABCD
}"#
};
schema.to_owned()
}
fn schema_4() -> String {
let schema = indoc! {
r#"model TestModel {
#id(id, Int, @id)
field String
updatedAt DateTime @updatedAt
createdAt DateTime @default(now())
}"#
};
schema.to_owned()
}
fn schema_5() -> String {
let schema = indoc! {
r#"model TestModel {
#id(id, Int, @id)
createdAt DateTime @default(now())
updatedAt DateTime @updatedAt
}"#
};
schema.to_owned()
}
fn schema_6() -> String {
let schema = indoc! {
r#"model TestModel {
#id(id, Int, @id)
optInt Int?
optFloat Float?
}"#
};
schema.to_owned()
}
fn schema_7() -> String {
let schema = indoc! {
r#"model TestModel {
id1 Float
id2 Int
uniq Int @unique
@@id([id1, id2])
}"#
};
schema.to_owned()
}
//"An updateOne mutation" should "update an item"
#[connector_test(schema(schema_1))]
async fn update_an_item(runner: Runner) -> TestResult<()> {
create_row(&runner, r#"{ id: 1 }"#).await?;
insta::assert_snapshot!(
run_query!(&runner, format!(r#"mutation {{
updateOneTestModel(
where: {{ id: 1 }}
data: {{
optString: {{ set: "test{}" }}
optInt: {{ set: 1337 }}
optFloat: {{ set: 1.234 }}
optBoolean: {{ set: true }}
optDateTime: {{ set: "2016-07-31T23:59:01.000Z" }}
}}
) {{
optString
optInt
optFloat
optBoolean
optDateTime
}}
}}"#, TROUBLE_CHARS)),
@r###"{"data":{"updateOneTestModel":{"optString":"test¥฿😀😁😂😃😄😅😆😇😈😉😊😋😌😍😎😏😐😑😒😓😔😕😖😗😘😙😚😛😜😝😞😟😠😡😢😣😤😥😦😧😨😩😪😫😬😭😮😯😰😱😲😳😴😵😶😷😸😹😺😻😼😽😾😿🙀🙁🙂🙃🙄🙅🙆🙇🙈🙉🙊🙋🙌🙍🙎🙏ऀँंःऄअआइईउऊऋऌऍऎएऐऑऒओऔकखगघङचछजझञटठडढणतथदधनऩपफबभमयर€₭₮₯₰₱₲₳₴₵₶₷₸₹₺₻₼₽₾₿⃀","optInt":1337,"optFloat":1.234,"optBoolean":true,"optDateTime":"2016-07-31T23:59:01.000Z"}}}"###
);
insta::assert_snapshot!(
run_query!(&runner, r#"{ findManyTestModel { id } }"#),
@r###"{"data":{"findManyTestModel":[{"id":1}]}}"###
);
Ok(())
}
// "An updateOne mutation" should "update an item with shorthand notation"
#[connector_test(schema(schema_1))]
async fn update_with_shorthand_notation(runner: Runner) -> TestResult<()> {
create_row(&runner, r#"{ id: 1 }"#).await?;
insta::assert_snapshot!(
run_query!(&runner, format!(r#"mutation {{
updateOneTestModel(
where: {{ id: 1 }}
data: {{
optString: "test{}",
optInt: 1337,
optFloat: 1.234,
optBoolean: true,
optDateTime: "2016-07-31T23:59:01.000Z",
}}
) {{
optString
optInt
optFloat
optBoolean
optDateTime
}}
}}"#, TROUBLE_CHARS)),
@r###"{"data":{"updateOneTestModel":{"optString":"test¥฿😀😁😂😃😄😅😆😇😈😉😊😋😌😍😎😏😐😑😒😓😔😕😖😗😘😙😚😛😜😝😞😟😠😡😢😣😤😥😦😧😨😩😪😫😬😭😮😯😰😱😲😳😴😵😶😷😸😹😺😻😼😽😾😿🙀🙁🙂🙃🙄🙅🙆🙇🙈🙉🙊🙋🙌🙍🙎🙏ऀँंःऄअआइईउऊऋऌऍऎएऐऑऒओऔकखगघङचछजझञटठडढणतथदधनऩपफबभमयर€₭₮₯₰₱₲₳₴₵₶₷₸₹₺₻₼₽₾₿⃀","optInt":1337,"optFloat":1.234,"optBoolean":true,"optDateTime":"2016-07-31T23:59:01.000Z"}}}"###
);
insta::assert_snapshot!(
run_query!(&runner, r#"{ findManyTestModel { id } }"#),
@r###"{"data":{"findManyTestModel":[{"id":1}]}}"###
);
Ok(())
}
// "An updateOne mutation" should "update an item by a unique field"
#[connector_test(schema(schema_2))]
async fn update_by_uniq_field(runner: Runner) -> TestResult<()> {
create_row(&runner, r#"{ id: 1, strField: "test", uniqField: "uniq"}"#).await?;
insta::assert_snapshot!(
run_query!(&runner, r#"mutation {
updateOneTestModel(
where: { uniqField: "uniq" }
data: { strField: { set: "updated" } }
){
strField
}
}"#),
@r###"{"data":{"updateOneTestModel":{"strField":"updated"}}}"###
);
Ok(())
}
// "An updateOne mutation" should "update enums"
#[connector_test(schema(schema_3), capabilities(Enums))]
async fn update_enums(runner: Runner) -> TestResult<()> {
create_row(&runner, r#"{ id: 1 }"#).await?;
insta::assert_snapshot!(
run_query!(&runner, r#"mutation {
updateOneTestModel(
where: { id: 1 }
data: { optEnum: { set: A } }
) {
optEnum
}
}"#),
@r###"{"data":{"updateOneTestModel":{"optEnum":"A"}}}"###
);
Ok(())
}
// "An updateOne mutation" should "gracefully fail when trying to update an item by a unique field with a non-existing value"
#[connector_test(schema(schema_2))]
async fn update_fail_uniq_field_inexistant_value(runner: Runner) -> TestResult<()> {
create_row(&runner, r#"{ id: 1, strField: "test", uniqField: "uniq"}"#).await?;
assert_error!(
runner,
r#"mutation {
updateOneTestModel(
where: { uniqField: "doesn't exist" }
data: { strField: { set: "updated" } }
){
id
}
}"#,
2025,
"An operation failed because it depends on one or more records that were required but not found. Record to update not found."
);
Ok(())
}
// "An updateOne mutation" should "update an updatedAt datetime"
#[connector_test(schema(schema_4))]
async fn update_updated_at_datetime(runner: Runner) -> TestResult<()> {
create_row(&runner, r#"{ id: 1, field: "test"}"#).await?;
let res = run_query_json!(
&runner,
r#"mutation {
updateOneTestModel(
where: { id: 1 }
data: { field: { set: "test2" } }
){
createdAt
updatedAt
}
}"#
);
let created_at = &res["data"]["updateOneTestModel"]["createdAt"].to_string();
let updated_at = &res["data"]["updateOneTestModel"]["updatedAt"].to_string();
assert_ne!(created_at, updated_at);
Ok(())
}
// "UpdatedAt and createdAt" should "be mutable with an update"
#[connector_test(schema(schema_5))]
async fn updated_created_at_mutable_with_update(runner: Runner) -> TestResult<()> {
create_row(&runner, r#"{ id: 1, }"#).await?;
insta::assert_snapshot!(
run_query!(&runner, r#"mutation {
updateOneTestModel(
where: { id: 1 }
data: {
createdAt: { set: "2000-01-01T00:00:00Z" }
updatedAt: { set: "2001-01-01T00:00:00Z" }
}
) {
createdAt
updatedAt
}
}"#),
@r###"{"data":{"updateOneTestModel":{"createdAt":"2000-01-01T00:00:00.000Z","updatedAt":"2001-01-01T00:00:00.000Z"}}}"###
);
Ok(())
}
// "An updateOne mutation" should "correctly apply all number operations for Int"
// TODO(dom): Not working on Mongo (first snapshot)
// -{"data":{"updateOneTestModel":{"optInt":null}}}
// +{"data":{"updateOneTestModel":{"optInt":10}}}
#[connector_test(schema(schema_6), exclude(CockroachDb))]
async fn update_apply_number_ops_for_int(runner: Runner) -> TestResult<()> {
create_row(&runner, r#"{ id: 1 }"#).await?;
create_row(&runner, r#"{ id: 2, optInt: 3}"#).await?;
// Increment
insta::assert_snapshot!(
query_number_operation(&runner, "1", "optInt", "increment", "10").await?,
@r###"{"data":{"updateOneTestModel":{"optInt":null}}}"###
);
insta::assert_snapshot!(
query_number_operation(&runner, "2", "optInt", "increment", "10").await?,
@r###"{"data":{"updateOneTestModel":{"optInt":13}}}"###
);
// Decrement
insta::assert_snapshot!(
query_number_operation(&runner, "1", "optInt", "decrement", "10").await?,
@r###"{"data":{"updateO | ###
);
insta::assert_snapshot!(
query_number_operation(&runner, "2", "optInt", "decrement", "10").await?,
@r###"{"data":{"updateOneTestModel":{"optInt":3}}}"###
);
// Multiply
insta::assert_snapshot!(
query_number_operation(&runner, "1", "optInt", "multiply", "2").await?,
@r###"{"data":{"updateOneTestModel":{"optInt":null}}}"###
);
insta::assert_snapshot!(
query_number_operation(&runner, "2", "optInt", "multiply", "2").await?,
@r###"{"data":{"updateOneTestModel":{"optInt":6}}}"###
);
// Divide
insta::assert_snapshot!(
query_number_operation(&runner, "1", "optInt", "divide", "3").await?,
@r###"{"data":{"updateOneTestModel":{"optInt":null}}}"###
);
insta::assert_snapshot!(
query_number_operation(&runner, "2", "optInt", "divide", "3").await?,
@r###"{"data":{"updateOneTestModel":{"optInt":2}}}"###
);
// Set
insta::assert_snapshot!(
query_number_operation(&runner, "1", "optInt", "set", "5").await?,
@r###"{"data":{"updateOneTestModel":{"optInt":5}}}"###
);
insta::assert_snapshot!(
query_number_operation(&runner, "2", "optInt", "set", "5").await?,
@r###"{"data":{"updateOneTestModel":{"optInt":5}}}"###
);
// Set null
insta::assert_snapshot!(
query_number_operation(&runner, "1", "optInt", "set", "null").await?,
@r###"{"data":{"updateOneTestModel":{"optInt":null}}}"###
);
insta::assert_snapshot!(
query_number_operation(&runner, "2", "optInt", "set", "null").await?,
@r###"{"data":{"updateOneTestModel":{"optInt":null}}}"###
);
Ok(())
}
// CockroachDB does not support the "divide" operator as is.
// See https://github.com/cockroachdb/cockroach/issues/41448.
#[connector_test(schema(schema_6), only(CockroachDb))]
async fn update_apply_number_ops_for_int_cockroach(runner: Runner) -> TestResult<()> {
create_row(&runner, r#"{ id: 1 }"#).await?;
create_row(&runner, r#"{ id: 2, optInt: 3}"#).await?;
// Increment
insta::assert_snapshot!(
query_number_operation(&runner, "1", "optInt", "increment", "10").await?,
@r###"{"data":{"updateOneTestModel":{"optInt":null}}}"###
);
insta::assert_snapshot!(
query_number_operation(&runner, "2", "optInt", "increment", "10").await?,
@r###"{"data":{"updateOneTestModel":{"optInt":13}}}"###
);
// Decrement
insta::assert_snapshot!(
query_number_operation(&runner, "1", "optInt", "decrement", "10").await?,
@r###"{"data":{"updateOneTestModel":{"optInt":null}}}"###
);
insta::assert_snapshot!(
query_number_operation(&runner, "2", "optInt", "decrement", "10").await?,
@r###"{"data":{"updateOneTestModel":{"optInt":3}}}"###
);
// Multiply
insta::assert_snapshot!(
query_number_operation(&runner, "1", "optInt", "multiply", "2").await?,
@r###"{"data":{"updateOneTestModel":{"optInt":null}}}"###
);
insta::assert_snapshot!(
query_number_operation(&runner, "2", "optInt", "multiply", "2").await?,
@r###"{"data":{"updateOneTestModel":{"optInt":6}}}"###
);
// Set
insta::assert_snapshot!(
query_number_operation(&runner, "1", "optInt", "set", "5").await?,
@r###"{"data":{"updateOneTestModel":{"optInt":5}}}"###
);
insta::assert_snapshot!(
query_number_operation(&runner, "2", "optInt", "set", "5").await?,
@r###"{"data":{"updateOneTestModel":{"optInt":5}}}"###
);
// Set null
insta::assert_snapshot!(
query_number_operation(&runner, "1", "optInt", "set", "null").await?,
@r###"{"data":{"updateOneTestModel":{"optInt":null}}}"###
);
insta::assert_snapshot!(
query_number_operation(&runner, "2", "optInt", "set", "null").await?,
@r###"{"data":{"updateOneTestModel":{"optInt":null}}}"###
);
Ok(())
}
// "An updateOne mutation" should "correctly apply all number operations for Float"
#[connector_test(schema(schema_6), exclude(MongoDb))]
async fn update_apply_number_ops_for_float(runner: Runner) -> TestResult<()> {
create_row(&runner, r#"{ id: 1 }"#).await?;
create_row(&runner, r#"{ id: 2, optFloat: 5.5}"#).await?;
// Increment
insta::assert_snapshot!(
query_number_operation(&runner, "1", "optFloat", "increment", "4.6").await?,
@r###"{"data":{"updateOneTestModel":{"optFloat":null}}}"###
);
insta::assert_snapshot!(
query_number_operation(&runner, "2", "optFloat", "increment", "4.6").await?,
@r###"{"data":{"updateOneTestModel":{"optFloat":10.1}}}"###
);
// Decrement
insta::assert_snapshot!(
query_number_operation(&runner, "1", "optFloat", "decrement", "4.6").await?,
@r###"{"data":{"updateOneTestModel":{"optFloat":null}}}"###
);
insta::assert_snapshot!(
query_number_operation(&runner, "2", "optFloat", "decrement", "4.6").await?,
@r###"{"data":{"updateOneTestModel":{"optFloat":5.5}}}"###
);
// Multiply
insta::assert_snapshot!(
query_number_operation(&runner, "1", "optFloat", "multiply", "2").await?,
@r###"{"data":{"updateOneTestModel":{"optFloat":null}}}"###
);
insta::assert_snapshot!(
query_number_operation(&runner, "2", "optFloat", "multiply", "2").await?,
@r###"{"data":{"updateOneTestModel":{"optFloat":11.0}}}"###
);
// Divide
insta::assert_snapshot!(
query_number_operation(&runner, "1", "optFloat", "divide", "2").await?,
@r###"{"data":{"updateOneTestModel":{"optFloat":null}}}"###
);
insta::assert_snapshot!(
query_number_operation(&runner, "2", "optFloat", "divide", "2").await?,
@r###"{"data":{"updateOneTestModel":{"optFloat":5.5}}}"###
);
// Set
insta::assert_snapshot!(
query_number_operation(&runner, "1", "optFloat", "set", "5.1").await?,
@r###"{"data":{"updateOneTestModel":{"optFloat":5.1}}}"###
);
insta::assert_snapshot!(
query_number_operation(&runner, "2", "optFloat", "set", "5.1").await?,
@r###"{"data":{"updateOneTestModel":{"optFloat":5.1}}}"###
);
// Set null
insta::assert_snapshot!(
query_number_operation(&runner, "1", "optFloat", "set", "null").await?,
@r###"{"data":{"updateOneTestModel":{"optFloat":null}}}"###
);
insta::assert_snapshot!(
query_number_operation(&runner, "2", "optFloat", "set", "null").await?,
@r###"{"data":{"updateOneTestModel":{"optFloat":null}}}"###
);
Ok(())
}
// TODO(mongo, precision): Suffers from precision issues on Float
// These precision issues should be gone once the floating point fixes effort is done
// Note: These precision issues are created within Prisma's MongoDB connector, not within MongoDB.
#[connector_test(schema(schema_6), only(MongoDb))]
async fn update_apply_number_ops_for_float_mongo(runner: Runner) -> TestResult<()> {
create_row(&runner, r#"{ id: 1 }"#).await?;
create_row(&runner, r#"{ id: 2, optFloat: 5.5}"#).await?;
// Increment
insta::assert_snapshot!(
query_number_operation(&runner, "1", "optFloat", "increment", "4.6").await?,
@r###"{"data":{"updateOneTestModel":{"optFloat":null}}}"###
);
insta::assert_snapshot!(
query_number_operation(&runner, "2", "optFloat", "increment", "4.6").await?,
@r###"{"data":{"updateOneTestModel":{"optFloat":10.1}}}"###
);
// Decrement
insta::assert_snapshot!(
query_number_operation(&runner, "1", "optFloat", "decrement", "4.6").await?,
@r###"{"data":{"updateOneTestModel":{"optFloat":null}}}"###
);
insta::assert_snapshot!(
query_number_operation(&runner, "2", "optFloat", "decrement", "4.6").await?,
@r###"{"data":{"updateOneTestModel":{"optFloat":5.500000000000001}}}"###
);
// Multiply
insta::assert_snapshot!(
query_number_operation(&runner, "1", "optFloat", "multiply", "2").await?,
@r###"{"data":{"updateOneTestModel":{"optFloat":null}}}"###
);
insta::assert_snapshot!(
query_number_operation(&runner, "2", "optFloat", "multiply", "2").await?,
@r###"{"data":{"updateOneTestModel":{"optFloat":11.0}}}"###
);
// Divide
insta::assert_snapshot!(
query_number_operation(&runner, "1", "optFloat", "divide", "2").await?,
@r###"{"data":{"updateOneTestModel":{"optFloat":null}}}"###
);
insta::assert_snapshot!(
query_number_operation(&runner, "2", "optFloat", "divide", "2").await?,
@r###"{"data":{"updateOneTestModel":{"optFloat":5.500000000000001}}}"###
);
// Set
insta::assert_snapshot!(
query_number_operation(&runner, "1", "optFloat", "set", "5.1").await?,
@r###"{"data":{"updateOneTestModel":{"optFloat":5.100000000000001}}}"###
);
insta::assert_snapshot!(
query_number_operation(&runner, "2", "optFloat", "set", "5.1").await?,
@r###"{"data":{"updateOneTestModel":{"optFloat":5.100000000000001}}}"###
);
// Set null
insta::assert_snapshot!(
query_number_operation(&runner, "1", "optFloat", "set", "null").await?,
@r###"{"data":{"updateOneTestModel":{"optFloat":null}}}"###
);
insta::assert_snapshot!(
query_number_operation(&runner, "2", "optFloat", "set", "null").await?,
@r###"{"data":{"updateOneTestModel":{"optFloat":null}}}"###
);
Ok(())
}
// "An updateOne mutation with number operations" should "handle id changes correctly"
#[connector_test(schema(schema_7), capabilities(CompoundIds))]
async fn update_number_ops_handle_id_change(runner: Runner) -> TestResult<()> {
run_query!(
&runner,
r#"mutation { createOneTestModel(data: { id1: 1.23456, id2: 2, uniq: 3 }) { id1 } }"#
);
insta::assert_snapshot!(
run_query!(&runner, r#"mutation {
updateOneTestModel(
where: { id1_id2: { id1: 1.23456, id2: 2 } }
data: {
id1: { divide: 2 }
uniq: { multiply: 3 }
}
){
id1
id2
uniq
}
}"#),
@r###"{"data":{"updateOneTestModel":{"id1":0.61728,"id2":2,"uniq":9}}}"###
);
Ok(())
}
async fn query_number_operation(
runner: &Runner,
id: &str,
field: &str,
op: &str,
value: &str,
) -> TestResult<String> {
let res = run_query!(
runner,
format!(
r#"mutation {{
updateOneTestModel(
where: {{ id: {} }}
data: {{ {}: {{ {}: {} }} }}
){{
{}
}}
}}"#,
id, field, op, value, field
)
);
Ok(res)
}
async fn create_row(runner: &Runner, data: &str) -> TestResult<()> {
runner
.query(format!("mutation {{ createOneTestModel(data: {}) {{ id }} }}", data))
.await?
.assert_success();
Ok(())
}
}
#[test_suite(schema(json_opt), exclude(MySql(5.6)), capabilities(Json))]
mod json_update {
use query_engine_tests::{assert_error, run_query};
#[connector_test(only(MongoDb))]
async fn update_json(runner: Runner) -> TestResult<()> {
insta::assert_snapshot!(
run_query!(&runner, r#"mutation { createOneTestModel(data: { id: 1 }) { json }}"#),
@r###"{"data":{"createOneTestModel":{"json":null}}}"###
);
insta::assert_snapshot!(
run_query!(&runner, r#"mutation { updateOneTestModel(where: { id: 1 }, data: { json: "{}" }) { json }}"#),
@r###"{"data":{"updateOneTestModel":{"json":"{}"}}}"###
);
insta::assert_snapshot!(
run_query!(&runner, r#"mutation { updateOneTestModel(where: { id: 1 }, data: { json: "null" }) { json }}"#),
@r###"{"data":{"updateOneTestModel":{"json":null}}}"###
);
insta::assert_snapshot!(
run_query!(&runner, r#"mutation { updateOneTestModel(where: { id: 1 }, data: { json: null }) { json }}"#),
@r###"{"data":{"updateOneTestModel":{"json":null}}}"###
);
Ok(())
}
#[connector_test(capabilities(AdvancedJsonNullability))]
async fn update_json_adv(runner: Runner) -> TestResult<()> {
insta::assert_snapshot!(
run_query!(&runner, r#"mutation { createOneTestModel(data: { id: 1 }) { json }}"#),
@r###"{"data":{"createOneTestModel":{"json":null}}}"###
);
insta::assert_snapshot!(
run_query!(&runner, r#"mutation { updateOneTestModel(where: { id: 1 }, data: { json: "{}" }) { json }}"#),
@r###"{"data":{"updateOneTestModel":{"json":"{}"}}}"###
);
insta::assert_snapshot!(
run_query!(&runner, r#"mutation { updateOneTestModel(where: { id: 1 }, data: { json: JsonNull }) { json }}"#),
@r###"{"data":{"updateOneTestModel":{"json":"null"}}}"###
);
insta::assert_snapshot!(
run_query!(&runner, r#"mutation { updateOneTestModel(where: { id: 1 }, data: { json: DbNull }) { json }}"#),
@r###"{"data":{"updateOneTestModel":{"json":null}}}"###
);
Ok(())
}
#[connector_test(capabilities(AdvancedJsonNullability))]
async fn update_json_errors(runner: Runner) -> TestResult<()> {
assert_error!(
&runner,
r#"mutation {
updateOneTestModel(where: { id: 1 }, data: { json: null }) {
json
}
}"#,
2009,
"A value is required but not set."
);
assert_error!(
&runner,
r#"mutation {
updateOneTestModel(where: { id: 1 }, data: { json: AnyNull }) {
id
}
}"#,
2009,
"Value types mismatch. Have: Enum(\"AnyNull\")"
);
Ok(())
}
}
| neTestModel":{"optInt":null}}}" |
constants.py | __author__ = 'Matthew Witherwax (lemoneer)'
class Constant(object):
|
BAUD_RATE = Constant(BAUD_300=0, BAUD_600=1, BAUD_1200=2, BAUD_2400=3, BAUD_4800=4, BAUD_9600=5, BAUD_14400=6,
BAUD_19200=7, BAUD_28800=8, BAUD_38400=9, BAUD_57600=10, BAUD_115200=11, DEFAULT=11)
DAYS = Constant(SUNDAY=0x01, MONDAY=0x02, TUESDAY=0x04, WEDNESDAY=0x08, THURSDAY=0x10, FRIDAY=0x20,
SATURDAY=0x40)
DRIVE = Constant(STRAIGHT=0x8000, STRAIGHT_ALT=0x7FFF, TURN_IN_PLACE_CW=0xFFFF, TURN_IN_PLACE_CCW=0x0001)
MOTORS = Constant(SIDE_BRUSH=0x01, VACUUM=0x02, MAIN_BRUSH=0x04, SIDE_BRUSH_DIRECTION=0x08,
MAIN_BRUSH_DIRECTION=0x10)
LEDS = Constant(DEBRIS=0x01, SPOT=0x02, DOCK=0x04, CHECK_ROBOT=0x08)
WEEKDAY_LEDS = Constant(SUNDAY=0x01, MONDAY=0x02, TUESDAY=0x04, WEDNESDAY=0x08, THURSDAY=0x10, FRIDAY=0x20,
SATURDAY=0x40)
SCHEDULING_LEDS = Constant(COLON=0x01, PM=0x02, AM=0x04, CLOCK=0x08, SCHEDULE=0x10)
RAW_LED = Constant(A=0x01, B=0x02, C=0x04, D=0x08, E=0x10, F=0x20, G=0x40)
BUTTONS = Constant(CLEAN=0x01, SPOT=0x02, DOCK=0x04, MINUTE=0x08, HOUR=0x10, DAY=0x20, SCHEDULE=0x40,
CLOCK=0x80)
ROBOT = Constant(TICK_PER_REV=508.8, WHEEL_DIAMETER=72, WHEEL_BASE=235,
TICK_TO_DISTANCE=0.44456499814949904317867595046408)
MODES = Constant(OFF=0, PASSIVE=1, SAFE=2, FULL=3)
WHEEL_OVERCURRENT = Constant(SIDE_BRUSH=0x01, MAIN_BRUSH=0x02, RIGHT_WHEEL=0x04, LEFT_WHEEL=0x08)
BUMPS_WHEEL_DROPS = Constant(BUMP_RIGHT=0x01, BUMP_LEFT=0x02, WHEEL_DROP_RIGHT=0x04, WHEEL_DROP_LEFT=0x08)
CHARGE_SOURCE = Constant(INTERNAL=0x01, HOME_BASE=0x02)
LIGHT_BUMPER = Constant(LEFT=0x01, FRONT_LEFT=0x02, CENTER_LEFT=0x04, CENTER_RIGHT=0x08, FRONT_RIGHT=0x10,
RIGHT=0x20)
STASIS = Constant(TOGGLING=0x01, DISABLED=0x02)
POWER_SAVE_TIME = 300 # seconds
RESPONSE_SIZES = {0: 26, 1: 10, 2: 6, 3: 10, 4: 14, 5: 12, 6: 52,
# actual sensors
7: 1, 8: 1, 9: 1, 10: 1, 11: 1, 12: 1, 13: 1, 14: 1, 15: 1, 16: 1, 17: 1, 18: 1, 19: 2, 20: 2, 21: 1,
22: 2, 23: 2, 24: 1, 25: 2, 26: 2, 27: 2, 28: 2, 29: 2, 30: 2, 31: 2, 32: 3, 33: 3, 34: 1, 35: 1,
36: 1, 37: 1, 38: 1, 39: 2, 40: 2, 41: 2, 42: 2, 43: 2, 44: 2, 45: 1, 46: 2, 47: 2, 48: 2, 49: 2,
50: 2, 51: 2, 52: 1, 53: 1, 54: 2, 55: 2, 56: 2, 57: 2, 58: 1,
# end actual sensors
100: 80, 101: 28, 106: 12, 107: 9}
| def __init__(self, **kwds):
self.__dict__.update(kwds) |
__init__.py |
from __future__ import division
import os.path
def write_disable_nomenclature_errors (f) :
f.write("try :\n")
f.write(" set_nomenclature_errors_on_read(\"ignore\")\n")
f.write("except Exception :\n")
f.write(" pass\n")
def create_refinement_view_script (
mtz_file_name,
pdb_file_name,
coot_script_name="view_in_coot.py",
work_dir=None,
show_symmetry=True,
peaks_file_name=None,
bad_ligand_list=None,
placed_ligand_list=None) :
from iotbx.file_reader import any_file
from libtbx.utils import concatenate_python_script
import libtbx.load_env
have_anom_map = False
have_anom_residual_map = False
mtz_in = any_file(mtz_file_name).assert_file_type("hkl")
have_anom_map = have_residual_map = False
for array in mtz_in.file_server.miller_arrays :
labels = array.info().labels
if ("ANOM" in labels) :
have_anom_map = True
elif ("ANOMDIFF" in labels) :
have_residual_map = True
f = open(coot_script_name, "w")
print >> f, "import coot"
print >> f, "import os"
write_disable_nomenclature_errors(f)
load_script = libtbx.env.find_in_repositories(
relative_path="cctbx_project/cootbx/view_refinement.py",
test=os.path.isfile)
assert (load_script is not None)
concatenate_python_script(out=f, file_name=load_script)
zoom_ligand_script = libtbx.env.find_in_repositories(
relative_path="cctbx_project/cootbx/simple_zoom_list.py",
test=os.path.isfile)
concatenate_python_script(out=f, file_name=zoom_ligand_script)
if (work_dir is not None) :
pdb_file_name = os.path.basename(pdb_file_name)
mtz_file_name = os.path.basename(mtz_file_name)
f.write("""load_refinement(\n""")
f.write("""pdb_file="%s",\n""" % pdb_file_name)
f.write("""map_file="%s",\n""" % mtz_file_name)
f.write("""show_symmetry=%s,\n""" % show_symmetry)
f.write("""have_anom_map=%s,\n""" % have_anom_map)
f.write("""have_residual_map=%s,\n""" % have_residual_map)
if (work_dir is not None) :
f.write("""work_dir="%s",\n""" % work_dir)
if (peaks_file_name is not None) :
|
f.write(")\n")
if (bad_ligand_list is not None) and (len(bad_ligand_list) > 0) :
print >> f, """draw_simple_zoom_list("""
print >> f, """ title="Residues in suspicious density","""
print >> f, """ items=%s)""" % str(bad_ligand_list)
if (placed_ligand_list is not None) :
print >> f, """draw_simple_zoom_list("""
print >> f, """ title="Placed ligands","""
print >> f, """ items=%s)""" % str(placed_ligand_list)
f.close()
| f.write("""peaks_file="%s",\n""" % peaks_file_name) |
00_javascript.js | // Copyright 2019 NEC Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//////// ---- Callback functions ////////
function callback() {}
callback.prototype = {
Filter1Tbl_add_selectbox : function( result ){
var filterAreaWrap = 'Filter1_Nakami';
var strFilterPrintId = 'Filter1Tbl';
var containerClassName = 'fakeContainer_Filter1Setting';
var intMaxWidth = 650;
var htmlSetExcute = true;
var errMsgBody = '';
var ary_result = getArrayBySafeSeparator(result);
checkTypicalFlagInHADACResult(ary_result);
var resultSetTargetSeq = ary_result[2];
var resultContentTag = ary_result[3];
var objHtmlSetArea = $('#'+filterAreaWrap+' .'+resultSetTargetSeq).get()[0];
if( objHtmlSetArea === null ){
htmlSetExcute = false;
}else{
if( ary_result[0] != "000" ){
htmlSetExcute = false;
errMsgBody = ary_result[2];
}
}
if( htmlSetExcute == true ){
//---- Embed the generated select tag
$(objHtmlSetArea).html(resultContentTag);
// Embed the generated select tag ----
addPullDownBox(filterAreaWrap, strFilterPrintId, intMaxWidth, resultSetTargetSeq, containerClassName);
}else{
window.alert(getSomeMessage("ITAWDCC90101"));
}
showForDeveloper(result);
},
Filter1Tbl_reload : function( result ){
var filterAreaWrap = 'Filter1_Nakami';
var strFilterPrintId = 'Filter1Tbl';
var htmlSetExcute = true;
var errMsgBody = '';
var ary_result = getArrayBySafeSeparator(result);
checkTypicalFlagInHADACResult(ary_result);
var resultContentTag = ary_result[2];
var objTableArea=$('#'+filterAreaWrap+' .table_area').get()[0];
if( objTableArea === null){
htmlSetExcute = false;
}else{
if( ary_result[0] != "000" ){
htmlSetExcute = false;
errMsgBody = ary_result[2];
}
}
if( htmlSetExcute == true ){
objTableArea.innerHTML = resultContentTag;
adjustTableAuto (strFilterPrintId,
"sDefault",
"fakeContainer_Filter1Setting",
webStdTableHeight,
webStdTableWidth );
linkDateInputHelper(filterAreaWrap);
if( ary_result[3]==1 ){
reset_filter(true);
}
}else{
window.alert(getSomeMessage("ITAWDCC90101"));
}
showForDeveloper(result);
if( filter_on_flag == false ){
filter_on_flag = true;
setFilter_special();
}
},
Filter1Tbl_recCount : function(result){
var strMixOuterFrameName = 'Mix1_Nakami';
var ary_result = getArrayBySafeSeparator(result);
checkTypicalFlagInHADACResult(ary_result);
var resultContentTag = ary_result[2];
var objAlertArea=$('#'+strMixOuterFrameName+' .alert_area').get()[0];
objAlertArea.style.display = "none";
if( ary_result[0] == "000" ){
if( ckRangeOfAlert(ary_result[2], webPrintRowLimit) ){
window.alert(getSomeMessage("ITAWDCC90103",{0:webPrintRowLimit,1:ary_result[2]}));
// Do not display the web table
print_async(0);
}else{
if( ckRangeOfConfirm(ary_result[2] , webPrintRowConfirm, webPrintRowLimit) ){
if( window.confirm( getSomeMessage("ITAWDCC20201",{0:ary_result[2]})) ){
// Display the web table
print_async(1);
}else{
// Do not display the web table
print_async(0);
}
}else{
// Display the web table
print_async(1);
}
}
}else if( ary_result[0] == "002" ){
window.alert(getSomeMessage("ITAWDCC90102"));
objAlertArea.innerHTML = ary_result[2];
objAlertArea.style.display = "block";
}else{
window.alert(getSomeMessage("ITAWDCC90101"));
}
showForDeveloper(result);
},
Filter1Tbl_printTable : function(result){
var strMixOuterFrameName = 'Mix1_Nakami';
var strMixInnerFramePrefix = 'Mix1_';
var ary_result = getArrayBySafeSeparator(result);
checkTypicalFlagInHADACResult(ary_result);
var resultContentTag = ary_result[2];
var objAlertArea=$('#'+strMixOuterFrameName+' .alert_area').get()[0];
objAlertArea.style.display = "none";
var objPrintArea=$('#'+strMixOuterFrameName+' .table_area').get()[0];
if( ary_result[0] == "000" ){
objPrintArea.innerHTML = resultContentTag;
adjustTableAuto (strMixInnerFramePrefix+'1',
"sDefault",
"fakeContainer_Filter1Print",
webStdTableHeight,
webStdTableWidth );
adjustTableAuto (strMixInnerFramePrefix+'2',
"sDefault",
"fakeContainer_ND_Filter1Sub",
webStdTableHeight,
webStdTableWidth );
}else if( ary_result[0] == "002" ){
window.alert(getSomeMessage("ITAWDCC90102"));
objAlertArea.innerHTML = ary_result[2];
objAlertArea.style.display = "block";
objPrintArea.innerHTML = "";
}else{
window.alert(getSomeMessage("ITAWDCC90101"));
}
showForDeveloper(result);
},
Mix1_1_updateTable : function( result ){
var strMixOuterFrameName = 'Mix1_Nakami';
var strMixInnerFramePrefix = 'Mix1_';
var ary_result = getArrayBySafeSeparator(result);
checkTypicalFlagInHADACResult(ary_result);
var resultContentTag = ary_result[2];
var objAlertArea=$('#'+strMixOuterFrameName+' .alert_area').get()[0];
if( ary_result[0] == "000" ){
var objUpdateArea=$('#'+strMixOuterFrameName+' .table_area').get()[0];
switch( ary_result[1] ){
case "200":
// Update completed without errors
case "100":
window.alert(ary_result[2]);
objUpdateArea.innerHTML = "";
search_async();
break;
default:
objUpdateArea.innerHTML="";
$(objUpdateArea).html(resultContentTag);
adjustTableAuto (strMixInnerFramePrefix+'1',
"sDefault",
"fakeContainer_Update1",
webStdTableHeight,
webStdTableWidth );
linkDateInputHelper(strMixOuterFrameName);
}
}else if( ary_result[0] == "002" ){
window.alert(getSomeMessage("ITAWDCC90102"));
objAlertArea.innerHTML = resultContentTag;
objAlertArea.style.display = "block";
setInputButtonDisable(strMixOuterFrameName,'disableAfterPush',false);
}else if( ary_result[0] == "003" ){
var objUpdateArea=$('#'+strMixOuterFrameName+' .table_area').get()[0];
objUpdateArea.innerHTML="";
objAlertArea.innerHTML = resultContentTag;
objAlertArea.style.display = "block";
}else{
window.alert(getSomeMessage("ITAWDCC90101"));
}
showForDeveloper(result);
},
Mix1_1_deleteTable : function( result ){
var strMixOuterFrameName = 'Mix1_Nakami';
var strMixInnerFramePrefix = 'Mix1_';
var ary_result = getArrayBySafeSeparator(result);
checkTypicalFlagInHADACResult(ary_result);
var resultContentTag = ary_result[2];
var objAlertArea=$('#'+strMixOuterFrameName+' .alert_area').get()[0];
if( ary_result[0] == "000" ){
var objDeleteArea=$('#'+strMixOuterFrameName+' .table_area').get()[0];
switch( ary_result[1] ){
case "210":
// Discard completed without errors
case "200":
// Restore completed without errors
case "100":
window.alert(resultContentTag);
objDeleteArea.innerHTML = "";
search_async();
break;
default:
objDeleteArea.innerHTML="";
objDeleteArea.insertAdjacentHTML("beforeend",resultContentTag);
adjustTableAuto (strMixInnerFramePrefix+'1',
"sDefault",
"fakeContainer_Delete1",
webStdTableHeight,
webStdTableWidth );
}
}else if( ary_result[0] == "002" ){
window.alert(getSomeMessage("ITAWDCC90102"));
objAlertArea.innerHTML = resultContentTag;
objAlertArea.style.display = "block";
setInputButtonDisable(strMixOuterFrameName,'disableAfterPush',false);
}else if( ary_result[0] == "003" ){
var objDeleteArea=$('#'+strMixOuterFrameName+' .table_area').get()[0];
objDeleteArea.innerHTML="";
objAlertArea.innerHTML = resultContentTag;
objAlertArea.style.display = "block";
}else{
window.alert(getSomeMessage("ITAWDCC90101"));
}
showForDeveloper(result);
},
Mix2_1_registerTable : function( result ){
var strMixOuterFrameName = 'Mix2_Nakami';
var strMixInnerFramePrefix = 'Mix2_';
var ary_result = getArrayBySafeSeparator(result);
checkTypicalFlagInHADACResult(ary_result);
|
var objAlertArea=$('#'+strMixOuterFrameName+' .alert_area').get()[0];
if( ary_result[0] == "000" ){
var objRegiterArea=$('#'+strMixOuterFrameName+' .table_area').get()[0];
switch( ary_result[1] ){
case "100":
window.alert(resultContentTag);
objRegiterArea.innerHTML = "";
search_async();
break;
case "201":
// Registration completed without errors
default:
objRegiterArea.innerHTML="";
$(objRegiterArea).html(resultContentTag);
objAlertArea.style.display = "none";
adjustTableAuto (strMixInnerFramePrefix+'1',
"sDefault",
"fakeContainer_Register2",
webStdTableHeight,
webStdTableWidth );
linkDateInputHelper(strMixOuterFrameName);
}
}else if( ary_result[0] == "002" ){
window.alert(getSomeMessage("ITAWDCC90102"));
objAlertArea.innerHTML = resultContentTag;
objAlertArea.style.display = "block";
setInputButtonDisable(strMixOuterFrameName,'disableAfterPush',false);
}else if( ary_result[0] == "003" ){
var objRegiterArea=$('#'+strMixOuterFrameName+' .table_area').get()[0];
objRegiterArea.innerHTML="";
objAlertArea.innerHTML = resultContentTag;
objAlertArea.style.display = "block";
}else{
window.alert(getSomeMessage("ITAWDCC90101"));
}
showForDeveloper(result);
},
Journal1Tbl_printJournal : function( result ){
var strMixOuterFrameName = 'Journal1_Nakami';
var strMixInnerFrame = 'Journal1Tbl';
var ary_result = getArrayBySafeSeparator(result);
var resultContentTag = ary_result[2];
var objAlertArea=$('#'+strMixOuterFrameName+' .alert_area').get()[0];
checkTypicalFlagInHADACResult(ary_result);
objAlertArea.style.display = "none";
var objPrintArea=$('#'+strMixOuterFrameName+' .table_area').get()[0];
if( ary_result[0] == "000" ){
objPrintArea.innerHTML = resultContentTag;
adjustTableAuto (strMixInnerFrame,
"sDefault",
"fakeContainer_Journal1Print",
webStdTableHeight,
webStdTableWidth );
}else if( ary_result[0] == "002" ){
window.alert(getSomeMessage("ITAWDCC90102"));
objAlertArea.innerHTML = resultContentTag;
objAlertArea.style.display = "block";
objPrintArea.innerHTML = "";
}else{
window.alert(getSomeMessage("ITAWDCC90101"));
}
showForDeveloper(result);
}
//---- Start of area for custom [callback] methods
// End of area for custom [callback] methods ----
}
//////// Table layout settings ////////
var pageType;
var privilege;
var webPrintRowLimit;
var webPrintRowConfirm;
var webStdTableWidth;
var webStdTableHeight;
var msgTmpl = {};
//////// Processing run once when the page is generated ////////
var proxy = new Db_Access(new callback());
var filter_on_flag = false;
window.onload = function(){
var filter1AreaWrap = 'Filter1_Nakami';
pageType = document.getElementById('pageType').innerHTML;
privilege = parseInt(document.getElementById('privilege').innerHTML);
webPrintRowConfirm = parseInt(document.getElementById('sysWebRowConfirm').innerHTML);
webPrintRowLimit = parseInt(document.getElementById('sysWebRowLimit').innerHTML);
webStdTableWidth = document.getElementById('webStdTableWidth').innerHTML;
webStdTableHeight = document.getElementById('webStdTableHeight').innerHTML;
// Show the "please wait" message
var objTableArea = $('#'+filter1AreaWrap+' .table_area').get()[0];
objTableArea.innerHTML = "<div class=\"wait_msg\" >"+getSomeMessage("ITAWDCC10102")+"</div>";
proxy.Filter1Tbl_reload(0);
// ---- Site-specific pre-processing
// Site-specific pre-processing ----
// Show the initial message in the table display area
//---- Note: the list is displayed here.
document.getElementById('table_area').innerHTML = getSomeMessage("ITAWDCC10101");
if(privilege != 2){
// Display the initial registration HTML
show('Mix2_Midashi' ,'Mix2_Nakami' );
register_async(0);
}
// ---- Site-specific pre-processing
// Site-specific pre-processing ----
show('SetsumeiMidashi' ,'SetsumeiNakami' );
show('Mix1_Midashi' ,'Mix1_Nakami' );
show('AllDumpMidashi' ,'AllDumpNakami' );
show('Journal1_Midashi' ,'Journal1_Nakami' );
// ---- Site-specific menu starts here
// Site-specific menu ends here ----
}
//////// Callback functions ---- ////////
//////// ---- Select tag append function ////////
function add_selectbox( show_seq ){
proxy.Filter1Tbl_add_selectbox(show_seq);
}
//////// Select tag append function ---- ////////
//////// ---- Display filter reset function ////////
function reset_filter(boolBack){
// Clear (reset) the search conditions
var filterAreaWrap = 'Filter1_Nakami';
var strMixOuterFrameName = 'Mix1_Nakami';
if( boolBack===true ){
var objHyoujiFlag = $('#'+strMixOuterFrameName+' .hyouji_flag').get()[0];
if( objHyoujiFlag != null ){
// If the list is already displayed (when the auto filter is on, refresh the list)
var objFCSL = $('#'+filterAreaWrap+' .filter_ctl_start_limit').get()[0];
if( objFCSL == null){
}else{
if( objFCSL.value == 'on' && objFCSL.checked == true ){
// The tag exists and the auto filter checkbox is checked
//---- Redisplay the list?
if( window.confirm( getSomeMessage("ITAWDCC20204")) ){
search_async();
}
}
}
}
}else{
proxy.Filter1Tbl_reload(1);
}
}
//////// Display filter reset function ---- ////////
//////// ---- Function that decides whether to call search_async ////////
function pre_search_async(inputedCode){
// ---- If the Enter key was pressed
if( inputedCode == 13 ){
search_async('keyInput13');
}
// If the Enter key was pressed ----
}
//////// Function that decides whether to call search_async ---- ////////
//////// ---- Filter result display function [1] ////////
function search_async( value1 ){
var filterAreaWrap = 'Filter1_Nakami';
var printAreaWrap = 'Mix1_Nakami';
var printAreaHead = 'Mix1_Midashi';
var exec_flag = true;
// Prepare the arguments
var filter_data = $("#"+filterAreaWrap+" :input").serializeArray();
exec_flag = search_control(exec_flag, value1);
var objUpdTag = $('#'+printAreaWrap+' .editing_flag').get()[0];
if ( objUpdTag != null ){
// While in an edit mode (update/discard/restore), ask whether it is OK to return to SELECT mode
if( exec_flag == true ){
//---- Maintenance is in progress; is it OK to abort?
if( !window.confirm( getSomeMessage("ITAWDCC20203") ) ){
exec_flag = false;
}
}
}
if( exec_flag ){
// Blank out the update alert output area and then hide it
var objAlertArea=$('#'+printAreaWrap+' .alert_area').get()[0];
objAlertArea.innerHTML = "";
objAlertArea.style.display = "none";
// Clear the table display area once
var objTableArea=$('#'+printAreaWrap+' .table_area').get()[0];
//---- Note: the list is displayed here.
objTableArea.innerHTML = "";
// Open the table display area
if( checkOpenNow(printAreaWrap)===false ){
show(printAreaHead, printAreaWrap);
}
// For IE only, call the function that opens and closes all sections to rebuild the screen
restruct_for_IE();
// Execute proxy.Filter1Tbl_recCount
proxy.Filter1Tbl_recCount(filter_data);
}
}
//////// Filter result display function [1] ---- ////////
//////// ---- Filter result display function [2] ////////
function search_control( exec_flag_var, value1 ){
var filterAreaWrap = 'Filter1_Nakami';
var exec_flag_ret = true;
if( typeof(value1) == 'undefined' ){
// If value1 is undefined
exec_flag_ret = exec_flag_var;
}else{
if( exec_flag_var == false ){
exec_flag_ret = false;
}else{
var objFCSL = $('#'+filterAreaWrap+' .filter_ctl_start_limit').get()[0];
if(objFCSL == null){
// If the auto-start control tag is missing, treat it as a system error.
// A system error has occurred.
alert( getSomeMessage("ITAWDCC20205") );
return false;
}else{
if( objFCSL.value == 'on' ){
// The auto-start control tag exists, so auto filter start may be suppressed
exec_flag_ret = false;
if( value1 == 'orderFromFilterCmdBtn' ){
// If the filter button was pressed, start with no conditions ----
exec_flag_ret = true;
}else if( value1 == 'idcolumn_filter_default' || value1 == 'keyInput13' ){
if( objFCSL.checked == true ){
// The auto-start control tag exists and the auto filter checkbox is checked
exec_flag_ret = true;
}
}else{
exec_flag_ret = true;
}
}
}
}
}
return exec_flag_ret;
}
//////// Filter result display function [2] ---- ////////
//////// ---- Function for specifying search conditions ////////
function print_async( intPrintMode ){
var filterAreaWrap = 'Filter1_Nakami';
var printAreaWrap = 'Mix1_Nakami';
var printAreaHead = 'Mix1_Midashi';
var filter_data=$('#'+filterAreaWrap+' :input').serializeArray();
// Open the table display area
if( checkOpenNow(printAreaWrap)===false ){
show(printAreaHead, printAreaWrap);
}
// Show the "please wait" message
var objTableArea = $('#'+printAreaWrap+' .table_area').get()[0];
objTableArea.innerHTML = "<div class=\"wait_msg\" >"+getSomeMessage("ITAWDCC10102")+"</div>";
// For IE only, call the function that opens and closes all sections to rebuild the screen
restruct_for_IE();
// Execute proxy.Filter1Tbl_printTable
proxy.Filter1Tbl_printTable(intPrintMode, filter_data);
}
//////// Function for specifying search conditions ---- ////////
//////// ---- Function that decides whether to return to the initial registration screen ////////
function pre_register_async( mode ){
//---- Registration is in progress; is it OK to abort?
if( window.confirm( getSomeMessage("ITAWDCC20202")) ){
register_async(0);
}
}
//////// Function that decides whether to return to the initial registration screen ---- ////////
//////// ---- Registration screen transition function ////////
function register_async( mode ){
var registerAreaWrap = 'Mix2_Nakami';
// Initialize the alert area
var objAlertArea = $('#'+registerAreaWrap+' .alert_area').get()[0];
objAlertArea.innerHTML = '';
objAlertArea.style.display = "none";
// For IE only, call the function that opens and closes all sections to rebuild the screen
restruct_for_IE();
// Initialize the alert message variable
var alt_str = '';
// Flag for whether the registerTable function needs to be called
var exec_flag = true;
// Determine behavior by mode
switch( mode ){
case 0 :
// Initial screen (mode=0)
// No arguments need to be prepared
break;
case 1 :
// Registration form screen (mode=1)
// No arguments need to be prepared
break;
case 2 :
// Registration execution and result screen (mode=2)
// Checks performed at registration time
//---- Is it OK to execute the registration?
if( window.confirm(getSomeMessage("ITAWDCC20101")) == false ){
exec_flag = false;
}else{
setInputButtonDisable(registerAreaWrap,'disableAfterPush',true);
}
break;
}
if( exec_flag ){
// Execute proxy.registerTable
var registerData = $('#'+registerAreaWrap+' :input').serializeArray();
proxy.Mix2_1_registerTable(mode, registerData);
}
}
//////// Registration screen transition function ---- ////////
//////// ---- Update screen transition function ////////
function update_async( mode, inner_seq, updateAreaName ){
var updateAreaWrap = 'Mix1_Nakami';
// Initialize the alert message variable
var alt_str = '';
// Flag for whether the updateTable function needs to be called
var exec_flag = true;
// Determine behavior by mode
switch( mode ){
case 1 :
// Transition to the update screen (mode=1)
// Initialize the alert area
var objAlertArea = $('#'+updateAreaWrap+' .alert_area').get()[0];
objAlertArea.innerHTML = '';
objAlertArea.style.display = "none";
// For IE only, call the function that opens and closes all sections to rebuild the screen
restruct_for_IE();
break;
case 2 :
// Return from the update screen to the list (mode=2)
// Turn off the call-needed flag
exec_flag = false;
// Call search_async
search_async();
break;
case 3 :
// Execute pressed on the update screen (mode=3)
//---- Is it OK to execute the update?
if( window.confirm( getSomeMessage("ITAWDCC20102") ) ){
// Initialize the alert area
var objAlertArea = $('#'+updateAreaWrap+' .alert_area').get()[0];
objAlertArea.innerHTML = '';
objAlertArea.style.display = "none";
setInputButtonDisable(updateAreaWrap,'disableAfterPush',true);
// For IE only, call the function that opens and closes all sections to rebuild the screen
restruct_for_IE();
}else{
exec_flag = false;
}
break;
}
if(exec_flag){
var updateData = $('#'+updateAreaWrap+' :input').serializeArray();
// Execute proxy.updateTable
proxy.Mix1_1_updateTable( mode, inner_seq, updateData);
}
}
//////// Update screen transition function ---- ////////
//////// ---- Delete screen transition function ////////
function delete_async( mode, inner_seq ){
var deleteAreaWrap = 'Mix1_Nakami';
// Initialize the alert message variable
var alt_str = '';
// Flag for whether the deleteTable function needs to be called
var exec_flag = true;
// Determine behavior by mode
switch( mode ){
case 1 :
// Transition to the discard screen (mode=1)
// Initialize the alert area
var objAlertArea = $('#'+deleteAreaWrap+' .alert_area').get()[0];
objAlertArea.innerHTML = '';
objAlertArea.style.display = "none";
// For IE only, call the function that opens and closes all sections to rebuild the screen
restruct_for_IE();
break;
case 2 :
// Return from the discard screen to the list (mode=2)
// Turn off the call-needed flag
exec_flag = false;
// Call search_async
search_async();
break;
case 3 :
// Execute pressed on the discard screen (mode=3)
//---- Is it OK to discard this record?
if( window.confirm( getSomeMessage("ITAWDCC20103") ) ){
// Initialize the alert area
var objAlertArea = $('#'+deleteAreaWrap+' .alert_area').get()[0];
objAlertArea.innerHTML = '';
objAlertArea.style.display = "none";
setInputButtonDisable(deleteAreaWrap,'disableAfterPush',true);
// For IE only, call the function that opens and closes all sections to rebuild the screen
restruct_for_IE();
}else{
exec_flag = false;
}
break;
case 4 :
// Transition to the restore screen (mode=4)
// Initialize the alert area
var objAlertArea = $('#'+deleteAreaWrap+' .alert_area').get()[0];
objAlertArea.innerHTML = '';
objAlertArea.style.display = "none";
// For IE only, call the function that opens and closes all sections to rebuild the screen
restruct_for_IE();
break;
case 5 :
// Execute pressed on the restore screen (mode=5)
//---- Is it OK to restore this record?
if( window.confirm( getSomeMessage("ITAWDCC20104") ) ){
// Initialize the alert area
var objAlertArea = $('#'+deleteAreaWrap+' .alert_area').get()[0];
objAlertArea.innerHTML = '';
objAlertArea.style.display = "none";
setInputButtonDisable(deleteAreaWrap,'disableAfterPush',true);
// For IE only, call the function that opens and closes all sections to rebuild the screen
restruct_for_IE();
}else{
exec_flag = false;
}
break;
}
if(exec_flag){
var updateData = $('#'+deleteAreaWrap+' :input').serializeArray();
// Execute proxy.deleteTable
proxy.Mix1_1_deleteTable(mode, inner_seq, updateData);
}
}
//////// Delete screen transition function ---- ////////
//////// ---- History search condition clear (reset) function ////////
function Journal1Tbl_reset_query(){
var journal1AreaWrap = 'Journal1_Nakami';
// Clear (reset) the search conditions
$('#'+journal1AreaWrap+' :input:not(:button)').each(function(){this.value=""});
}
//////// History search condition clear (reset) function ---- ////////
//////// ---- Function that decides whether to call search_journal_async ////////
function Journal1Tbl_pre_search_async(inputedCode){
if( inputedCode == 13 ){
Journal1Tbl_search_async();
}
}
//////// Function that decides whether to call search_journal_async ---- ////////
//////// ---- Function for specifying history search conditions ////////
function Journal1Tbl_search_async(){
// Initialize the history search execution flag
var journal1AreaWrap = 'Journal1_Nakami';
var exec_flag = true;
// Run the search when the execution flag is true
if( exec_flag == true ){
// Show the "please wait" message
var objTableArea = $('#'+journal1AreaWrap+' .table_area').get()[0];
objTableArea.innerHTML = "<div class=\"wait_msg2\" >"+getSomeMessage("ITAWDCC10102")+"</div>";
var filterData = $('#'+journal1AreaWrap+' :input:not(:button)').serializeArray();
proxy.Journal1Tbl_printJournal(filterData);
}
}
//////// Function for specifying history search conditions ---- ////////
//////// ---- General-purpose functions ////////
function setInputButtonDisable(rangeId,targetClass,toValue){
if(toValue === true){
$('#'+rangeId+' .'+targetClass).attr("disabled",true);
}else{
$('#'+rangeId+' .'+targetClass).removeAttr("disabled");
}
}
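// Usage sketch (illustrative values, same identifiers as used elsewhere in this file):
// setInputButtonDisable('Mix1_Nakami', 'disableAfterPush', true) disables every element with
// class "disableAfterPush" inside #Mix1_Nakami; passing false re-enables them.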
//////// General-purpose functions ---- ////////
//---- Start of area for custom general-purpose methods
// End of area for custom general-purpose methods ----
function setFilter_special(){
// Split the values sent via GET on '='
var pj_name_org = decodeURIComponent(location.search);
var initialFilterEl;
var initialFilter = 0;
if( pj_name_org != "" ){
var parameters = pj_name_org.substring(1).split('&');
for (var i = 0; i < parameters.length; i++) {
// Split into parameter name and parameter value
var element = parameters[i].split('=');
if( element[0] === 'task_no' ){
// ('Filter1Tbl___S') specifies where the value is stored (enter the column number counted skipping the discard column; the item number is stored in two places, so both _S and _E are set)
var objTgtNum = document.getElementById('Filter1Tbl_1__S');
objTgtNum.value = element[1];
var objTgtNum = document.getElementById('Filter1Tbl_1__E');
objTgtNum.value = element[1];
initialFilter = 2;
search_async('orderFromFilterCmdBtn');
}
}
}
if( initialFilter != 2 ){
initialFilterEl = document.getElementById('sysInitialFilter');
if(initialFilterEl == null){
initialFilter = 2;
}
else{
initialFilter = initialFilterEl.innerHTML;
}
if(initialFilter == 1){
search_async('orderFromFilterCmdBtn');
}
}
} | var resultContentTag = ary_result[2]; |
test_basic.py | # Copyright (c) 2008-2015 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
from numpy.testing import assert_array_equal
from metpy.units import units
from metpy.testing import assert_almost_equal, assert_array_almost_equal
from metpy.calc.basic import * # noqa
def test_wind_comps_basic():
'Test the basic wind component calculation.'
speed = np.array([4, 4, 4, 4, 25, 25, 25, 25, 10.]) * units.mph
dirs = np.array([0, 45, 90, 135, 180, 225, 270, 315, 360]) * units.deg
s2 = np.sqrt(2.)
u, v = get_wind_components(speed, dirs)
true_u = np.array([0, -4 / s2, -4, -4 / s2, 0, 25 / s2, 25, 25 / s2, 0]) * units.mph
true_v = np.array([-4, -4 / s2, 0, 4 / s2, 25, 25 / s2, 0, -25 / s2, -10]) * units.mph
assert_array_almost_equal(true_u, u, 4)
assert_array_almost_equal(true_v, v, 4)
def test_wind_comps_scalar():
'Test scalar wind components'
u, v = get_wind_components(8 * units('m/s'), 150 * units.deg)
assert_almost_equal(u, -4 * units('m/s'), 3)
assert_almost_equal(v, 6.9282 * units('m/s'), 3)
def test_speed():
'Basic test of wind speed calculation'
u = np.array([4., 2., 0., 0.]) * units('m/s')
v = np.array([0., 2., 4., 0.]) * units('m/s')
speed = get_wind_speed(u, v)
s2 = np.sqrt(2.)
true_speed = np.array([4., 2 * s2, 4., 0.]) * units('m/s')
assert_array_almost_equal(true_speed, speed, 4)
def test_dir():
'Basic test of wind direction calculation'
u = np.array([4., 2., 0., 0.]) * units('m/s')
v = np.array([0., 2., 4., 0.]) * units('m/s')
direc = get_wind_dir(u, v)
true_dir = np.array([270., 225., 180., 270.]) * units.deg
assert_array_almost_equal(true_dir, direc, 4)
def test_speed_dir_roundtrip():
'Convert from wind speed and direction to u,v and back'
# Test each quadrant of the whole circle
wspd = np.array([15., 5., 2., 10.]) * units.meters / units.seconds
wdir = np.array([160., 30., 225., 350.]) * units.degrees
u, v = get_wind_components(wspd, wdir)
wdir_out = get_wind_dir(u, v)
wspd_out = get_wind_speed(u, v)
assert_array_almost_equal(wspd, wspd_out, 4)
assert_array_almost_equal(wdir, wdir_out, 4)
def test_scalar_speed():
'Test wind speed with scalars'
s = get_wind_speed(-3. * units('m/s'), -4. * units('m/s'))
assert_almost_equal(s, 5. * units('m/s'), 3)
def test_scalar_dir():
'Test wind direction with scalars'
d = get_wind_dir(3. * units('m/s'), 4. * units('m/s'))
assert_almost_equal(d, 216.870 * units.deg, 3)
def test_windchill_scalar():
'Test wind chill with scalars'
wc = windchill(-5 * units.degC, 35 * units('m/s'))
assert_almost_equal(wc, -18.9357 * units.degC, 0)
def test_windchill_basic():
'Test the basic wind chill calculation.'
temp = np.array([40, -10, -45, 20]) * units.degF
speed = np.array([5, 55, 25, 15]) * units.mph
wc = windchill(temp, speed)
values = np.array([36, -46, -84, 6]) * units.degF
assert_array_almost_equal(wc, values, 0)
def test_windchill_invalid():
'Test for values that should be masked.'
temp = np.array([10, 51, 49, 60, 80, 81]) * units.degF
speed = np.array([4, 4, 3, 1, 10, 39]) * units.mph
wc = windchill(temp, speed)
mask = np.array([False, True, True, True, True, True])
assert_array_equal(wc.mask, mask)
def test_windchill_undefined_flag():
'Tests whether masking values can be disabled.'
temp = units.Quantity(np.ma.array([49, 50, 49, 60, 80, 81]), units.degF)
speed = units.Quantity(([4, 4, 3, 1, 10, 39]), units.mph)
wc = windchill(temp, speed, mask_undefined=False)
mask = np.array([False] * 6)
assert_array_equal(wc.mask, mask)
def test_windchill_face_level():
'Tests using the face_level flag'
temp = np.array([20, 0, -20, -40]) * units.degF
speed = np.array([15, 30, 45, 60]) * units.mph | values = np.array([3, -30, -64, -98]) * units.degF
assert_array_almost_equal(wc, values, 0)
def test_heat_index_basic():
'Test the basic heat index calculation.'
temp = np.array([80, 88, 92, 110]) * units.degF
rh = np.array([40, 100, 70, 40]) * units.percent
hi = heat_index(temp, rh)
values = np.array([80, 121, 112, 136]) * units.degF
assert_array_almost_equal(hi, values, 0)
def test_heat_index_scalar():
'Test heat index using scalars'
hi = heat_index(96 * units.degF, 65 * units.percent)
assert_almost_equal(hi, 121 * units.degF, 0)
def test_heat_index_invalid():
'Test for values that should be masked.'
temp = np.array([80, 88, 92, 79, 30, 81]) * units.degF
rh = np.array([40, 39, 2, 70, 50, 39]) * units.percent
hi = heat_index(temp, rh)
mask = np.array([False, True, True, True, True, True])
assert_array_equal(hi.mask, mask)
def test_heat_index_undefined_flag():
'Tests whether masking values can be disabled.'
temp = units.Quantity(np.ma.array([80, 88, 92, 79, 30, 81]), units.degF)
rh = np.ma.array([40, 39, 2, 70, 50, 39]) * units.percent
hi = heat_index(temp, rh, mask_undefined=False)
mask = np.array([False] * 6)
assert_array_equal(hi.mask, mask)
def test_heat_index_units():
'Test units coming out of heat index'
temp = units.Quantity([35., 20.], units.degC)
rh = 70 * units.percent
hi = heat_index(temp, rh)
assert_almost_equal(hi.to('degC'), units.Quantity([50.3405, np.nan], units.degC), 4)
def test_heat_index_ratio():
'Test giving humidity as number [0, 1]'
temp = units.Quantity([35., 20.], units.degC)
rh = 0.7
hi = heat_index(temp, rh)
assert_almost_equal(hi.to('degC'), units.Quantity([50.3405, np.nan], units.degC), 4)
# class TestIrrad(object):
# def test_basic(self):
# 'Test the basic solar irradiance calculation.'
# from datetime import date
# d = date(2008, 9, 28)
# lat = 35.25
# hours = np.linspace(6,18,10)
# s = solar_irradiance(lat, d, hours)
# values = np.array([0., 344.1, 682.6, 933.9, 1067.6, 1067.6, 933.9,
# 682.6, 344.1, 0.])
# assert_array_almost_equal(s, values, 1)
# def test_scalar(self):
# from datetime import date
# d = date(2008, 9, 28)
# lat = 35.25
# hour = 9.5
# s = solar_irradiance(lat, d, hour)
# assert_almost_equal(s, 852.1, 1)
# def test_invalid(self):
# 'Test for values that should be masked.'
# from datetime import date
# d = date(2008, 9, 28)
# lat = 35.25
# hours = np.linspace(0,22,12)
# s = solar_irradiance(lat, d, hours)
# mask = np.array([ True, True, True, True, False, False, False,
# False, False, True, True, True])
# assert_array_equal(s.mask, mask)
def test_pressure_to_heights_basic():
'Tests basic pressure to height calculation.'
pressures = np.array([975.2, 987.5, 956., 943.]) * units.mbar
heights = pressure_to_height_std(pressures)
values = np.array([321.5, 216.5, 487.6, 601.7]) * units.meter
assert_almost_equal(heights, values, 1) |
wc = windchill(temp, speed, face_level_winds=True) |
shard.py | import database
def load_shard_from_db(conf):
#TODO: load shard from cache if exists
shards = database.load_shard(conf)
return shards
def get_shard(shards, url):
"""
Hash function for the sharding scheme
returns a dict with hostname and table name
Eg: s = { 'hostname': 'node1', 'table_name': 'url_s1'}
"""
if not shards:
return {} | else:
return shards[hash(str(url['hostname'])+str(url['port'])+str(url['path'])) % len(shards)] |
|
lhgcore.js | /*
*@lhgcore - JavaScript Library v1.0.0 - Date : 2009-7-14
*@Copyright lhgcore.js (c) 2009 By LiHuiGang Reserved | */
(function(){var g=window.lhgcore=window.J=function(a,d){return g.ret.init(a,d)};g.ret=g.prototype={init:function(a,d){a=(a=='body')?document.body:(a=='doc')?document:a;if('string'==typeof(a)){if(a.indexOf('#')==0){var b=(d||document).getElementById(a.substr(1));if(b)return b;else return null}var b=(d||document).getElementById(a);if(b)return g(b);else return null}else{this[0]=a;this.length=1;return this}},html:function(t){if(t){this[0].innerHTML=t;return this}else return this[0].innerHTML},isnl:function(){var v=this[0].value;return(v==''||v.length==0)?true:false},val:function(v){if(v){this[0].value=v;return this}else return this[0].value},acls:function(c,p){this[0].className=p?this[0].className+' '+c:c;return this},rcls:function(){var a=g.ie?'className':'class';this[0].removeAttribute(a,0);return this},crte:function(e){return this[0].createElement(e)},apch:function(c,y){switch(y){case'pr':return this[0].insertBefore(c,this[0].firstChild);break;case'be':return this[0].parentNode.insertBefore(c,this[0]);break;case'af':return this[0].parentNode.insertBefore(c,this[0].nextSibling);break;default:return this[0].appendChild(c);break}},stcs:function(d,s){if(typeof(d)=='object'){for(var n in d)this[0].style[n]=d[n];return this}else{this[0].style[d]=s;return this}},gtcs:function(p){if(g.ie)return this[0].currentStyle[p];else return this[0].ownerDocument.defaultView.getComputedStyle(this[0],'').getPropertyValue(p)},gtag:function(n){return this[0].getElementsByTagName(n)},attr:function(k,v){if(typeof(k)=='object'){for(var n in k)this[0][n]=k[n];return this}if(v){this[0].setAttribute(k,v,0);return this}else{var a=this[0].attributes[k];if(a==null||!a.specified)return'';return this[0].getAttribute(k,2)}},ratt:function(n){var a=this[0].attributes[n];if(a==null||!a.specified)return this;this[0].removeAttribute(n,0);return this},aevt:function(n,f){if(g.ie)this[0].attachEvent('on'+n,f);else this[0].addEventListener(n,f,false);return this},revt:function(n,f){if(g.ie)this[0].detachEvent('on'+n,f);else this[0].removeEventListener(n,f,false);return this},alnk:function(c){if(g.ie)return this[0].createStyleSheet(c).owningElement;else{var e=this[0].createElement('link');e.rel='stylesheet';e.type='text/css';e.href=c;this[0].getElementsByTagName('head')[0].appendChild(e);return e}}};g.ret.init.prototype=g.ret;g.exend=g.ret.exend=function(){var a=arguments[0]||{},i=1,length=arguments.length,deep=false,options;if(a.constructor==Boolean){deep=a;a=arguments[1]||{};i=2}if(typeof a!='object'&&typeof a!='function')a={};if(length==i){a=this;--i}for(;i<length;i++)if((options=arguments[i])!=null)for(var b in options){var c=a[b],copy=options[b];if(a===copy)continue;if(deep&©&&typeof copy=='object'&&!copy.nodeType)a[b]=g.extend(deep,c||(copy.length!=null?[]:{}),copy);else if(copy!==undefined)a[b]=copy}return a};g.ret.exend({stopac:function(o){if(g.ie){o=Math.round(o*100);this[0].style.filter=(o>100?'':'alpha(opacity='+o+')')}else this[0].style.opacity=o},addentex:function(n,l,p){if(g.ie){var o={};o.source=this[0];o.params=p||[];o.listen=function(a){return l.apply(o.source,[a].concat(o.params))};if(g.clean)g.clean.items(null,function(){o.source=null;o.params=null});this[0].attachEvent('on'+n,o.listen);this[0]=null;p=null}else this[0].addEventListener(n,function(e){l.apply(this[0],[e].concat(p||[]))},false);return this},click:function(f){this[0].onclick=f;return this},blur:function(f){this[0].onblur=f;return this},focus:function(f){if(f)this[0].onfocus=f;else this[0].focus();return 
this},msdown:function(f){this[0].onmousedown=f;return this},msmove:function(f){this[0].onmousemove=f;return this},msover:function(f){this[0].onmouseover=f;return this},msout:function(f){this[0].onmouseout=f;return this},msup:function(f){this[0].onmouseup=f;return this},submit:function(f){if(f)this[0].onsubmit=f;else this[0].onsubmit();return this},cmenu:function(f){this[0].oncontextmenu=f;return this},hover:function(r,t){this[0].onmouseover=r;this[0].onmouseout=t;return this}});g.exend({build:'1.0.0',author:'LiHuiGang',path:function(t){t=t||'lhgcore.js';var a,len,sc=g('doc').gtag('script');for(var i=0;i<sc.length;i++){a=sc[i].src.substr(0,g.inde(sc[i].src.toLowerCase(),t));len=a.lastIndexOf('/');if(len>0)a=a.substr(0,len+1);if(a)break}if(g.ie&&g.inde(a,'../')!=-1){var b=window.location.href;b=b.substr(0,b.lastIndexOf('/'));while(g.inde(a,'../')>=0){a=a.substr(3);b=b.substr(0,b.lastIndexOf('/'))}return b+'/'+a}else return a},idtd:function(d){return('CSS1Compat'==(d.compatMode||'CSS1Compat'))},rech:function(c){if(c)return c.parentNode.removeChild(c)},gtev:function(){if(g.ie)return window.event;var a=this.gtev.caller;while(a!=null){var b=a.arguments[0];if(b&&(b+'').indexOf('Event')>=0)return b;a=a.caller}return null},trim:function(t){return(t||'').replace(/^\s+|\s+$/g,'')},inde:function(t,s){return t.indexOf(s)},edoc:function(a){return a.ownerDocument||a.document},ewin:function(a){return this.dwin(this.edoc(a))},dwin:function(d){if(g.sa&&!d.parentWindow)this.fixw(window.top);return d.parentWindow||d.defaultView},fixw:function(w){if(w.document)w.document.parentWindow=w;for(var i=0;i<w.frames.length;i++)g.fixw(w.frames[i])},vsiz:function(a){if(g.ie){var b,doc=a.document.documentElement;if(doc&&doc.clientWidth)b=doc;else b=a.document.body;if(b)return{w:b.clientWidth,h:b.clientHeight};else return{w:0,h:0}}else return{w:a.innerWidth,h:a.innerHeight}},spos:function(w){if(g.ie){var a=w.document;oPos={x:a.documentElement.scrollLeft,y:a.documentElement.scrollTop};if(oPos.x>0||oPos.y>0)return oPos;return{x:a.body.scrollLeft,y:a.body.scrollTop}}else return{x:w.pageXOffset,y:w.pageYOffset}},dpos:function(w,n){var x=0,y=0,cn=n,pn=null,cw=g.ewin(cn);while(cn&&!(cw==w&&(cn==w.document.body||cn==w.document.documentElement))){x+=cn.offsetLeft-cn.scrollLeft;y+=cn.offsetTop-cn.scrollTop;if(g.op){var a=pn;while(a&&a!=cn){x-=a.scrollLeft;y-=a.scrollTop;a=a.parentNode}}pn=cn;if(cn.offsetParent)cn=cn.offsetParent;else{if(cw!=w){cn=cw.frameElement;pn=null;if(cn)cw=cn.contentWindow.parent}else cn=null}}if(g(w.document.body).gtcs('position')!='static'||(g.ie&&g.gtan(n)==null)){x+=w.document.body.offsetLeft;y+=w.document.body.offsetTop}return{'x':x,'y':y}},gtan:function(e){var a=e;while(a!=g.edoc(a).documentElement){if(g(a).gtcs('position')!='static')return a;a=a.parentNode}return null},canc:function(e){if(g.ie)return false;else{if(e)e.preventDefault()}},empty:function(t){return(t==''||t.length==0)?true:false},dismn:function(e){var a=e||window.event,el=a.srcElement||a.target,tn=el.tagName;if(!((tn=='INPUT'&&el.type=='text')||tn=='TEXTAREA')){if(g.ie)return false;else{if(e)e.preventDefault()}}},nosel:function(o){if(g.ie){o.unselectable='on';var e,i=0;while((e=o.all[i++])){switch(e.tagName.toLowerCase()){case'iframe':case'textarea':case'input':case'select':break;default:e.unselectable='on'}}}else{if(g.mz)o.style.MozUserSelect='none';else if(g.sa)o.style.KhtmlUserSelect='none';else o.style.userSelect='none'}},gtvod:function(){if(g.ie)return(g.i7?'':'javascript:\'\'');return'javascript:void(0)'}});var 
j=navigator.userAgent.toLowerCase();g.exend({ie:/msie/.test(j)&&!/opera/.test(j),i7:(j.match(/msie (\d+)/)||[])[1]>=7&&!/opera/.test(j),ch:/chrome/.test(j),op:/opera/.test(j),sa:/webkit/.test(j)&&!/chrome/.test(j),mz:/mozilla/.test(j)&&!/(compatible|webkit)/.test(j)});g.exend({cleanup:function(){if(window._lhgcleanobj)this.citem=window._lhgcleanobj.citem;else{this.citem=[];window._lhgcleanobj=this;J(window).addentex('unload',this.lhg_clean)}}});g.exend(g.cleanup.prototype,{items:function(a,b){this.citem.push([a,b])},lhg_clean:function(){if(!this._lhgcleanobj)return;var a=this._lhgcleanobj.citem;while(a.length>0){var b=a.pop();if(b)b[1].call(b[0])}this._lhgcleanobj=null;g=null;if(CollectGarbage)CollectGarbage()}});if(g.ie)g.clean=new g.cleanup();J.exend({panel:function(b,w){this._win=window;var a,doc,r_win=[this._win];if(b){while(this._win.parent&&this._win.parent!=this._win){try{if(this._win.parent.document.domain!=document.domain)break}catch(e){break}this._win=this._win.parent;r_win.push(this._win)}}if(w){for(var i=0;i<w.length;i++)r_win.push(w[i])}a=this._ifrm=J(this._win.document).crte('iframe');J(a).attr({src:'javascript:void(0)',frameBorder:0,scrolling:'no'}).stcs({display:'none',position:'absolute',zIndex:19700});J(this._win.document.body).apch(a);doc=this._doc=a.contentWindow.document;if(J.ie)g.clean.items(this,this.p_clean);var c='';if(J.sa)c='<base href="'+window.document.location+'">';doc.open();doc.write('<html><head>'+c+'<\/head><body style="margin:0px;padding:0px;"><\/body><\/html>');doc.close();for(var i=0;i<r_win.length;i++)J(r_win[i].document).addentex('click',this.hide,this);J(doc).aevt('contextmenu',J.dismn);this._main=J(doc.body).apch(doc.createElement('div'));this._main.style.cssFloat='left'}});J.exend(J.panel.prototype,{applnk:function(l){J(this._doc).alnk(l)},show:function(x,y,e,w,h){var a=this._main,iw,ih;J(this._ifrm).stcs('display','block');J(a).stcs({width:w?w+'px':'',height:h?h+'px':''});iw=a.offsetWidth;ih=a.offsetHeight;if(!w)this._ifrm.style.width='1px';if(!h)this._ifrm.style.height='1px';iw=a.offsetWidth||a.firstChild.offsetWidth;var b=e.nodeType==9?J.idtd(e)?e.documentElement:e.body:e;var c=J.dpos(this._win,b);x+=c.x;y+=c.y;var d=J.vsiz(this._win),sp=J.spos(this._win),vh=d.h+sp.y,vw=d.w+sp.x;if((x+iw)>vw)x-=x+iw-vw;if((y+ih)>vh)y-=y+ih-vh;J(this._ifrm).stcs({left:x+'px',top:y+'px',width:iw+'px',height:ih+'px'})},hide:function(e,a){J(a._ifrm).stcs('display','none')},p_clean:function(){this._main=null;this._doc=null;this._ifrm=null;this._win=null}});g.ajax=g.A={geth:function(){try{return new ActiveXObject('Msxml2.XMLHTTP')}catch(e){}try{return new XMLHttpRequest()}catch(e){}return null},send:function(u,m,p,f,x){m=m?m.toLocaleUpperCase():'GET';x=x?x:0;p=p?p+'&uuid='+new Date().getTime():null;var a=(typeof(f)=='function'),ret;var b=this.geth();b.open(m,u,a);if(a){b.onreadystatechange=function(){if(b.readyState==4){ret=(x==0)?b.responseText:b.responseXML;f(ret);delete(b);return}else return false}}if(m=='GET')b.send(null);else{b.setRequestHeader('content-type','application/x-www-form-urlencoded');if(p)b.send(p);else return false}if(!a){if(b.readyState==4&&b.status==200){ret=(x==0)?b.responseText:b.responseXML;delete(b);return ret}else return false}}}})(); |
|
trgmux_lpi2c0.rs | #[doc = "Register `TRGMUX_LPI2C0` reader"]
pub struct R(crate::R<TRGMUX_LPI2C0_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<TRGMUX_LPI2C0_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<crate::R<TRGMUX_LPI2C0_SPEC>> for R {
#[inline(always)]
fn from(reader: crate::R<TRGMUX_LPI2C0_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Register `TRGMUX_LPI2C0` writer"]
pub struct W(crate::W<TRGMUX_LPI2C0_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<TRGMUX_LPI2C0_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl From<crate::W<TRGMUX_LPI2C0_SPEC>> for W {
#[inline(always)]
fn from(writer: crate::W<TRGMUX_LPI2C0_SPEC>) -> Self {
W(writer)
}
}
#[doc = "Field `SEL0` reader - Trigger MUX Input 0 Source Select"]
pub struct SEL0_R(crate::FieldReader<u8, u8>);
impl SEL0_R {
#[inline(always)]
pub(crate) fn new(bits: u8) -> Self {
SEL0_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for SEL0_R {
type Target = crate::FieldReader<u8, u8>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `SEL0` writer - Trigger MUX Input 0 Source Select"]
pub struct SEL0_W<'a> {
w: &'a mut W,
}
impl<'a> SEL0_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !0x3f) | (value as u32 & 0x3f);
self.w
}
}
#[doc = "TRGMUX register lock.\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum LK_A {
#[doc = "0: Register can be written."]
_0 = 0,
#[doc = "1: Register cannot be written until the next system Reset."]
_1 = 1,
}
impl From<LK_A> for bool {
#[inline(always)]
fn from(variant: LK_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `LK` reader - TRGMUX register lock."]
pub struct LK_R(crate::FieldReader<bool, LK_A>);
impl LK_R {
#[inline(always)]
pub(crate) fn new(bits: bool) -> Self {
LK_R(crate::FieldReader::new(bits))
}
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> LK_A {
match self.bits {
false => LK_A::_0,
true => LK_A::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline(always)]
pub fn is_0(&self) -> bool {
**self == LK_A::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline(always)]
pub fn is_1(&self) -> bool {
**self == LK_A::_1
}
}
impl core::ops::Deref for LK_R {
type Target = crate::FieldReader<bool, LK_A>;
#[inline(always)]
fn deref(&self) -> &Self::Target |
}
#[doc = "Field `LK` writer - TRGMUX register lock."]
pub struct LK_W<'a> {
w: &'a mut W,
}
impl<'a> LK_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: LK_A) -> &'a mut W {
self.bit(variant.into())
}
#[doc = "Register can be written."]
#[inline(always)]
pub fn _0(self) -> &'a mut W {
self.variant(LK_A::_0)
}
#[doc = "Register cannot be written until the next system Reset."]
#[inline(always)]
pub fn _1(self) -> &'a mut W {
self.variant(LK_A::_1)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 31)) | ((value as u32 & 0x01) << 31);
self.w
}
}
impl R {
#[doc = "Bits 0:5 - Trigger MUX Input 0 Source Select"]
#[inline(always)]
pub fn sel0(&self) -> SEL0_R {
SEL0_R::new((self.bits & 0x3f) as u8)
}
#[doc = "Bit 31 - TRGMUX register lock."]
#[inline(always)]
pub fn lk(&self) -> LK_R {
LK_R::new(((self.bits >> 31) & 0x01) != 0)
}
}
impl W {
#[doc = "Bits 0:5 - Trigger MUX Input 0 Source Select"]
#[inline(always)]
pub fn sel0(&mut self) -> SEL0_W {
SEL0_W { w: self }
}
#[doc = "Bit 31 - TRGMUX register lock."]
#[inline(always)]
pub fn lk(&mut self) -> LK_W {
LK_W { w: self }
}
#[doc = "Writes raw bits to the register."]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.0.bits(bits);
self
}
}
#[doc = "TRGMUX LPI2C0 Register\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [trgmux_lpi2c0](index.html) module"]
pub struct TRGMUX_LPI2C0_SPEC;
impl crate::RegisterSpec for TRGMUX_LPI2C0_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [trgmux_lpi2c0::R](R) reader structure"]
impl crate::Readable for TRGMUX_LPI2C0_SPEC {
type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [trgmux_lpi2c0::W](W) writer structure"]
impl crate::Writable for TRGMUX_LPI2C0_SPEC {
type Writer = W;
}
#[doc = "`reset()` method sets TRGMUX_LPI2C0 to value 0"]
impl crate::Resettable for TRGMUX_LPI2C0_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0
}
}
| {
&self.0
} |
utils.ts | /*
* Copyright 2021 Chaos Mesh Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
export function toTitleCase(s: string) {
return s.charAt(0).toUpperCase() + s.substr(1)
}
export function | (s: string) {
if (s.length > 25) {
return s.substring(0, 25) + '...'
}
return s
}
export function objToArrBySep(obj: Record<string, string | string[]>, separator: string, filters?: string[]) {
return Object.entries(obj)
.filter((d) => !filters?.includes(d[0]))
.reduce(
(acc: string[], [key, val]) =>
acc.concat(Array.isArray(val) ? val.map((d) => `${key}${separator}${d}`) : `${key}${separator}${val}`),
[]
)
}
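// Example (illustrative input): objToArrBySep({ app: 'nginx', tier: ['web', 'api'] }, '=')
// returns ['app=nginx', 'tier=web', 'tier=api']; keys listed in `filters` are skipped.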
export function arrToObjBySep(arr: string[], sep: string) {
const result: any = {}
arr.forEach((d) => {
const split = d.split(sep)
result[split[0]] = split[1]
})
return result as object
}
/**
* Remove empty values from nested object.
*
* @export
* @param {*} obj
*/
export function sanitize(obj: any) {
function isEmpty(value: any): boolean {
if (!value) {
return true
}
if (Array.isArray(value) && value.length === 0) {
return true
}
if (value instanceof Object) {
return Object.values(value).every(isEmpty)
}
return false
}
return JSON.parse(JSON.stringify(obj, (_, value: any) => (isEmpty(value) ? undefined : value)))
}
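// Example (illustrative input): sanitize({ a: '', b: { c: [] }, d: 1 }) returns { d: 1 },
// since empty strings, empty arrays, and objects whose values are all empty are dropped.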
| truncate |
main.rs | use std::fs::File;
use std::io::{stdin, stdout};
use std::io::prelude::*;
#[macro_use] extern crate enum_primitive;
extern crate sfml;
use sfml::graphics::{Vertex, VertexArray, Color, RenderTarget, RenderWindow,
PrimitiveType};
use sfml::window::{Key, VideoMode, Event, window_style};
use sfml::system::{Clock, Vector2f};
extern crate clap;
use clap::{Arg, App};
const SCREEN_WIDTH: u32 = 64;
const SCREEN_HEIGHT: u32 = 32;
mod instruction;
mod chip8;
use chip8::CHIP8;
fn main() {
let matches = App::new("CHIP-8")
.version("1.0")
.author("Richard Leek <[email protected]>")
.about("CHIP-8 virtual machine in Rust")
.arg(Arg::with_name("INPUT")
.help("The path to the rom to load")
.required(true)
.index(1))
.arg(Arg::with_name("debug")
.help("Runs the rom in debug mode")
.short("d"))
.arg(Arg::with_name("scale")
.help("Scale of the window")
.value_name("scale")
.short("s"))
.get_matches();
let rom_name = matches.value_of("INPUT").unwrap();
let debug = matches.is_present("debug");
let mut chip = CHIP8::new(&mut File::open(&rom_name).unwrap());
let screen_scale: u32 = {
if matches.is_present("scale") {
matches.value_of("scale").unwrap().parse::<u32>().unwrap()
} else {
8
}
};
if !debug {
let mut window = RenderWindow::new(VideoMode::new_init(SCREEN_WIDTH * screen_scale, SCREEN_HEIGHT * screen_scale, 32),
"CHIP8",
window_style::CLOSE,
&Default::default())
.unwrap();
window.set_vertical_sync_enabled(true);
let mut scene = VertexArray::new_init(PrimitiveType::sfQuads, SCREEN_WIDTH * SCREEN_HEIGHT * 4);
let mut clock = Clock::new();
while !chip.done {
update(&mut chip, &window);
chip.tick(clock.restart().as_seconds());
draw(&chip, &mut window, &mut scene, screen_scale);
}
return;
} else {
println!("Debug Mode.. Press return to step");
loop {
stdout().flush().unwrap();
let mut input = String::new();
stdin().read_line(&mut input).unwrap();
chip.tick(1.0 / 60.0);
println!("{}", chip);
draw_debug(&chip);
}
}
fn update(chip: &mut CHIP8, window: &RenderWindow) {
chip.key[0] = Key::Num1.is_pressed() as u8;
chip.key[1] = Key::Num2.is_pressed() as u8;
chip.key[2] = Key::Num3.is_pressed() as u8;
chip.key[3] = Key::Num4.is_pressed() as u8;
chip.key[4] = Key::Q.is_pressed() as u8;
chip.key[5] = Key::W.is_pressed() as u8;
chip.key[6] = Key::E.is_pressed() as u8;
chip.key[7] = Key::R.is_pressed() as u8;
chip.key[8] = Key::A.is_pressed() as u8;
chip.key[9] = Key::S.is_pressed() as u8;
chip.key[10] = Key::D.is_pressed() as u8;
chip.key[11] = Key::F.is_pressed() as u8;
chip.key[12] = Key::Z.is_pressed() as u8;
chip.key[13] = Key::X.is_pressed() as u8;
chip.key[14] = Key::C.is_pressed() as u8;
chip.key[15] = Key::V.is_pressed() as u8;
for event in window.events() {
match event {
Event::Closed |
Event::KeyPressed { code: Key::Escape, .. } => {
chip.done = true;
},
_ => {}
}
}
}
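    // The 4x4 keyboard block 1-4 / Q-R / A-F / Z-V above is mapped linearly onto
    // the CHIP-8 hex keypad 0x0..0xF, row by row. This differs from the common
    // COSMAC-style layout (1,2,3,C / 4,5,6,D / ...), so some ROMs may expect
    // different keys.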
fn | (chip: &CHIP8) {
if chip.draw {
for y in 0..SCREEN_HEIGHT as u32 {
for x in 0..SCREEN_WIDTH as u32 {
if chip.gfx[(x+y*SCREEN_WIDTH) as usize] == 1 {
print!("#");
} else {
print!(" ");
}
}
print!("\n");
}
}
}
fn draw(chip: &CHIP8, window: &mut RenderWindow, scene: &mut VertexArray, screen_scale: u32) {
if chip.draw {
window.clear(&Color::black());
scene.clear();
for y in 0..SCREEN_HEIGHT as u32 {
for x in 0..SCREEN_WIDTH as u32 {
if chip.gfx[(x+y*SCREEN_WIDTH) as usize] == 1 {
scene.append(&Vertex::new(&Vector2f {
x: x as f32 * screen_scale as f32,
y: y as f32 * screen_scale as f32,
},
&Color::white(), &Vector2f {x:0.0,y:0.0}));
scene.append(&Vertex::new(&Vector2f {
x: (x + 1) as f32 * screen_scale as f32,
y: y as f32 * screen_scale as f32,
},
&Color::white(), &Vector2f {x:0.0,y:0.0}));
scene.append(&Vertex::new(&Vector2f {
x: (x + 1) as f32 * screen_scale as f32,
y: (y + 1) as f32 * screen_scale as f32,
},
&Color::white(), &Vector2f {x:0.0,y:0.0}));
scene.append(&Vertex::new(&Vector2f {
x: x as f32 * screen_scale as f32,
y: (y + 1) as f32 * screen_scale as f32,
},
&Color::white(), &Vector2f {x:0.0,y:0.0}));
}
}
}
window.draw(scene);
window.display();
}
}
}
| draw_debug |
common.go | package main
import (
. "github.com/mmcloughlin/avo/build"
. "github.com/mmcloughlin/avo/operand"
. "github.com/mmcloughlin/avo/reg"
. "github.com/zeebo/blake3/avo"
)
var msgSched = [7][16]int{
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
{2, 6, 3, 10, 7, 0, 4, 13, 1, 11, 12, 5, 9, 14, 15, 8},
{3, 4, 10, 12, 13, 2, 7, 14, 6, 5, 9, 0, 11, 15, 8, 1},
{10, 7, 12, 9, 14, 3, 13, 15, 4, 0, 11, 2, 5, 8, 1, 6},
{12, 13, 9, 11, 15, 10, 14, 8, 7, 2, 5, 3, 0, 1, 6, 4},
{9, 14, 11, 5, 8, 12, 15, 1, 13, 3, 0, 10, 2, 6, 4, 7},
{11, 15, 5, 0, 1, 9, 8, 6, 14, 10, 2, 12, 3, 4, 7, 13},
}
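// msgSched is the per-round permutation of the 16 message words: row 0 is the
// identity order and each following row applies BLAKE3's fixed word permutation
// once more, giving one row per round of the 7-round compression function.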
const roundSize = 32
const (
flag_chunkStart = 1 << 0
flag_chunkEnd = 1 << 1
flag_parent = 1 << 2
)
func transpose(c Ctx, alloc *Alloc, vs []*Value) {
L01, H01, L23, H23 := alloc.Value(), alloc.Value(), alloc.Value(), alloc.Value()
L45, H45, L67, H67 := alloc.Value(), alloc.Value(), alloc.Value(), alloc.Value()
VPUNPCKLDQ(vs[1].GetOp(), vs[0].Get(), L01.Get())
VPUNPCKHDQ(vs[1].ConsumeOp(), vs[0].Consume(), H01.Get())
VPUNPCKLDQ(vs[3].GetOp(), vs[2].Get(), L23.Get())
VPUNPCKHDQ(vs[3].ConsumeOp(), vs[2].Consume(), H23.Get())
VPUNPCKLDQ(vs[5].GetOp(), vs[4].Get(), L45.Get())
VPUNPCKHDQ(vs[5].ConsumeOp(), vs[4].Consume(), H45.Get())
VPUNPCKLDQ(vs[7].GetOp(), vs[6].Get(), L67.Get())
VPUNPCKHDQ(vs[7].ConsumeOp(), vs[6].Consume(), H67.Get())
LL0123, HL0123, LH0123, HH0123 := alloc.Value(), alloc.Value(), alloc.Value(), alloc.Value()
LL4567, HL4567, LH4567, HH4567 := alloc.Value(), alloc.Value(), alloc.Value(), alloc.Value()
VPUNPCKLQDQ(L23.GetOp(), L01.Get(), LL0123.Get())
VPUNPCKHQDQ(L23.ConsumeOp(), L01.Consume(), HL0123.Get())
VPUNPCKLQDQ(H23.GetOp(), H01.Get(), LH0123.Get())
VPUNPCKHQDQ(H23.ConsumeOp(), H01.Consume(), HH0123.Get())
VPUNPCKLQDQ(L67.GetOp(), L45.Get(), LL4567.Get())
VPUNPCKHQDQ(L67.ConsumeOp(), L45.Consume(), HL4567.Get())
VPUNPCKLQDQ(H67.GetOp(), H45.Get(), LH4567.Get())
VPUNPCKHQDQ(H67.ConsumeOp(), H45.Consume(), HH4567.Get())
vs[0], vs[1], vs[2], vs[3] = alloc.Value(), alloc.Value(), alloc.Value(), alloc.Value()
vs[4], vs[5], vs[6], vs[7] = alloc.Value(), alloc.Value(), alloc.Value(), alloc.Value()
VINSERTI128(Imm(1), LL4567.Get().(VecPhysical).AsX(), LL0123.Get(), vs[0].Get())
VPERM2I128(Imm(49), LL4567.Consume(), LL0123.Consume(), vs[4].Get())
VINSERTI128(Imm(1), HL4567.Get().(VecPhysical).AsX(), HL0123.Get(), vs[1].Get())
VPERM2I128(Imm(49), HL4567.Consume(), HL0123.Consume(), vs[5].Get())
VINSERTI128(Imm(1), LH4567.Get().(VecPhysical).AsX(), LH0123.Get(), vs[2].Get())
VPERM2I128(Imm(49), LH4567.Consume(), LH0123.Consume(), vs[6].Get())
VINSERTI128(Imm(1), HH4567.Get().(VecPhysical).AsX(), HH0123.Get(), vs[3].Get())
VPERM2I128(Imm(49), HH4567.Consume(), HH0123.Consume(), vs[7].Get())
}
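// transpose treats the eight YMM registers as an 8x8 matrix of 32-bit lanes and
// transposes it in three stages: dword unpacks, qword unpacks, then 128-bit lane
// moves via VINSERTI128/VPERM2I128. Afterwards vector i holds word i of all
// eight inputs.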
func transposeMsg(c Ctx, alloc *Alloc, block GPVirtual, input, msg Mem) {
for j := 0; j < 2; j++ {
vs := alloc.Values(8)
for i, v := range vs {
VMOVDQU(input.Offset(1024*i+32*j).Idx(block, 1), v.Get())
}
transpose(c, alloc, vs)
for i, v := range vs {
VMOVDQU(v.Consume(), msg.Offset(32*i+256*j))
}
}
}
func transposeMsgN(c Ctx, alloc *Alloc, block GPVirtual, input, msg Mem, j int) {
vs := alloc.Values(8)
for i, v := range vs {
VMOVDQU(input.Offset(1024*i+32*j).Idx(block, 1), v.Get())
}
transpose(c, alloc, vs)
for i, v := range vs {
VMOVDQU(v.Consume(), msg.Offset(32*i+256*j))
}
}
func loadCounter(c Ctx, alloc *Alloc, mem, lo_mem, hi_mem Mem) {
ctr0, ctr1 := alloc.Value(), alloc.Value()
VPBROADCASTQ(mem, ctr0.Get())
VPADDQ(c.Counter, ctr0.Get(), ctr0.Get())
VPBROADCASTQ(mem, ctr1.Get())
VPADDQ(c.Counter.Offset(32), ctr1.Get(), ctr1.Get())
L, H := alloc.Value(), alloc.Value()
VPUNPCKLDQ(ctr1.GetOp(), ctr0.Get(), L.Get())
VPUNPCKHDQ(ctr1.ConsumeOp(), ctr0.Consume(), H.Get())
LLH, HLH := alloc.Value(), alloc.Value()
VPUNPCKLDQ(H.GetOp(), L.Get(), LLH.Get())
VPUNPCKHDQ(H.ConsumeOp(), L.Consume(), HLH.Get())
ctrl, ctrh := alloc.Value(), alloc.Value()
VPERMQ(U8(0b11_01_10_00), LLH.ConsumeOp(), ctrl.Get())
VPERMQ(U8(0b11_01_10_00), HLH.ConsumeOp(), ctrh.Get())
VMOVDQU(ctrl.Consume(), lo_mem)
VMOVDQU(ctrh.Consume(), hi_mem)
}
func finalizeRounds(alloc *Alloc, vs, h_vecs []*Value, h_regs []int) {
finalized := [8]bool{}
finalize:
for j := 0; j < 8; j++ {
free := alloc.FreeReg()
for i, reg := range h_regs {
if reg == free && !finalized[i] {
h_vecs[i] = xorb(alloc, vs[i], vs[8+i])
finalized[i] = true
continue finalize
}
}
for i, f := range finalized[:] {
if !f {
h_vecs[i] = xorb(alloc, vs[i], vs[8+i])
finalized[i] = true
continue finalize
}
}
}
}
func round(c Ctx, alloc *Alloc, vs []*Value, r int, m func(n int) Mem) {
ms := func(ns ...int) (o []Mem) {
for _, n := range ns {
o = append(o, m(msgSched[r][n]))
}
return o
}
partials := []struct {
ms []Mem
tab Mem
rot int
}{ | }
for i, p := range partials {
addms(alloc, p.ms, vs[0:4])
tab := alloc.ValueFrom(p.tab)
for j := 0; j < 4; j++ {
vs[0+j] = add(alloc, vs[4+j], vs[0+j])
vs[12+j] = xor(alloc, vs[0+j], vs[12+j])
vs[12+j] = rotTv(alloc, tab, vs[12+j])
}
tab.Free()
for j := 0; j < 4; j++ {
vs[8+j] = add(alloc, vs[12+j], vs[8+j])
vs[4+j] = xor(alloc, vs[8+j], vs[4+j])
}
rotNs(alloc, p.rot, vs[4:8])
// roll the blocks
if i == 1 {
vs[4], vs[5], vs[6], vs[7] = vs[5], vs[6], vs[7], vs[4]
vs[8], vs[9], vs[10], vs[11] = vs[10], vs[11], vs[8], vs[9]
vs[12], vs[13], vs[14], vs[15] = vs[15], vs[12], vs[13], vs[14]
} else if i == 3 {
vs[4], vs[5], vs[6], vs[7] = vs[7], vs[4], vs[5], vs[6]
vs[8], vs[9], vs[10], vs[11] = vs[10], vs[11], vs[8], vs[9]
vs[12], vs[13], vs[14], vs[15] = vs[13], vs[14], vs[15], vs[12]
}
}
}
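// Each entry of `partials` in round() is half of a G application done on four
// state slots at once: add a message word into a, a += b, d ^= a, rotate d by 16
// or 8 via the VPSHUFB table, c += d, b ^= c, rotate b by 12 or 7 via shifts.
// The lane rolls after i == 1 and i == 3 move the state between column and
// diagonal form, matching the usual BLAKE3/ChaCha round structure.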
func addm(alloc *Alloc, mp Mem, a *Value) *Value {
o := alloc.Value()
VPADDD(mp, a.Consume(), o.Get())
return o
}
func addms(alloc *Alloc, mps []Mem, as []*Value) {
for i, a := range as {
as[i] = addm(alloc, mps[i], a)
}
}
func add(alloc *Alloc, a, b *Value) *Value {
o := alloc.Value()
VPADDD(a.Get(), b.Consume(), o.Get())
return o
}
func xor(alloc *Alloc, a, b *Value) *Value {
o := alloc.Value()
VPXOR(a.Get(), b.Consume(), o.Get())
return o
}
func xorb(alloc *Alloc, a, b *Value) *Value {
o := alloc.Value()
switch {
case a.HasReg():
VPXOR(b.ConsumeOp(), a.Consume(), o.Get())
case b.HasReg():
VPXOR(a.ConsumeOp(), b.Consume(), o.Get())
default:
VPXOR(a.ConsumeOp(), b.Consume(), o.Get())
}
return o
}
func rotN(alloc *Alloc, n int, a *Value) *Value {
tmp, o := alloc.Value(), alloc.Value()
VPSRLD(U8(n), a.Get(), tmp.Get())
VPSLLD(U8(32-n), a.Get(), a.Get())
VPOR(tmp.ConsumeOp(), a.Consume(), o.Get())
return o
}
func rotNs(alloc *Alloc, n int, as []*Value) {
for i, a := range as {
as[i] = rotN(alloc, n, a)
}
}
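// rotN implements an arbitrary rotation with two shifts and an OR and is used
// for the 12- and 7-bit amounts; the byte-aligned 16- and 8-bit rotations go
// through rotTv below, a single VPSHUFB with a precomputed shuffle table
// (c.Rot16 / c.Rot8).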
func rotTv(alloc *Alloc, tab, a *Value) *Value {
o := alloc.Value()
VPSHUFB(tab.GetOp(), a.Consume(), o.Get())
return o
} | {ms(0, 2, 4, 6), c.Rot16, 12},
{ms(1, 3, 5, 7), c.Rot8, 7},
{ms(8, 10, 12, 14), c.Rot16, 12},
{ms(9, 11, 13, 15), c.Rot8, 7}, |
settings.py | """
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.1.15.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
| For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'lxb!(o00)qtw0p+6q_vs$01&wtsw(m*s!ol0_6^v*flo^!&ek&'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'core',
'user',
'recipe',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': os.environ.get('DB_HOST'),
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASS'),
}
}
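# The connection settings above are read from the environment; an illustrative
# (deployment-specific, not default) set of values, e.g. under docker-compose:
#   DB_HOST=db DB_NAME=app DB_USER=postgres DB_PASS=supersecretpassword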
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = '/vol/web/media'
STATIC_ROOT = '/vol/web/static'
AUTH_USER_MODEL = 'core.User' | |
app.rs | use std::path::PathBuf;
use iced::{
button, futures::TryFutureExt, scrollable, text_input, Align, Application, Button, Checkbox,
Column, Command, Container, Element, HorizontalAlignment, Length, Row, Scrollable, Settings,
Text, TextInput,
};
use mw_tools::{api, WikiClient};
use crate::style;
pub fn start() {
App::run(Settings::default()).unwrap()
}
#[derive(Debug, Default)]
struct App {
loading: bool,
wk_client: WikiClient,
state: State,
}
#[derive(Debug, Default)]
struct State {
active_tab: Tab,
btn_account: button::State,
btn_delete: button::State,
btn_list: button::State,
btn_upload: button::State,
ln_input: text_input::State,
ln_input_value: String,
lp_input: text_input::State,
lp_input_value: String,
wiki_url_input: text_input::State,
wiki_url_input_value: String,
is_persistent: bool,
login_button: button::State,
file_button: button::State,
execute_button: button::State,
selected_files: Vec<PathBuf>,
upload_scrollable: scrollable::State,
saving: bool,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Tab {
Account,
Delete,
List,
Upload,
}
impl Default for Tab {
fn default() -> Self {
Tab::Account
}
}
#[derive(Debug, Clone)]
enum Message {
Loaded(Result<SavedState, ()>),
Saved(Result<(), ()>),
TabSelected(Tab),
LoginNameChanged(String),
LoginPasswordChanged(String),
WikiUrlChanged(String),
CheckboxPersistentLogin(bool),
LoginButtonPressed,
LoggedIn(Result<WikiClient, ()>),
FileButtonPressed,
FilesSelected(Result<Vec<PathBuf>, ()>),
ExecuteButtonPressed,
Executed(Result<(), ()>),
}
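// This follows iced's Elm-style architecture: `update` consumes a `Message` and
// may return a `Command` running an async task (loading saved state, logging
// in, uploading files), whose result is delivered back to `update` as another
// `Message` such as `Loaded`, `LoggedIn` or `Executed`.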
impl Application for App {
type Executor = iced::executor::Default;
type Message = Message;
type Flags = ();
fn new(_flags: ()) -> (Self, Command<Message>) {
(
Self {
loading: true,
..App::default()
},
Command::perform(SavedState::load(), Message::Loaded),
)
}
fn title(&self) -> String {
String::from("mw-toolbox by FabianLars")
}
fn update(&mut self, message: Message) -> Command<Message> {
match self.loading {
true => {
match message {
Message::Loaded(Ok(state)) => {
self.loading = false;
self.state = State {
ln_input_value: state.ln_input_value,
lp_input_value: state.lp_input_value,
is_persistent: state.is_persistent,
wiki_url_input_value: state.wikiurl,
..State::default()
};
}
Message::Loaded(Err(_)) => {
self.state = State::default();
}
_ => {}
}
Command::none()
}
false => {
match message {
Message::WikiUrlChanged(value) => {
self.state.wiki_url_input_value = value;
}
Message::LoginNameChanged(value) => {
self.state.ln_input_value = value;
}
Message::LoginPasswordChanged(value) => {
self.state.lp_input_value = value;
}
Message::TabSelected(selected) => {
self.state.active_tab = selected;
}
Message::CheckboxPersistentLogin(toggle) => {
self.state.is_persistent = toggle;
}
Message::LoginButtonPressed => {
return Command::perform(
WikiClient::new_logged_in(
self.state.wiki_url_input_value.clone(),
self.state.ln_input_value.clone(),
self.state.lp_input_value.clone(),
)
.map_err(|_| ()),
Message::LoggedIn,
);
}
Message::FileButtonPressed => {
if let Tab::Upload = self.state.active_tab {
return Command::perform(file_dialog(), Message::FilesSelected);
}
}
Message::FilesSelected(files) => {
if let Tab::Upload = self.state.active_tab {
self.state.selected_files = match files {
Ok(p) => p,
Err(_) => Vec::new(),
}
}
}
Message::ExecuteButtonPressed => {
if let Tab::Upload = self.state.active_tab {
return Command::perform(
api::upload::upload(
self.wk_client.clone(),
self.state.selected_files.clone(),
None,
)
.map_err(|_| ()),
Message::Executed,
);
}
}
Message::LoggedIn(res) => {
if let Ok(client) = res {
self.wk_client = client;
return match self.state.is_persistent {
true => Command::perform(
SavedState {
ln_input_value: self.state.ln_input_value.clone(),
lp_input_value: self.state.lp_input_value.clone(),
wikiurl: self.state.wiki_url_input_value.clone(),
is_persistent: self.state.is_persistent,
}
.save(),
Message::Saved,
),
false => Command::perform(
SavedState {
ln_input_value: String::new(),
lp_input_value: String::new(),
wikiurl: self.state.wiki_url_input_value.clone(),
is_persistent: self.state.is_persistent,
}
.save(),
Message::Saved,
),
};
}
}
Message::Saved(_) => |
Message::Executed(res) => println!("{:?}", res),
_ => {}
}
Command::none()
}
}
}
fn view(&mut self) -> Element<Message> {
match self.loading {
true => loading_message(),
false => {
let navbar = Row::new()
.padding(10)
.push(
Button::new(
&mut self.state.btn_account,
Container::new(Text::new("Account")).padding(5),
)
.on_press(Message::TabSelected(Tab::Account)),
)
.push(
Button::new(
&mut self.state.btn_delete,
Container::new(Text::new("Delete")).padding(5),
)
.on_press(Message::TabSelected(Tab::Delete)),
)
.push(
Button::new(
&mut self.state.btn_list,
Container::new(Text::new("List")).padding(5),
)
.on_press(Message::TabSelected(Tab::List)),
)
.push(
Button::new(
&mut self.state.btn_upload,
Container::new(Text::new("Upload")).padding(5),
)
.on_press(Message::TabSelected(Tab::Upload)),
);
let mut text_files = String::new();
for file in &self.state.selected_files {
text_files.push_str(&file.display().to_string());
text_files.push_str("\n");
}
let tab_container = Container::new(match &self.state.active_tab {
Tab::Account => Column::new()
.padding(10)
.spacing(10)
.push(
TextInput::new(
&mut self.state.wiki_url_input,
"Fandom Wiki URL (api.php)",
&self.state.wiki_url_input_value,
Message::WikiUrlChanged,
)
.size(40),
)
.push(
Row::new()
.push(
TextInput::new(
&mut self.state.ln_input,
"Fandom Username",
&self.state.ln_input_value,
Message::LoginNameChanged,
)
.size(40),
)
.push(
TextInput::new(
&mut self.state.lp_input,
"Fandom Password",
&self.state.lp_input_value,
Message::LoginPasswordChanged,
)
.size(40)
.password(),
),
)
.push(
Row::new()
.push(Checkbox::new(
self.state.is_persistent,
"Remember me",
Message::CheckboxPersistentLogin,
))
.push(
Button::new(&mut self.state.login_button, Text::new("Login"))
.on_press(Message::LoginButtonPressed),
),
),
Tab::Upload => Column::new()
.push(
Container::new(
Scrollable::new(&mut self.state.upload_scrollable)
.push(Text::new(text_files)),
)
.width(Length::Fill)
.height(Length::Fill)
.align_x(Align::Center),
)
.push(
Container::new(
Row::new()
.padding(10)
.spacing(20)
.push(
Button::new(
&mut self.state.file_button,
Text::new("Select File(s)"),
)
.on_press(Message::FileButtonPressed),
)
.push(
Button::new(
&mut self.state.execute_button,
Text::new("Execute"),
)
.on_press(Message::ExecuteButtonPressed),
),
)
.width(Length::Fill)
.height(Length::Shrink)
.align_x(Align::Center),
),
_ => Column::new(),
});
let content = Column::new()
.push(navbar.height(Length::FillPortion(1)).width(Length::Fill))
.push(tab_container.height(Length::FillPortion(10)));
Container::new(content)
.width(Length::Fill)
.height(Length::Fill)
.center_x()
.center_y()
.style(style::Theme::Dark)
.into()
}
}
}
}
async fn file_dialog() -> Result<Vec<PathBuf>, ()> {
let result = tokio::task::spawn_blocking(|| {
native_dialog::FileDialog::new()
.show_open_multiple_file()
.unwrap()
})
.await
.map_err(|_| ())?;
let mut temp: Vec<PathBuf> = Vec::new();
for f in result {
temp.push(f);
}
Ok(temp)
}
fn loading_message() -> Element<'static, Message> {
Container::new(
Text::new("Loading...")
.horizontal_alignment(HorizontalAlignment::Center)
.size(50),
)
.width(Length::Fill)
.height(Length::Fill)
.center_y()
.into()
}
#[derive(Debug, Clone)]
struct SavedState {
ln_input_value: String,
wikiurl: String,
lp_input_value: String,
is_persistent: bool,
}
impl SavedState {
async fn load() -> Result<SavedState, ()> {
let ln_input_value = storage::get_secure("b9c95dde").await.unwrap_or_default();
let lp_input_value = storage::get_secure("d7f0942b").await.unwrap_or_default();
let wikiurl = storage::get("wikiurl")
.await
.unwrap_or_else(|_| String::from("https://leagueoflegends.fandom.com/de/api.php"));
let is_persistent = storage::get("is_persistent")
.await
.unwrap_or_else(|_| String::from("false"))
.parse::<bool>()
.unwrap_or(false);
let s = Self {
lp_input_value,
ln_input_value,
wikiurl,
is_persistent,
};
Ok(s)
}
async fn save(self) -> Result<(), ()> {
storage::insert_multiple(&[
(
"b9c95dde",
storage::encrypt(&self.ln_input_value)
                    .map_err(|e| println!("Error encrypting name: {}", e))?
.as_slice(),
),
(
"d7f0942b",
storage::encrypt(&self.lp_input_value)
                    .map_err(|e| println!("Error encrypting password: {}", e))?
.as_slice(),
),
("wikiurl", self.wikiurl.as_bytes()),
("is_persistent", self.is_persistent.to_string().as_bytes()),
])
.await
.map_err(|e| println!("Error saving app data: {}", e))
}
}
| {
self.state.saving = false;
} |
prop_material.py | # -*- coding: utf-8 -*-
from bpy.types import Panel
class MMDMaterialPanel(Panel):
bl_idname = 'MATERIAL_PT_mmd_tools_material'
bl_label = 'MMD Material'
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = 'material'
@classmethod
def poll(cls, context):
material = context.active_object.active_material
return material and material.mmd_material
def draw(self, context):
material = context.active_object.active_material
mmd_material = material.mmd_material
layout = self.layout
col = layout.column(align=True)
col.label(text ='Information:')
c = col.column()
r = c.row()
r.prop(mmd_material, 'name_j')
r = c.row()
r.prop(mmd_material, 'name_e')
r = c.row()
r.prop(mmd_material, 'comment')
col = layout.column(align=True)
col.label(text = 'Color:')
c = col.column()
r = c.row()
r.prop(material, 'diffuse_color')
r = c.row()
r.label(text = 'Diffuse Alpha:')
r.prop(material, 'alpha')
r = c.row()
r.prop(mmd_material, 'ambient_color')
r = c.row()
r.prop(material, 'specular_color')
r = c.row()
r.label(text = 'Specular Alpha:')
r.prop(material, 'specular_alpha')
col = layout.column(align=True)
col.label(text = 'Shadow:')
c = col.column()
r = c.row()
r.prop(mmd_material, 'is_double_sided')
r.prop(mmd_material, 'enabled_drop_shadow')
r = c.row()
r.prop(mmd_material, 'enabled_self_shadow_map')
r.prop(mmd_material, 'enabled_self_shadow')
col = layout.column(align=True)
col.label(text = 'Edge:')
c = col.column()
r = c.row()
r.prop(mmd_material, 'enabled_toon_edge')
r.prop(mmd_material, 'edge_weight')
r = c.row()
r.prop(mmd_material, 'edge_color')
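# MMDTexturePanel below exposes the toon/sphere texture settings; the original
# texture-slot UI is kept only as a commented-out block, presumably because the
# material texture_slots API it relied on is not available in newer Blender
# versions.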
class MMDTexturePanel(Panel):
bl_idname = 'MATERIAL_PT_mmd_tools_texture'
bl_label = 'MMD Texture'
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = 'material'
@classmethod
def | (cls, context):
material = context.active_object.active_material
return material and material.mmd_material
def draw(self, context):
material = context.active_object.active_material
mmd_material = material.mmd_material
layout = self.layout
#tex_slots = material.texture_slots.values()
col = layout.column(align=True)
row = col.row(align=True)
row.label(text = 'NoTexture:')
r = row.column(align=True)
'''
if tex_slots[0]:
tex = tex_slots[0].texture
if tex.type == 'IMAGE' and tex.image:
r2 = r.row(align=True)
r2.prop(tex.image, 'filepath', text='')
r2.operator('mmd_tools.material_remove_texture', text='', icon='PANEL_CLOSE')
else:
r.operator('mmd_tools.material_remove_texture', text='Remove', icon='PANEL_CLOSE')
col.label(text = 'Texture is invalid.', icon='ERROR')
else:
r.operator('mmd_tools.material_open_texture', text='Add', icon='FILESEL')
row = col.row(align=True)
row.label(text = 'Sphere Texture:')
r = row.column(align=True)
if tex_slots[1]:
tex = tex_slots[1].texture
if tex.type == 'IMAGE' and tex.image:
r2 = r.row(align=True)
r2.prop(tex.image, 'filepath', text='')
else:
r.operator('mmd_tools.material_remove_sphere_texture', text='Remove', icon='PANEL_CLOSE')
col.label(text = 'Sphere Texture is invalid.', icon='ERROR')
else:
r.operator('mmd_tools.material_open_texture', text='Add', icon='FILESEL')
'''
col = layout.column(align=True)
c = col.column()
r = c.row()
r.prop(mmd_material, 'is_shared_toon_texture')
if mmd_material.is_shared_toon_texture:
r.prop(mmd_material, 'shared_toon_texture')
r = c.row()
r.prop(mmd_material, 'toon_texture')
r = c.row()
r.prop(mmd_material, 'sphere_texture_type')
| poll |
getting-started.page.ts | import { Component, AfterViewInit, ViewChild, HostBinding } from '@angular/core';
import { FormGroup, FormControl } from '@angular/forms';
import { IonSlides, MenuController } from '@ionic/angular';
@Component({
selector: 'app-getting-started',
templateUrl: './getting-started.page.html',
styleUrls: [
'./styles/getting-started.page.scss',
'./styles/getting-started.shell.scss',
'./styles/getting-started.responsive.scss'
]
})
export class GettingStartedPage implements AfterViewInit {
@ViewChild(IonSlides, { static: true }) slides: IonSlides;
@HostBinding('class.last-slide-active') isLastSlide = false;
gettingStartedForm: FormGroup;
constructor(public menu: MenuController) {
this.gettingStartedForm = new FormGroup({
browsingCategory: new FormControl('men'),
followingInterests: new FormGroup({
tops: new FormControl(true),
dresses: new FormControl(),
jeans: new FormControl(),
jackets: new FormControl(true),
shoes: new FormControl(), |
// Disable side menu for this page
ionViewDidEnter(): void {
this.menu.enable(false);
}
// Restore to default when leaving this page
ionViewDidLeave(): void {
this.menu.enable(true);
}
ngAfterViewInit(): void {
// ViewChild is set
this.slides.isEnd().then(isEnd => {
this.isLastSlide = isEnd;
});
// Subscribe to changes
this.slides.ionSlideWillChange.subscribe(changes => {
this.slides.isEnd().then(isEnd => {
this.isLastSlide = isEnd;
});
});
}
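  // isLastSlide is exposed through the HostBinding above, so the host element
  // gains the `last-slide-active` class on the final slide; what reacts to that
  // class lives in the page's template and styles, not in this file.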
} | glasses: new FormControl()
})
});
} |
traits2.rs | // traits2.rs
//
// Your task is to implement the trait
// `AppendBar' for a vector of strings.
//
// To implement this trait, consider for
// a moment what it means to 'append "Bar"'
// to a vector of strings.
//
// No boiler plate code this time,
// you can do this!
trait AppendBar {
fn append_bar(self) -> Self;
}
//TODO: Add your code here
impl AppendBar for Vec<String> {
    fn append_bar(mut self) -> Self {
        self.push(String::from("Bar"));
        self
    }
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn is_vec_pop_eq_bar() {
let mut foo = vec![String::from("Foo")].append_bar();
assert_eq!(foo.pop().unwrap(), String::from("Bar"));
assert_eq!(foo.pop().unwrap(), String::from("Foo")); | }
} | |
server.rs | use std::cell::RefCell;
use std::collections::{HashMap, VecDeque};
use std::net::SocketAddr;
use std::ops::Deref;
use std::rc::Rc;
use std::result::Result::Ok;
use bitflags::_core::option::Option::Some;
use bitflags::_core::time::Duration;
use nix::unistd::{fork, ForkResult, Pid};
use tokio::net::TcpSocket;
use tokio::sync::OwnedMutexGuard;
use tokio::task::spawn_local;
use tokio::time::Instant;
use tokio::time::interval_at;
use crate::{Bytes, CstError, now_mil};
use crate::conf::Config;
use crate::db::DB;
use crate::link::{Client, Link, SharedLink};
use crate::object::Object;
use crate::replica::replica::{ReplicaIdentity, ReplicaManager};
use crate::resp::Message;
use crate::snapshot::{SNAPSHOT_FLAG_CHECKSUM, SnapshotWriter};
use crate::stats::{incr_clients, Metrics};
pub struct Server {
pub config: &'static Config,
pub addr: String,
pub node_id: u64,
pub node_alias: String,
uuid: u64,
pub expires: HashMap<Bytes, Object>,
pub db: DB,
repl_log: VecDeque<(u64, &'static str, Vec<Message>)>,
latest_repl_uuid_overflowed: Option<u64>,
repl_log_size: u64,
repl_log_size_limit: u64,
pub replicas: ReplicaManager,
// replicas: LWWHash<u64, ReplicaIdentity>,
pub events: EventsProducer,
#[allow(unused)]
    events_watcher: EventsConsumer,
// latest time a snapshot was dumped, and the replica ids and their uuids we received at that time
snapshot: (u64, HashMap<String, u64>),
latest_dump_time: u64,
latest_dumped_at_uuid: u64,
pub client_chan: tokio::sync::mpsc::Sender<OwnedMutexGuard<Box<dyn Link + Send>>>,
pub metrics: Metrics,
}
pub enum ServerEvent {
Started,
Replicated,
}
impl Server {
pub fn new(config: &'static Config) -> Self {
let (tx, rx) = new_events_chann();
let (c_tx, _) = tokio::sync::mpsc::channel(1);
let identity = ReplicaIdentity{
id: config.node_id,
addr: config.addr.clone(),
alias: config.node_alias.clone(),
};
Server {
node_id: config.node_id,
node_alias: config.node_alias.clone(),
addr: config.addr.clone(),
config,
uuid: 1,
expires: HashMap::new(),
db: DB::empty(),
repl_log: VecDeque::with_capacity(1024),
latest_repl_uuid_overflowed: None,
repl_log_size: 0,
repl_log_size_limit: 1024000,
events: tx,
            events_watcher: rx,
//replicas: HashMap::new(),
replicas: ReplicaManager::new(identity),
snapshot: (0, Default::default()),
latest_dump_time: 0,
latest_dumped_at_uuid: 0,
client_chan: c_tx,
metrics: Default::default(),
}
}
pub async fn run(c: &'static Config) -> Result<(), std::io::Error> {
let server = Rc::new(RefCell::new(Server::new(c)));
let addr = format!("{}:{}", c.ip, c.port).parse::<SocketAddr>().unwrap();
let socket = TcpSocket::new_v4()?;
socket.set_reuseaddr(true)?;
socket.set_reuseport(false)?;
socket.bind(addr)?;
let listener = socket.listen(c.tcp_backlog)?;
let server_c = server.clone();
let server_cc = server.clone();
let (tx, mut rx) = tokio::sync::mpsc::channel(102400);
server.deref().borrow_mut().client_chan = tx.clone();
spawn_local(async move {
loop {
match listener.accept().await {
Err(e) => {
error!("failed to accept new connection because {}", e);
std::process::exit(-1);
}
Ok((conn, peer_addr)) => {
incr_clients();
let ec = Server::new_event_consumer(server_cc.clone());
let mut sc = SharedLink::from(Client::new(conn, peer_addr.to_string(), ec));
let tx_c = tx.clone();
tokio::task::spawn(async move {
sc.prepare(tx_c).await;
});
}
}
}
});
spawn_local(async move {
Self::cron(server_c).await;
});
while let Some(mut l) = rx.recv().await {
l.serve( &mut *(*server).borrow_mut());
}
Ok(())
}
async fn cron(server: Rc<RefCell<Server>>) {
let mut timer = interval_at(Instant::now() + Duration::from_secs(1), Duration::from_millis(100));
loop {
{
let mut s = server.deref().borrow_mut();
let _ = s.next_uuid(true);
}
let _ = timer.tick().await;
server.deref().borrow_mut().gc();
// check for new replicas
let _ = server.clone();
}
}
fn new_event_consumer(server: Rc<RefCell<Server>>) -> EventsConsumer {
let e = server.deref().borrow().events.events.subscribe();
EventsConsumer{
watching: 0,
events: e,
}
}
// generate a uuid that is associated with the command currently being executed
// this uuid is also used as a timestamp.
// for writing, we always return a bigger uuid.
pub fn next_uuid(&mut self, is_write: bool) -> u64 {
        let (time_mil, mut sequence) = (self.uuid >> 22, (self.uuid & ((1<<22)-1)));
        let now = now_mil();
        self.uuid = {
            if is_write {
                if time_mil == now {
                    sequence += 1;
                } else {
                    sequence = 0;
                }
            }
            now << 22 | sequence
};
self.uuid
}
pub fn current_uuid(&self) -> u64 {
self.uuid
}
pub fn current_time(&self) -> u64 {
self.uuid >> 22
}
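    // uuids are Snowflake-style 64-bit values: the millisecond timestamp sits in
    // the upper bits and a 22-bit per-millisecond sequence in the lower bits
    // (hence the `>> 22` / `& ((1 << 22) - 1)` split). Assuming the clock never
    // goes backwards they increase for every write and double as coarse
    // timestamps.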
pub fn dump_all(&mut self, file_name: String) -> Result<(), CstError> {
debug!("begin to dump, current_dir is {:?}", std::env::current_dir());
let tmp_name = format!("snapshot_{}", chrono::Local::now().timestamp());
let f = std::fs::OpenOptions::new().create(true).write(true).truncate(true).open(tmp_name.clone())?;
debug!("tmp file {} created", tmp_name);
let mut w = SnapshotWriter::new(4096, f);
w.write_bytes(b"CONSTDB")?;
w.write_bytes([0u8, 1, 1, 1].as_ref())?;
        // dump my metadata
let _ = w.write_integer(self.node_id as i64)?
.write_integer(self.node_alias.len() as i64)?
.write_bytes(self.node_alias.as_bytes())?
.write_integer(self.addr.len() as i64)?
.write_bytes(self.addr.as_ref())?
.write_integer(self.get_repl_last_uuid() as i64)?;
// dump the db
self.db.dump(&mut w)?;
self.replicas.dump_snapshot(&mut w)?;
w.write_byte(SNAPSHOT_FLAG_CHECKSUM)?;
let checksum = w.checksum();
w.write_bytes(checksum.to_le_bytes().as_ref())?;
w.flush()?;
debug!("dump finished");
self.latest_dumped_at_uuid = self.current_uuid();
self.latest_dump_time = chrono::Local::now().timestamp() as u64;
std::fs::rename(tmp_name, file_name.clone())?;
debug!("temp_file was renamed to {}", file_name);
Ok(())
}
pub fn get_max_uuid_dumped(&self) -> u64 {
self.latest_dumped_at_uuid
}
pub fn dump_snapshot_in_background(&mut self) -> Result<(Option<Pid>, String, u64), CstError> {
// check for the latest time we've dumped a snapshot
debug!("dumping snapshot in background");
let file_name = "db.snapshot".to_string();
let pid = if self.snapshot.0 > self.get_repl_first_uuid() { // Congratulations! we've dumped a snapshot not long before, so we can use that snapshot
debug!("we've dumped a snapshot not long before, we can use that one!");
None
} else { // we need to dump a fresh snapshot now!
match unsafe { fork() } {
Ok(ForkResult::Child) => {
if let Err(e) = self.dump_all(file_name.clone()) {
error!("unable to dump a snapshot because {}", e);
}
std::process::exit(0);
}
Ok(ForkResult::Parent { child: pid }) => {
debug!("forked a child process {}", pid);
let mut tombstones = self.replicas.replica_progress();
tombstones.insert(self.addr.clone(), self.get_repl_last_uuid());
self.snapshot = (self.get_repl_last_uuid(), tombstones);
Some(pid)
}
Err(e) => {
error!("unable to fork a new process because {}", e);
return Err(CstError::SystemError);
}
}
};
Ok((pid, file_name, self.snapshot.0))
}
}
/**
* data management
*/
impl Server {
pub fn gc(&mut self) {
match self.replicas.min_uuid() {
None => return,
Some(u) => self.db.gc(u),
}
}
}
/*
* the replicate log system
*
*/
impl Server {
pub fn replicate_cmd(&mut self, uuid: u64, cmd_name: &'static str, args: Vec<Message>) {
//let prev_uuid = self.repl_log.back().map(|(x, _, _)| *x).unwrap_or(1);
let s: usize = args.iter().map(|x| x.size()).sum();
self.repl_log.push_back((uuid, cmd_name, args));
self.repl_log_size += s as u64;
while self.repl_log_size > self.repl_log_size_limit {
match self.repl_log.pop_front() {
None => {
error!("the repl_log is empty while repl_log_size is greater than 0");
}
Some((u, _, ms)) => {
let s: usize = ms.iter().map(|x| x.size()).sum();
self.repl_log_size -= s as u64;
self.latest_repl_uuid_overflowed = Some(u);
}
}
}
self.events.trigger(Event::Replicated(uuid));
}
pub fn repl_log_next(&self, uuid: u64) -> Option<(u64, Message)> {
let msg_pos = if uuid == 0 {
if self.latest_repl_uuid_overflowed.is_some() {
None
} else {
Some(0)
}
} else {
self.repl_log_uuid_index(uuid).map(|x| x+1)
};
match msg_pos {
None => return None,
Some(pos) => match self.repl_log.get(pos) {
None => None,
Some((next_uuid, cmd_name, args)) => {
let mut replicates = Vec::with_capacity(args.len() + 5);
replicates.push(Message::BulkString("replicate".into()));
replicates.push(Message::Integer(self.node_id as i64));
replicates.push(Message::Integer(uuid as i64));
replicates.push(Message::Integer(*next_uuid as i64));
replicates.push(Message::BulkString((*cmd_name).into()));
replicates.extend_from_slice(args);
Some((*next_uuid, Message::Array(replicates)))
}
}
}
}
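    // repl_log_uuid_index below is a manual binary search over the uuid-ordered
    // repl_log; it returns the index of the entry whose uuid matches exactly, or
    // None when the uuid is missing or outside the retained window.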
fn repl_log_uuid_index(&self, uuid: u64) -> Option<usize> {
let l = self.repl_log.len();
if l == 0 || self.repl_log[0].0 > uuid {
return None;
}
if uuid > self.repl_log[l-1].0 {
return None;
}
let (mut start, mut end) = (0usize, l-1);
loop {
if end - start == 1 {
if self.repl_log[start].0 == uuid {
return Some(start);
}
if self.repl_log[end].0 == uuid {
return Some(end);
}
return None;
}
let middle = (start + end) / 2;
let uuid_at_middle = self.repl_log[middle].0;
if uuid_at_middle != uuid && middle == start {
return None;
}
if uuid_at_middle > uuid {
end = middle;
} else if uuid_at_middle < uuid {
start = middle;
} else {
return Some(middle)
}
}
}
pub fn repl_log_at(&self, uuid: u64) -> Option<Message> {
match self.repl_log_uuid_index(uuid) {
None => None,
Some(idx) => match self.repl_log.get(idx) {
None => None,
Some((_, cmd_name, args)) => {
let mut cmd = Vec::with_capacity(args.len() + 1);
cmd.push(Message::BulkString(cmd_name.to_string().into()));
cmd.extend_from_slice(args);
Some(Message::Array(cmd))
},
}
}
}
pub fn repl_log_uuids(&self) -> Vec<u64> {
self.repl_log.iter().map(|(x, _, _)| *x).collect()
}
#[inline]
pub fn get_repl_first_uuid(&self) -> u64 {
self.repl_log.front().map(|(u, _, _)| *u).unwrap_or_default()
}
#[inline]
pub fn get_repl_last_uuid(&self) -> u64 {
self.repl_log.back().map(|(u, _, _)| *u).unwrap_or_default()
}
}
#[cfg(test)]
mod test {
use bitflags::_core::time::Duration;
use tokio::macros::support::thread_rng_n;
use crate::conf::Config;
use crate::resp::Message;
use crate::server::Server;
static Conf: Config = Config{
daemon: false,
node_id: 1,
node_alias: String::new(),
ip: String::new(),
port: 9000,
addr: String::new(),
threads: 4,
log: String::new(),
work_dir: String::new(),
tcp_backlog: 1024,
replica_heartbeat_frequency: 0,
replica_gossip_frequency: 0
};
#[test]
fn test_replog() {
let mut server = Server::new(&Conf);
let random_bytes = |size: usize| -> Vec<u8> {
(0..size).map(|_|thread_rng_n(26) + 48).map(|x| x as u8).collect()
};
// let mut uuids = Vec::with_capacity(100);
// for _ in 0..100 {
// let uuid = server.next_uuid(true);
// let args_len = thread_rng_n(5);
// let args: Vec<Message> = (0..args_len).map(|_| random_bytes(thread_rng_n(6) as usize)).map(|x| Message::BulkString(x.into())).collect();
// uuids.push((uuid, random_bytes(8), args));
// std::thread::sleep(Duration::from_millis(1));
// }
// for (uuid, cmd, args) in &uuids {
// server.replicate_cmd(*uuid, , args.clone());
// }
// assert_eq!(server.get_repl_first_uuid(), uuids[0].0);
// assert_eq!(server.get_repl_last_uuid(), uuids[99].0);
// for _ in 0..50 {
// let idx = thread_rng_n(99) as usize;
// let (uuid, cmd, args) = uuids.get(idx).unwrap();
// assert_eq!(server.repl_log_uuid_index(*uuid), Some(idx));
// assert_eq!(server.get_uuid_after(*uuid), Some(uuids[idx+1].0));
// }
}
#[test]
fn test_uuid() |
}
// pub struct EventsProducer {
// replicated: tokio::sync::watch::Sender<u64>,
// replica_acked: tokio::sync::broadcast::Sender<(u64, u64)>,
// deletion: tokio::sync::broadcast::Sender<(Bytes, Option<Bytes>, u64)>,
// }
//
// #[derive(Clone, Debug)]
// pub struct EventsConsumer {
// watching: u8,
// replicated: tokio::sync::watch::Receiver<u64>,
// replica_acked: tokio::sync::broadcast::Receiver<(u64, u64)>, // (uuid, uuid_he_acked)
// deletion: tokio::sync::broadcast::Receiver<(Bytes, Option<Bytes>, u64)>, // (key, field/member, uuid)
// }
//
// pub fn new_events_chann() -> (EventsProducer, EventsConsumer) {
// let (r_tx, r_rx) = tokio::sync::watch::channel(0);
// let (ra_tx, ra_rx) = tokio::sync::broadcast::channel(1024);
// let (d_tx, d_rx) = tokio::sync::broadcast::channel(1024);
// (EventsProducer{
// replicated: r_tx,
// replica_acked: ra_tx,
// deletion: d_tx
// }, EventsConsumer{
// watching: 0,
// replicated: r_rx,
// replica_acked: ra_rx,
// deletion: d_rx
// })
// }
// TODO, optimization needed
pub struct EventsProducer {
events: tokio::sync::broadcast::Sender<Event>,
}
#[derive(Debug)]
pub struct EventsConsumer {
watching: u8,
events: tokio::sync::broadcast::Receiver<Event>,
}
pub fn new_events_chann() -> (EventsProducer, EventsConsumer) {
let (tx, rx) = tokio::sync::broadcast::channel(1024);
(EventsProducer{
events: tx
}, EventsConsumer{
watching: 0,
events: rx
})
}
impl EventsProducer {
fn trigger(&mut self, e: Event) {
let _ = self.events.send(e);
}
pub fn new_consumer(&self) -> EventsConsumer {
EventsConsumer::new(self.events.subscribe())
}
}
#[derive(Debug, Clone)]
pub enum Event {
Replicated(u64),
ReplicaAcked((u64, u64)),
Deleted((Bytes, Option<Bytes>, u64)),
}
pub const EVENT_TYPE_REPLICATED: u8 = 1;
pub const EVENT_TYPE_REPLICA_ACKED: u8 = 1<<1;
pub const EVENT_TYPE_DELETED: u8 = 1<<2;
impl EventsConsumer {
pub fn new(rx: tokio::sync::broadcast::Receiver<Event>) -> Self {
Self{
watching: 0,
events: rx
}
}
pub fn watch(&mut self, t: u8) {
self.watching |= t;
}
pub async fn occured(&mut self) -> Event {
loop {
let e = self.events.recv().await.unwrap();
let flag = match &e {
Event::Replicated(_) => EVENT_TYPE_REPLICATED,
Event::ReplicaAcked(_) => EVENT_TYPE_REPLICA_ACKED,
Event::Deleted(_) => EVENT_TYPE_DELETED,
};
if self.watching & flag > 0 {
return e;
}
}
}
}
| {
let mut server = Server::new(&Conf);
let mut prev = 0;
for _ in 0..1000 {
let c = server.next_uuid(true);
println!("{}, {}", prev, c);
assert!(c > prev);
prev = c;
}
} |
auth-http-header.service.ts | import { HttpEvent, HttpHandler, HttpRequest } from '@angular/common/http';
import { Injectable, OnDestroy } from '@angular/core';
import {
combineLatest,
defer,
EMPTY,
Observable,
queueScheduler,
Subject,
Subscription,
using,
} from 'rxjs';
import {
filter,
map,
observeOn,
pairwise,
shareReplay,
skipWhile,
switchMap,
take,
tap,
withLatestFrom, | import { GlobalMessageType } from '../../../global-message/models/global-message.model';
import { OccEndpointsService } from '../../../occ/services/occ-endpoints.service';
import { RoutingService } from '../../../routing/facade/routing.service';
import { AuthService } from '../facade/auth.service';
import { AuthToken } from '../models/auth-token.model';
import { AuthRedirectService } from './auth-redirect.service';
import { AuthStorageService } from './auth-storage.service';
import { OAuthLibWrapperService } from './oauth-lib-wrapper.service';
/**
* Extendable service for `AuthInterceptor`.
*/
@Injectable({
providedIn: 'root',
})
export class AuthHttpHeaderService implements OnDestroy {
/**
* Indicates whether the access token is being refreshed
*
* @deprecated will be removed in the next major. Use `AuthService.refreshInProgress$` instead.
*/
// TODO:#13421 - legacy, remove this flag
protected refreshInProgress = false;
/**
* Starts the refresh of the access token
*/
protected refreshTokenTrigger$ = new Subject<AuthToken>();
/**
* Internal token streams which reads the latest from the storage.
* Emits the token or `undefined`
*/
protected token$: Observable<
AuthToken | undefined
> = this.authStorageService
.getToken()
.pipe(map((token) => (token?.access_token ? token : undefined)));
/**
* Compares the previous and the new token in order to stop the refresh or logout processes
*/
protected stopProgress$ = this.token$.pipe(
// Keeps the previous and the new token
pairwise(),
tap(([oldToken, newToken]) => {
// if we got the new token we know that either the refresh or logout finished
if (oldToken?.access_token !== newToken?.access_token) {
this.authService.setLogoutProgress(false);
this.authService.setRefreshProgress(false);
}
})
);
/**
* Refreshes the token only if currently there's no refresh nor logout in progress.
* If the refresh token is not present, it triggers the logout process
*/
protected refreshToken$ = this.refreshTokenTrigger$.pipe(
withLatestFrom(
this.authService.refreshInProgress$,
this.authService.logoutInProgress$
),
filter(
([, refreshInProgress, logoutInProgress]) =>
!refreshInProgress && !logoutInProgress
),
tap(([token]) => {
if (token?.refresh_token) {
this.oAuthLibWrapperService.refreshToken();
this.authService.setRefreshProgress(true);
} else {
this.handleExpiredRefreshToken();
}
})
);
/**
* Kicks of the process by listening to the new token and refresh token processes.
* This token should be used when retrying the failed http request.
*/
protected tokenToRetryRequest$ = using(
() => this.refreshToken$.subscribe(),
() => this.getStableToken()
).pipe(shareReplay({ refCount: true, bufferSize: 1 }));
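  // `using` ties the lifetime of the refreshToken$ subscription to that of
  // tokenToRetryRequest$, so refresh triggers are only acted upon while at least
  // one failed request is waiting for a token, and the shareReplay with
  // refCount lets parallel 401s share a single refresh instead of each starting
  // their own.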
protected subscriptions = new Subscription();
constructor(
protected authService: AuthService,
protected authStorageService: AuthStorageService,
protected oAuthLibWrapperService: OAuthLibWrapperService,
protected routingService: RoutingService,
protected occEndpoints: OccEndpointsService,
protected globalMessageService: GlobalMessageService,
protected authRedirectService: AuthRedirectService
) {
// We need to have stopProgress$ stream active for the whole time,
// so when the logout finishes we finish it's process.
// It could happen when retryToken$ is not active.
this.subscriptions.add(this.stopProgress$.subscribe());
}
/**
* Checks if request should be handled by this service (if it's OCC call).
*/
public shouldCatchError(request: HttpRequest<any>): boolean {
return this.isOccUrl(request.url);
}
public shouldAddAuthorizationHeader(request: HttpRequest<any>): boolean {
const hasAuthorizationHeader = !!this.getAuthorizationHeader(request);
const isOccUrl = this.isOccUrl(request.url);
return !hasAuthorizationHeader && isOccUrl;
}
/**
* Adds `Authorization` header for OCC calls.
*/
public alterRequest(
request: HttpRequest<any>,
token?: AuthToken
): HttpRequest<any> {
const hasAuthorizationHeader = !!this.getAuthorizationHeader(request);
const isOccUrl = this.isOccUrl(request.url);
if (!hasAuthorizationHeader && isOccUrl) {
return request.clone({
setHeaders: {
...this.createAuthorizationHeader(token),
},
});
}
return request;
}
protected isOccUrl(url: string): boolean {
return url.includes(this.occEndpoints.getBaseUrl());
}
protected getAuthorizationHeader(request: HttpRequest<any>): string | null {
const rawValue = request.headers.get('Authorization');
return rawValue;
}
protected createAuthorizationHeader(
token?: AuthToken
): { Authorization: string } | {} {
if (token?.access_token) {
return {
Authorization: `${token.token_type || 'Bearer'} ${token.access_token}`,
};
}
let currentToken: AuthToken | undefined;
this.authStorageService
.getToken()
.subscribe((token) => (currentToken = token))
.unsubscribe();
if (currentToken?.access_token) {
return {
Authorization: `${currentToken.token_type || 'Bearer'} ${
currentToken.access_token
}`,
};
}
return {};
}
/**
* Refreshes access_token and then retries the call with the new token.
*/
public handleExpiredAccessToken(
request: HttpRequest<any>,
next: HttpHandler,
// TODO:#13421 make required
initialToken?: AuthToken
): Observable<HttpEvent<AuthToken>> {
// TODO:#13421 remove this if-statement, and just return the stream.
if (initialToken) {
return this.getValidToken(initialToken).pipe(
switchMap((token) =>
// we break the stream with EMPTY when we don't have the token. This prevents sending the requests with `Authorization: bearer undefined` header
token
? next.handle(this.createNewRequestWithNewToken(request, token))
: EMPTY
)
);
}
// TODO:#13421 legacy - remove in 5.0
return this.handleExpiredToken().pipe(
switchMap((token) => {
return token
? next.handle(this.createNewRequestWithNewToken(request, token))
: EMPTY;
})
);
}
/**
* Logout user, redirected to login page and informs about expired session.
*/
public handleExpiredRefreshToken(): void {
// There might be 2 cases:
// 1. when user is already on some page (router is stable) and performs an UI action
// that triggers http call (i.e. button click to save data in backend)
// 2. when user is navigating to some page and a route guard triggers the http call
// (i.e. guard loading cms page data)
//
// In the second case, we want to remember the anticipated url before we navigate to
// the login page, so we can redirect back to that URL after user authenticates.
this.authRedirectService.saveCurrentNavigationUrl();
// Logout user
// TODO(#9638): Use logout route when it will support passing redirect url
this.authService.coreLogout().finally(() => {
this.routingService.go({ cxRoute: 'login' });
this.globalMessageService.add(
{
key: 'httpHandlers.sessionExpired',
},
GlobalMessageType.MSG_TYPE_ERROR
);
});
}
// TODO:#13421 - remove this method
/**
* Attempts to refresh token if possible.
* If it is not possible calls `handleExpiredRefreshToken`.
*
* @return observable which omits new access_token. (Warn: might never emit!).
*
* @deprecated will be removed in the next major. Use `getValidToken()` instead
*/
protected handleExpiredToken(): Observable<AuthToken | undefined> {
const stream = this.authStorageService.getToken();
let oldToken: AuthToken;
return stream.pipe(
tap((token) => {
if (
token.access_token &&
token.refresh_token &&
!oldToken &&
!this.refreshInProgress
) {
this.refreshInProgress = true;
this.oAuthLibWrapperService.refreshToken();
} else if (!token.refresh_token) {
this.handleExpiredRefreshToken();
}
oldToken = oldToken || token;
}),
filter((token) => oldToken.access_token !== token.access_token),
tap(() => {
this.refreshInProgress = false;
}),
map((token) => (token?.access_token ? token : undefined)),
take(1)
);
}
/**
* Emits the token or `undefined` only when the refresh or the logout processes are finished.
*/
getStableToken(): Observable<AuthToken | undefined> {
return combineLatest([
this.token$,
this.authService.refreshInProgress$,
this.authService.logoutInProgress$,
]).pipe(
observeOn(queueScheduler),
filter(
([_, refreshInProgress, logoutInProgress]) =>
!refreshInProgress && !logoutInProgress
),
switchMap(() => this.token$)
);
}
/**
* Returns a valid access token.
* It will attempt to refresh it if the current one expired; emits after the new one is retrieved.
*/
protected getValidToken(
requestToken: AuthToken
): Observable<AuthToken | undefined> {
return defer(() => {
// flag to only refresh token only on first emission
let refreshTriggered = false;
return this.tokenToRetryRequest$.pipe(
tap((token) => {
// we want to refresh the access token only when it is old.
// this is a guard for the case when there are multiple parallel http calls
if (
token?.access_token === requestToken?.access_token &&
!refreshTriggered
) {
this.refreshTokenTrigger$.next(token);
}
refreshTriggered = true;
}),
skipWhile((token) => token?.access_token === requestToken.access_token),
take(1)
);
});
}
protected createNewRequestWithNewToken(
request: HttpRequest<any>,
token: AuthToken
): HttpRequest<any> {
request = request.clone({
setHeaders: {
Authorization: `${token.token_type || 'Bearer'} ${token.access_token}`,
},
});
return request;
}
ngOnDestroy(): void {
this.subscriptions.unsubscribe();
}
} | } from 'rxjs/operators';
import { GlobalMessageService } from '../../../global-message/facade/global-message.service'; |
ioctl_api.rs | use nix::fcntl::{open, OFlag};
use nix::sys::stat::Mode;
use std::collections::BTreeMap;
use std::fs::File;
use std::io::{self, Write};
use std::os::unix::io::FromRawFd;
use std::path::Path;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::time::Instant;
use utils::framegen::FrameGenerator;
use v4l2r::memory::{MemoryType, MmapHandle};
use v4l2r::{ioctl::*, memory::UserPtrHandle};
use v4l2r::{Format, QueueType::*};
/// Run a sample encoder on device `device_path`, which must be a `vicodec`
/// encoder instance. `lets_quit` will turn to true when Ctrl+C is pressed.
pub fn run<F: FnMut(&[u8])>(
device_path: &Path,
output_mem: MemoryType,
capture_mem: MemoryType,
lets_quit: Arc<AtomicBool>,
stop_after: Option<usize>,
mut save_output: F,
) | {
let mut fd = unsafe {
File::from_raw_fd(
open(device_path, OFlag::O_RDWR | OFlag::O_CLOEXEC, Mode::empty())
.unwrap_or_else(|_| panic!("Cannot open {}", device_path.display())),
)
};
// Check that we are dealing with vicodec.
    let caps: Capability = querycap(&fd).expect("Failed to get device capabilities");
println!(
"Opened device: {}\n\tdriver: {}\n\tbus: {}\n\tcapabilities: {}",
caps.card, caps.driver, caps.bus_info, caps.capabilities
);
if caps.driver != "vicodec" {
panic!(
"This device is {}, but this test is designed to work with the vicodec driver.",
caps.driver
);
}
// Check whether the driver uses the single or multi-planar API by
    // requesting 0 MMAP buffers on the OUTPUT queue. Whichever queue type the
    // driver actually supports will accept the request.
let (output_queue_type, _capture_queue_type, use_multi_planar) =
if reqbufs::<(), _>(&fd, VideoOutput, MemoryType::Mmap, 0).is_ok() {
(VideoOutput, VideoCapture, false)
} else if reqbufs::<(), _>(&fd, VideoOutputMplane, MemoryType::Mmap, 0).is_ok() {
(VideoOutputMplane, VideoCaptureMplane, true)
} else {
panic!("Both single-planar and multi-planar queues are unusable.");
};
println!(
"Multi-planar: {}",
if use_multi_planar { "yes" } else { "no" }
);
let (output_queue, capture_queue) = match use_multi_planar {
false => (VideoOutput, VideoCapture),
true => (VideoOutputMplane, VideoCaptureMplane),
};
// List the output formats.
let out_formats = FormatIterator::new(&fd, output_queue)
.map(|f| (f.pixelformat, f))
.collect::<BTreeMap<_, _>>();
println!("Output formats:");
for (_, fmtdesc) in out_formats.iter() {
println!("\t{}", fmtdesc);
}
// List the capture formats.
let cap_formats = FormatIterator::new(&fd, capture_queue)
.map(|f| (f.pixelformat, f))
.collect::<BTreeMap<_, _>>();
println!("Capture formats:");
for (_, fmtdesc) in cap_formats.iter() {
println!("\t{}", fmtdesc);
}
// We will encode from RGB3 to FWHT.
if !out_formats.contains_key(&b"RGB3".into()) {
panic!("RGB3 format not supported on OUTPUT queue.");
}
if !cap_formats.contains_key(&b"FWHT".into()) {
panic!("FWHT format not supported on CAPTURE queue.");
}
let mut capture_format: Format =
g_fmt(&fd, capture_queue).expect("Failed getting capture format");
// Let's just make sure the encoding format on the CAPTURE queue is FWHT.
capture_format.pixelformat = b"FWHT".into();
println!("Setting capture format: {:?}", capture_format);
let _capture_format: Format =
s_fmt(&mut fd, capture_queue, capture_format).expect("Failed setting capture format");
// We will be happy with 640x480 resolution.
let output_format = Format {
width: 640,
height: 480,
pixelformat: b"RGB3".into(),
..Default::default()
};
println!("Setting output format: {:?}", output_format);
let output_format: Format =
s_fmt(&mut fd, output_queue, output_format).expect("Failed setting output format");
let capture_format: Format = g_fmt(&fd, capture_queue).expect("Failed getting capture format");
println!("Adjusted output format: {:?}", output_format);
println!("Adjusted capture format: {:?}", capture_format);
match output_mem {
MemoryType::Mmap => (),
MemoryType::UserPtr => (),
m => panic!("Unsupported OUTPUT memory type {:?}", m),
}
match capture_mem {
MemoryType::Mmap => (),
m => panic!("Unsupported CAPTURE memory type {:?}", m),
}
// We could run this with as little as one buffer, but let's cycle between
// two for the sake of it.
// For simplicity the OUTPUT buffers will use user memory.
let num_output_buffers: usize =
reqbufs(&fd, output_queue, output_mem, 2).expect("Failed to allocate output buffers");
let num_capture_buffers: usize =
reqbufs(&fd, capture_queue, capture_mem, 2).expect("Failed to allocate capture buffers");
println!(
"Using {} output and {} capture buffers.",
num_output_buffers, num_capture_buffers
);
let mut capture_mappings = Vec::new();
for i in 0..num_capture_buffers {
let query_buf: QueryBuffer =
querybuf(&fd, capture_queue, i).expect("Failed to query buffer");
println!(
"Capture buffer {} at offset 0x{:0x}, length 0x{:0x}",
i, query_buf.planes[0].mem_offset, query_buf.planes[0].length
);
capture_mappings.push(
mmap(
&fd,
query_buf.planes[0].mem_offset,
query_buf.planes[0].length,
)
.expect("Failed to map buffer"),
);
}
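    // The CAPTURE buffers are mmap'ed once here and the mappings are reused for
    // every dequeued frame below; OUTPUT buffers are handled per frame instead,
    // either mapped on demand (Mmap) or backed by the plain Vec<u8> user-pointer
    // handles allocated next.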
let output_image_size = output_format.plane_fmt[0].sizeimage as usize;
let mut output_buffers: Vec<UserPtrHandle<Vec<u8>>> = match output_mem {
MemoryType::Mmap => Default::default(),
MemoryType::UserPtr => std::iter::repeat(vec![0u8; output_image_size])
.take(num_output_buffers)
.map(UserPtrHandle::from)
.collect(),
_ => unreachable!(),
};
// Start streaming.
streamon(&fd, output_queue).expect("Failed to start output queue");
streamon(&fd, capture_queue).expect("Failed to start capture queue");
let mut frame_gen = FrameGenerator::new(
output_format.width as usize,
output_format.height as usize,
output_format.plane_fmt[0].bytesperline as usize,
)
.expect("Failed to create frame generator");
let mut cpt = 0usize;
let mut total_size = 0usize;
let start_time = Instant::now();
// Encode generated frames until Ctrl+c is pressed.
while !lets_quit.load(Ordering::SeqCst) {
if let Some(max_cpt) = stop_after {
if cpt >= max_cpt {
break;
}
}
let output_buffer_index = cpt % num_output_buffers;
        let capture_buffer_index = cpt % num_capture_buffers;
// Generate the frame data and buffer to queue.
match output_mem {
MemoryType::Mmap => {
let buffer_info: QueryBuffer =
querybuf(&fd, output_queue_type, output_buffer_index)
.expect("Failed to query output buffer");
let plane = &buffer_info.planes[0];
let mut mapping =
mmap(&fd, plane.mem_offset, plane.length).expect("Failed to map output buffer");
frame_gen
.next_frame(&mut mapping)
.expect("Failed to generate frame");
let out_qbuf = QBuffer::<MmapHandle> {
planes: vec![QBufPlane::new(frame_gen.frame_size())],
..Default::default()
};
qbuf(&fd, output_queue, output_buffer_index, out_qbuf)
}
MemoryType::UserPtr => {
let output_buffer = &mut output_buffers[output_buffer_index];
frame_gen
.next_frame(&mut output_buffer.0)
.expect("Failed to generate frame");
let out_qbuf = QBuffer::<UserPtrHandle<Vec<u8>>> {
planes: vec![QBufPlane::new_from_handle(
output_buffer,
output_buffer.0.len(),
)],
..Default::default()
};
qbuf(&fd, output_queue, output_buffer_index, out_qbuf)
}
_ => unreachable!(),
}
.expect("Error queueing output buffer");
let cap_qbuf = QBuffer::<MmapHandle> {
planes: vec![QBufPlane::new(0)],
..Default::default()
};
qbuf(&fd, capture_queue, capture_buffer_index, cap_qbuf)
.expect("Error queueing capture buffer");
// Now dequeue the work that we just scheduled.
// We can disregard the OUTPUT buffer since it does not contain any
// useful data for us.
dqbuf::<(), _>(&fd, output_queue).expect("Failed to dequeue output buffer");
// The CAPTURE buffer, on the other hand, we want to examine more closely.
let cap_dqbuf: DqBuffer =
dqbuf(&fd, capture_queue).expect("Failed to dequeue capture buffer");
let bytes_used = cap_dqbuf.get_first_plane().bytesused() as usize;
total_size = total_size.wrapping_add(bytes_used);
let elapsed = start_time.elapsed();
let fps = cpt as f64 / elapsed.as_millis() as f64 * 1000.0;
print!(
"\rEncoded buffer {:#5}, index: {:#2}), bytes used:{:#6} total encoded size:{:#8} fps: {:#5.2}",
cap_dqbuf.sequence(), cap_dqbuf.index(), bytes_used, total_size, fps
);
io::stdout().flush().unwrap();
save_output(&capture_mappings[cap_dqbuf.index() as usize].as_ref()[0..bytes_used]);
cpt = cpt.wrapping_add(1);
}
// Stop streaming.
streamoff(&fd, capture_queue).expect("Failed to stop capture queue");
streamoff(&fd, output_queue).expect("Failed to stop output queue");
// Clear the mappings
drop(capture_mappings);
// Free the buffers.
reqbufs::<(), _>(&fd, capture_queue, MemoryType::Mmap, 0)
.expect("Failed to release capture buffers");
reqbufs::<(), _>(&fd, output_queue, MemoryType::UserPtr, 0)
.expect("Failed to release output buffers");
// The fd will be closed as the File instance gets out of scope.
} |
|
WEMA.ts | import { Indicator, IndicatorInput } from "../indicator/indicator";
import { LinkedList } from "../Utils/LinkedList";
import { MAInput, SMA } from "./SMA";
export class WEMA extends Indicator {
public static calculate = wema;
public period: number | undefined;
public price: number[] | undefined;
public result: number[];
public generator: IterableIterator<number | undefined>;
constructor(input: MAInput) {
super(input);
const period = input.period;
const priceArray = input.values;
const exponent = 1 / period;
let sma: SMA;
this.result = [];
sma = new SMA({period, values : []});
const genFn = (function*(): IterableIterator<number | undefined> {
let tick = yield;
let prevEma;
while (true) {
if (prevEma !== undefined && tick !== undefined) {
prevEma = ((tick - prevEma) * exponent) + prevEma;
tick = yield prevEma;
} else {
tick = yield;
prevEma = sma.nextValue(tick);
if (prevEma !== undefined) {
tick = yield prevEma;
}
}
}
});
this.generator = genFn();
this.generator.next();
this.generator.next();
priceArray.forEach((tick) => {
const result = this.generator.next(tick);
if (result.value !== undefined) {
this.result.push(this.format(result.value));
}
});
}
public nextValue(price: number): number | undefined {
const result = this.generator.next(price).value;
if (result !== undefined) {
return this.format(result);
}
}
}
export function | (input: MAInput): number[] {
Indicator.reverseInputs(input);
const result = new WEMA(input).result;
if (input.reversedInput) {
result.reverse();
}
Indicator.reverseInputs(input);
return result;
}
| wema |
tests.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from unittest import mock
from django.test.utils import override_settings
from allauth.socialaccount.models import SocialAccount
from allauth.socialaccount.providers import registry
from allauth.socialaccount.tests import create_oauth2_tests
from allauth.tests import MockedResponse, patch
from .provider import BitbucketOAuth2Provider
@override_settings(SOCIALACCOUNT_QUERY_EMAIL=True)
class BitbucketOAuth2Tests(
create_oauth2_tests(registry.by_id(BitbucketOAuth2Provider.id))
):
response_data = """
{
"created_on": "2011-12-20T16:34:07.132459+00:00",
"display_name": "tutorials account",
"links": {
"avatar": {
"href":
"https://bitbucket-assetroot.s3.amazonaws.com/c/photos/2013/Nov/25/tutorials-avatar-1563784409-6_avatar.png"
},
"followers": {
"href":
"https://api.bitbucket.org/2.0/users/tutorials/followers"
},
"following": {
"href":
"https://api.bitbucket.org/2.0/users/tutorials/following"
},
"html": {
"href": "https://bitbucket.org/tutorials"
},
"repositories": {
"href":
"https://api.bitbucket.org/2.0/repositories/tutorials"
},
"self": {
"href": "https://api.bitbucket.org/2.0/users/tutorials"
}
},
"location": "Santa Monica, CA",
"type": "user",
"username": "tutorials",
"uuid": "{c788b2da-b7a2-404c-9e26-d3f077557007}",
"website": "https://tutorials.bitbucket.org/"
}
""" # noqa
email_response_data = """
{
"page": 1,
"pagelen": 10,
"size": 1,
"values": [
{
"email": "[email protected]",
"is_confirmed": true,
"is_primary": true,
"links": {
"self": {
"href":
"https://api.bitbucket.org/2.0/user/emails/[email protected]"
}
},
"type": "email"
},
{
"email": "[email protected]",
"is_confirmed": true,
"is_primary": true,
"links": {
"self": {
"href":
"https://api.bitbucket.org/2.0/user/emails/[email protected]"
}
},
"type": "email"
}
]
}
""" # noqa
def setUp(self):
super(BitbucketOAuth2Tests, self).setUp()
self.mocks = {
"requests": patch(
"allauth.socialaccount.providers" ".bitbucket_oauth2.views.requests"
)
}
self.patches = {name: mocked.start() for (name, mocked) in self.mocks.items()}
self.patches["requests"].get.side_effect = [
MockedResponse(200, self.response_data),
MockedResponse(200, self.email_response_data),
]
def tearDown(self):
|
def get_mocked_response(self):
return [MockedResponse(200, self.response_data)]
def test_account_tokens(self, multiple_login=False):
if multiple_login:
self.patches["requests"].get.side_effect = [
MockedResponse(200, self.response_data),
MockedResponse(200, self.email_response_data),
MockedResponse(200, self.response_data),
MockedResponse(200, self.email_response_data),
]
super(BitbucketOAuth2Tests, self).test_account_tokens(multiple_login)
calls = [
mock.call("https://api.bitbucket.org/2.0/user", params=mock.ANY),
mock.call("https://api.bitbucket.org/2.0/user/emails", params=mock.ANY),
]
if multiple_login:
calls.extend(
[
mock.call("https://api.bitbucket.org/2.0/user", params=mock.ANY),
mock.call(
"https://api.bitbucket.org/2.0/user/emails",
params=mock.ANY,
),
]
)
self.patches["requests"].get.assert_has_calls(calls)
def test_provider_account(self):
self.login(self.get_mocked_response())
socialaccount = SocialAccount.objects.get(uid="tutorials")
self.assertEqual(socialaccount.user.username, "tutorials")
self.assertEqual(socialaccount.user.email, "[email protected]")
account = socialaccount.get_provider_account()
self.assertEqual(account.to_str(), "tutorials account")
self.assertEqual(account.get_profile_url(), "https://bitbucket.org/tutorials")
self.assertEqual(
account.get_avatar_url(),
"https://bitbucket-assetroot.s3.amazonaws.com/c/photos/2013/Nov/25/tutorials-avatar-1563784409-6_avatar.png", # noqa
)
self.patches["requests"].get.assert_has_calls(
[
mock.call("https://api.bitbucket.org/2.0/user", params=mock.ANY),
mock.call(
"https://api.bitbucket.org/2.0/user/emails",
params=mock.ANY,
),
]
)
| for (_, mocked) in self.mocks.items():
mocked.stop() |
iotHubResource.go | // *** WARNING: this file was generated by the Pulumi SDK Generator. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package v20160203
import (
"reflect"
"github.com/pkg/errors"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)
// The description of the IoT hub.
type IotHubResource struct {
pulumi.CustomResourceState
// The Etag field is *not* required. If it is provided in the response body, it must also be provided as a header per the normal ETag convention.
Etag pulumi.StringPtrOutput `pulumi:"etag"`
// The resource location.
Location pulumi.StringOutput `pulumi:"location"`
// The resource name.
Name pulumi.StringOutput `pulumi:"name"`
// The properties of an IoT hub.
Properties IotHubPropertiesResponseOutput `pulumi:"properties"`
// The name of the resource group that contains the IoT hub. A resource group name uniquely identifies the resource group within the subscription.
Resourcegroup pulumi.StringOutput `pulumi:"resourcegroup"`
// Information about the SKU of the IoT hub.
Sku IotHubSkuInfoResponseOutput `pulumi:"sku"`
// The subscription identifier.
Subscriptionid pulumi.StringOutput `pulumi:"subscriptionid"`
// The resource tags.
Tags pulumi.StringMapOutput `pulumi:"tags"`
// The resource type.
Type pulumi.StringOutput `pulumi:"type"`
}
// NewIotHubResource registers a new resource with the given unique name, arguments, and options.
func NewIotHubResource(ctx *pulumi.Context,
name string, args *IotHubResourceArgs, opts ...pulumi.ResourceOption) (*IotHubResource, error) {
if args == nil || args.Location == nil {
return nil, errors.New("missing required argument 'Location'")
}
if args == nil || args.ResourceGroupName == nil {
return nil, errors.New("missing required argument 'ResourceGroupName'")
}
if args == nil || args.ResourceName == nil {
return nil, errors.New("missing required argument 'ResourceName'")
}
if args == nil || args.Resourcegroup == nil {
return nil, errors.New("missing required argument 'Resourcegroup'")
}
if args == nil || args.Sku == nil {
return nil, errors.New("missing required argument 'Sku'")
}
if args == nil || args.Subscriptionid == nil {
return nil, errors.New("missing required argument 'Subscriptionid'") | }
if args == nil {
args = &IotHubResourceArgs{}
}
aliases := pulumi.Aliases([]pulumi.Alias{
{
Type: pulumi.String("azure-nextgen:devices/latest:IotHubResource"),
},
{
Type: pulumi.String("azure-nextgen:devices/v20170119:IotHubResource"),
},
{
Type: pulumi.String("azure-nextgen:devices/v20170701:IotHubResource"),
},
{
Type: pulumi.String("azure-nextgen:devices/v20180122:IotHubResource"),
},
{
Type: pulumi.String("azure-nextgen:devices/v20180401:IotHubResource"),
},
{
Type: pulumi.String("azure-nextgen:devices/v20181201preview:IotHubResource"),
},
{
Type: pulumi.String("azure-nextgen:devices/v20190322:IotHubResource"),
},
{
Type: pulumi.String("azure-nextgen:devices/v20190322preview:IotHubResource"),
},
{
Type: pulumi.String("azure-nextgen:devices/v20190701preview:IotHubResource"),
},
{
Type: pulumi.String("azure-nextgen:devices/v20191104:IotHubResource"),
},
{
Type: pulumi.String("azure-nextgen:devices/v20200301:IotHubResource"),
},
{
Type: pulumi.String("azure-nextgen:devices/v20200401:IotHubResource"),
},
{
Type: pulumi.String("azure-nextgen:devices/v20200615:IotHubResource"),
},
{
Type: pulumi.String("azure-nextgen:devices/v20200710preview:IotHubResource"),
},
{
Type: pulumi.String("azure-nextgen:devices/v20200801:IotHubResource"),
},
})
opts = append(opts, aliases)
var resource IotHubResource
err := ctx.RegisterResource("azure-nextgen:devices/v20160203:IotHubResource", name, args, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
}
// GetIotHubResource gets an existing IotHubResource resource's state with the given name, ID, and optional
// state properties that are used to uniquely qualify the lookup (nil if not required).
func GetIotHubResource(ctx *pulumi.Context,
name string, id pulumi.IDInput, state *IotHubResourceState, opts ...pulumi.ResourceOption) (*IotHubResource, error) {
var resource IotHubResource
err := ctx.ReadResource("azure-nextgen:devices/v20160203:IotHubResource", name, id, state, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
}
// Input properties used for looking up and filtering IotHubResource resources.
type iotHubResourceState struct {
// The Etag field is *not* required. If it is provided in the response body, it must also be provided as a header per the normal ETag convention.
Etag *string `pulumi:"etag"`
// The resource location.
Location *string `pulumi:"location"`
// The resource name.
Name *string `pulumi:"name"`
// The properties of an IoT hub.
Properties *IotHubPropertiesResponse `pulumi:"properties"`
// The name of the resource group that contains the IoT hub. A resource group name uniquely identifies the resource group within the subscription.
Resourcegroup *string `pulumi:"resourcegroup"`
// Information about the SKU of the IoT hub.
Sku *IotHubSkuInfoResponse `pulumi:"sku"`
// The subscription identifier.
Subscriptionid *string `pulumi:"subscriptionid"`
// The resource tags.
Tags map[string]string `pulumi:"tags"`
// The resource type.
Type *string `pulumi:"type"`
}
type IotHubResourceState struct {
// The Etag field is *not* required. If it is provided in the response body, it must also be provided as a header per the normal ETag convention.
Etag pulumi.StringPtrInput
// The resource location.
Location pulumi.StringPtrInput
// The resource name.
Name pulumi.StringPtrInput
// The properties of an IoT hub.
Properties IotHubPropertiesResponsePtrInput
// The name of the resource group that contains the IoT hub. A resource group name uniquely identifies the resource group within the subscription.
Resourcegroup pulumi.StringPtrInput
// Information about the SKU of the IoT hub.
Sku IotHubSkuInfoResponsePtrInput
// The subscription identifier.
Subscriptionid pulumi.StringPtrInput
// The resource tags.
Tags pulumi.StringMapInput
// The resource type.
Type pulumi.StringPtrInput
}
func (IotHubResourceState) ElementType() reflect.Type {
return reflect.TypeOf((*iotHubResourceState)(nil)).Elem()
}
type iotHubResourceArgs struct {
// The Etag field is *not* required. If it is provided in the response body, it must also be provided as a header per the normal ETag convention.
Etag *string `pulumi:"etag"`
// The resource location.
Location string `pulumi:"location"`
// The properties of an IoT hub.
Properties *IotHubProperties `pulumi:"properties"`
// The name of the resource group that contains the IoT hub.
ResourceGroupName string `pulumi:"resourceGroupName"`
// The name of the IoT hub to create or update.
ResourceName string `pulumi:"resourceName"`
// The name of the resource group that contains the IoT hub. A resource group name uniquely identifies the resource group within the subscription.
Resourcegroup string `pulumi:"resourcegroup"`
// Information about the SKU of the IoT hub.
Sku IotHubSkuInfo `pulumi:"sku"`
// The subscription identifier.
Subscriptionid string `pulumi:"subscriptionid"`
// The resource tags.
Tags map[string]string `pulumi:"tags"`
}
// The set of arguments for constructing a IotHubResource resource.
type IotHubResourceArgs struct {
// The Etag field is *not* required. If it is provided in the response body, it must also be provided as a header per the normal ETag convention.
Etag pulumi.StringPtrInput
// The resource location.
Location pulumi.StringInput
// The properties of an IoT hub.
Properties IotHubPropertiesPtrInput
// The name of the resource group that contains the IoT hub.
ResourceGroupName pulumi.StringInput
// The name of the IoT hub to create or update.
ResourceName pulumi.StringInput
// The name of the resource group that contains the IoT hub. A resource group name uniquely identifies the resource group within the subscription.
Resourcegroup pulumi.StringInput
// Information about the SKU of the IoT hub.
Sku IotHubSkuInfoInput
// The subscription identifier.
Subscriptionid pulumi.StringInput
// The resource tags.
Tags pulumi.StringMapInput
}
func (IotHubResourceArgs) ElementType() reflect.Type {
return reflect.TypeOf((*iotHubResourceArgs)(nil)).Elem()
} | |
asset.store.ts | import { observable, action } from 'mobx';
import * as Api from '../api/asset.api';
import * as CoinTool from '@/utils/cointool';
import { IAsset, INep5Asset, IAssetList } from '../interface/asset.interface';
class Asset {
@observable public assetList:IAssetList[] = []; // asset列表
@observable public nep5List:IAssetList[] = []; // nep5列表
/**
* 获取asset
*/
@action public async getAssetList() {
let result: any = null;
try {
result = await Api.getallasset();
} catch (error) {
this.assetList = [];
return false;
}
const arr:IAsset[] = result || [];
if(arr.length !== 0){
this.assetList = arr.map((key) =>
{
const newObject = {
asset:CoinTool.toChangeAssetName(key),
id:key.id,
type:key.type,
available:key.available.toString(),
precision:key.precision
}
return newObject;
})
}
return true;
}
/**
* 获取nep5
*/
@action public async getNep5List() {
let result: any = null;
try {
result = await Api.getallnep5();
} catch (error) {
this.nep5List = [];
return false;
}
const arr:INep5Asset[] = result || [];
if(arr.length !== 0){
this.nep5List = arr.map((key) =>
{
const newObject = {
asset:key.name,
id:key.assetid,
type:"Nep5", | precision:key.decimals
}
return newObject;
})
}
return true;
}
}
export default new Asset(); | available:key.totalsupply, |
session-handler.ts | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
import {SessionHandler} from '../../backend';
import {Graph} from '../../graph';
import {Logger} from '../../instrument';
import {Operator} from '../../operators';
import {OpSet, resolveOperator} from '../../opset';
import {Session} from '../../session';
import {Tensor} from '../../tensor';
import {WebGLBackend} from '../backend-webgl';
import {WebGLInferenceHandler} from './inference-handler';
import {WEBGL_OP_RESOLVE_RULES} from './op-resolve-rules';
import {ProgramManager} from './program-manager';
import {PreferLogicalStrategy, TextureLayoutStrategy} from './texture-layout-strategy';
import {TextureManager} from './texture-manager';
import {TextureData, WebGLOperator} from './types';
export class | implements SessionHandler {
programManager: ProgramManager;
textureManager: TextureManager;
layoutStrategy: TextureLayoutStrategy;
textureDataCache: Map<Tensor.Id, TextureData>;
initializers: Set<Tensor.Id>;
packOpCache: Map<string, WebGLOperator>;
unpackOpCache: Map<string, WebGLOperator>;
pack?: boolean;
constructor(public readonly backend: WebGLBackend, public readonly context: Session.Context) {
this.layoutStrategy = new PreferLogicalStrategy(backend.glContext.maxTextureSize);
this.programManager = new ProgramManager(this.context.profiler, backend.glContext, this.layoutStrategy);
this.textureManager = new TextureManager(
backend.glContext, this.layoutStrategy, this.context.profiler,
{reuseTextures: backend.textureCacheMode === 'full'});
this.textureDataCache = new Map();
this.packOpCache = new Map();
this.unpackOpCache = new Map();
this.pack = backend.pack;
}
createInferenceHandler() {
return new WebGLInferenceHandler(this);
}
onGraphInitialized(graph: Graph): void {
const initializers = graph.getValues().filter(v => v.from === -1 && v.tensor).map(v => v.tensor!.dataId);
this.initializers = new Set(initializers);
}
isInitializer(tensorId: Tensor.Id): boolean {
return this.initializers ? this.initializers.has(tensorId) : false;
}
getTextureData(tensorId: Tensor.Id): TextureData|undefined {
return this.textureDataCache.get(tensorId);
}
setTextureData(tensorId: Tensor.Id, textureData: TextureData): void {
Logger.verbose('WebGLSessionHandler', 'Storing Texture data in cache');
this.textureDataCache.set(tensorId, textureData);
}
dispose(): void {
this.programManager.dispose();
this.textureManager.clearActiveTextures();
this.textureDataCache.forEach(td => this.textureManager.releaseTexture(td, true));
this.textureDataCache = new Map();
}
resolve(node: Graph.Node, opsets: ReadonlyArray<OpSet>, graph: Graph): Operator {
const op = resolveOperator(node, opsets, WEBGL_OP_RESOLVE_RULES);
op.initialize(node.attributes, node, graph);
return op;
}
}
| WebGLSessionHandler |
target.rs | use std::str::FromStr;
use num_bigint::BigUint;
use nimiq_block::*;
use fixed_unsigned::types::FixedUnsigned10;
use num_traits::pow;
use primitives::policy;
#[test]
fn | () {
assert_eq!(Target::from(TargetCompact::from(0x1f010000)), [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0].into());
assert_eq!(Target::from(TargetCompact::from(0x1e010000)), [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0].into());
assert_eq!(Target::from(TargetCompact::from(0x1f000100)), [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0].into());
assert_eq!(Target::from(TargetCompact::from(0x01000001)), [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1].into());
assert_eq!(Target::from(TargetCompact::from(0x0200ffff)), [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff].into());
assert_eq!(Target::from(TargetCompact::from(0x037fffff)), [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x7f, 0xff, 0xff].into());
assert_eq!(Target::from(TargetCompact::from(0x0380ffff)), [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0xff, 0xff].into());
assert_eq!(Target::from(TargetCompact::from(0x040080ff)), [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0xff, 0].into());
}
#[test]
fn it_correctly_calculates_compact_from_target() {
assert_eq!(TargetCompact::from(Target::from([0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])), 0x1f010000.into());
assert_eq!(TargetCompact::from(Target::from([0, 0, 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])), 0x1f008000.into());
assert_eq!(TargetCompact::from(Target::from([0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])), 0x1e010000.into());
assert_eq!(TargetCompact::from(Target::from([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1])), 0x01000001.into());
assert_eq!(TargetCompact::from(Target::from([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])), 0x0200ffff.into());
assert_eq!(TargetCompact::from(Target::from([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x7f, 0xff, 0xff])), 0x037fffff.into());
assert_eq!(TargetCompact::from(Target::from([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0xff, 0xff])), 0x040080ff.into());
}
#[test]
fn it_correctly_converts_from_fixed_unsigned_to_target() {
assert_eq!(Target::from(FixedUnsigned10::from(pow(BigUint::from(2u64), 240))), [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0].into());
assert_eq!(Target::from(FixedUnsigned10::from(1u64)), [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1].into());
assert_eq!(Target::from(FixedUnsigned10::from_str("65535.923382").unwrap()), [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff].into());
}
#[test]
fn it_correctly_converts_from_target_to_fixed_unsigned() {
assert_eq!(FixedUnsigned10::from(pow(BigUint::from(2u64), 240)), Target::from([0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]).into());
assert_eq!(FixedUnsigned10::from(1u64), Target::from([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]).into());
assert_eq!(FixedUnsigned10::from(65535u64), Target::from([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff]).into());
}
#[test]
fn it_correctly_calculates_target_from_difficulty() {
assert_eq!(Target::from(Difficulty::from(1u64)), [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0].into());
assert_eq!(Target::from(Difficulty::from(256u64)), [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0].into());
assert_eq!(Target::from(Difficulty::from(policy::BLOCK_TARGET_MAX.clone())), [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1].into());
}
#[test]
fn it_correctly_calculates_compact_from_difficulty() {
assert_eq!(TargetCompact::from(Difficulty::from(1u64)), 0x1f010000.into());
assert_eq!(TargetCompact::from(Difficulty::from(250u64)), 0x1e010624.into());
assert_eq!(TargetCompact::from(Difficulty::from(256u64)), 0x1e010000.into());
assert_eq!(TargetCompact::from(Difficulty::from(FixedUnsigned10::from(pow(BigUint::from(2u64), 32) - 1u64))), 0x1b010000.into());
assert_eq!(TargetCompact::from(Difficulty::from(FixedUnsigned10::from(pow(BigUint::from(2u64), 53) - 1u64))), 0x18080000.into());
assert_eq!(TargetCompact::from(Difficulty::from(policy::BLOCK_TARGET_MAX.clone())), 0x01000001.into());
}
#[test]
fn it_correctly_calculates_target_depth() {
assert_eq!(Target::from(TargetCompact::from(0x1f010000)).get_depth(), 0);
assert_eq!(Target::from(TargetCompact::from(0x1f008f00)).get_depth(), 0);
assert_eq!(Target::from(TargetCompact::from(0x1e800000)).get_depth(), 1);
assert_eq!(Target::from(TargetCompact::from(0x1e600000)).get_depth(), 1);
assert_eq!(Target::from(TargetCompact::from(0x1e400000)).get_depth(), 2);
assert_eq!(Target::from(TargetCompact::from(0x01000002)).get_depth(), 239);
assert_eq!(Target::from(TargetCompact::from(0x01000001)).get_depth(), 240);
}
| it_correctly_calculates_target_from_compact |
blackhole.rs | use crate::{
buffers::Acker,
event::{self, Event},
topology::config::{DataType, SinkConfig, SinkDescription},
};
use futures::{future, AsyncSink, Future, Poll, Sink, StartSend};
use serde::{Deserialize, Serialize};
pub struct BlackholeSink {
total_events: usize,
total_raw_bytes: usize,
config: BlackholeConfig,
acker: Acker,
}
#[derive(Deserialize, Serialize, Debug, Clone)]
pub struct BlackholeConfig {
pub print_amount: usize,
}
inventory::submit! {
SinkDescription::new_without_default::<BlackholeConfig>("blackhole")
}
#[typetag::serde(name = "blackhole")]
impl SinkConfig for BlackholeConfig {
fn | (&self, acker: Acker) -> crate::Result<(super::RouterSink, super::Healthcheck)> {
let sink = Box::new(BlackholeSink::new(self.clone(), acker));
let healthcheck = Box::new(healthcheck());
Ok((sink, healthcheck))
}
fn input_type(&self) -> DataType {
DataType::Log
}
fn sink_type(&self) -> &'static str {
"blackhole"
}
}
fn healthcheck() -> impl Future<Item = (), Error = crate::Error> {
future::ok(())
}
impl BlackholeSink {
pub fn new(config: BlackholeConfig, acker: Acker) -> Self {
BlackholeSink {
config,
total_events: 0,
total_raw_bytes: 0,
acker,
}
}
}
impl Sink for BlackholeSink {
type SinkItem = Event;
type SinkError = ();
fn start_send(&mut self, item: Self::SinkItem) -> StartSend<Self::SinkItem, Self::SinkError> {
let message_len = item
.as_log()
.get(&event::MESSAGE)
.map(|v| v.as_bytes().len())
.unwrap_or(0);
self.total_events += 1;
self.total_raw_bytes += message_len;
trace!(raw_bytes_counter = message_len, events_counter = 1);
if self.total_events % self.config.print_amount == 0 {
info!({
events = self.total_events,
raw_bytes_collected = self.total_raw_bytes
}, "Total events collected");
}
self.acker.ack(1);
Ok(AsyncSink::Ready)
}
fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
Ok(().into())
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::buffers::Acker;
use crate::test_util::random_events_with_stream;
use crate::topology::config::SinkConfig;
#[test]
fn blackhole() {
let config = BlackholeConfig { print_amount: 10 };
let (sink, _) = config.build(Acker::Null).unwrap();
let (_input_lines, events) = random_events_with_stream(100, 10);
let _ = sink.send_all(events).wait().unwrap();
}
}
| build |
enums.go | package customsearch
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
// ErrorCode enumerates the values for error code.
type ErrorCode string
const (
// InsufficientAuthorization ...
InsufficientAuthorization ErrorCode = "InsufficientAuthorization"
// InvalidAuthorization ...
InvalidAuthorization ErrorCode = "InvalidAuthorization"
// InvalidRequest ...
InvalidRequest ErrorCode = "InvalidRequest"
// None ...
None ErrorCode = "None"
// RateLimitExceeded ...
RateLimitExceeded ErrorCode = "RateLimitExceeded"
// ServerError ...
ServerError ErrorCode = "ServerError"
)
// PossibleErrorCodeValues returns an array of possible values for the ErrorCode const type.
func PossibleErrorCodeValues() []ErrorCode {
return []ErrorCode{InsufficientAuthorization, InvalidAuthorization, InvalidRequest, None, RateLimitExceeded, ServerError}
}
// ErrorSubCode enumerates the values for error sub code.
type ErrorSubCode string
const (
// AuthorizationDisabled ...
AuthorizationDisabled ErrorSubCode = "AuthorizationDisabled"
// AuthorizationExpired ...
AuthorizationExpired ErrorSubCode = "AuthorizationExpired"
// AuthorizationMissing ...
AuthorizationMissing ErrorSubCode = "AuthorizationMissing"
// AuthorizationRedundancy ...
AuthorizationRedundancy ErrorSubCode = "AuthorizationRedundancy"
// Blocked ...
Blocked ErrorSubCode = "Blocked"
// HTTPNotAllowed ...
HTTPNotAllowed ErrorSubCode = "HttpNotAllowed"
// NotImplemented ...
NotImplemented ErrorSubCode = "NotImplemented"
// ParameterInvalidValue ...
ParameterInvalidValue ErrorSubCode = "ParameterInvalidValue"
// ParameterMissing ...
ParameterMissing ErrorSubCode = "ParameterMissing"
// ResourceError ...
ResourceError ErrorSubCode = "ResourceError"
// UnexpectedError ...
UnexpectedError ErrorSubCode = "UnexpectedError"
)
// PossibleErrorSubCodeValues returns an array of possible values for the ErrorSubCode const type.
func PossibleErrorSubCodeValues() []ErrorSubCode {
return []ErrorSubCode{AuthorizationDisabled, AuthorizationExpired, AuthorizationMissing, AuthorizationRedundancy, Blocked, HTTPNotAllowed, NotImplemented, ParameterInvalidValue, ParameterMissing, ResourceError, UnexpectedError}
}
// SafeSearch enumerates the values for safe search.
type SafeSearch string
const (
// Moderate ...
Moderate SafeSearch = "Moderate"
// Off ...
Off SafeSearch = "Off"
// Strict ...
Strict SafeSearch = "Strict"
)
// PossibleSafeSearchValues returns an array of possible values for the SafeSearch const type.
func PossibleSafeSearchValues() []SafeSearch {
return []SafeSearch{Moderate, Off, Strict}
}
// TextFormat enumerates the values for text format.
type TextFormat string
const (
// HTML ...
HTML TextFormat = "Html"
// Raw ...
Raw TextFormat = "Raw"
)
// PossibleTextFormatValues returns an array of possible values for the TextFormat const type.
func | () []TextFormat {
return []TextFormat{HTML, Raw}
}
// Type enumerates the values for type.
type Type string
const (
// TypeAnswer ...
TypeAnswer Type = "Answer"
// TypeCreativeWork ...
TypeCreativeWork Type = "CreativeWork"
// TypeErrorResponse ...
TypeErrorResponse Type = "ErrorResponse"
// TypeIdentifiable ...
TypeIdentifiable Type = "Identifiable"
// TypeResponse ...
TypeResponse Type = "Response"
// TypeResponseBase ...
TypeResponseBase Type = "ResponseBase"
// TypeSearchResponse ...
TypeSearchResponse Type = "SearchResponse"
// TypeSearchResultsAnswer ...
TypeSearchResultsAnswer Type = "SearchResultsAnswer"
// TypeThing ...
TypeThing Type = "Thing"
// TypeWebPage ...
TypeWebPage Type = "WebPage"
// TypeWebWebAnswer ...
TypeWebWebAnswer Type = "Web/WebAnswer"
)
// PossibleTypeValues returns an array of possible values for the Type const type.
func PossibleTypeValues() []Type {
return []Type{TypeAnswer, TypeCreativeWork, TypeErrorResponse, TypeIdentifiable, TypeResponse, TypeResponseBase, TypeSearchResponse, TypeSearchResultsAnswer, TypeThing, TypeWebPage, TypeWebWebAnswer}
}
| PossibleTextFormatValues |
index.js | import img1 from './img1.jpg'
import img2 from './img2.jpg'
import img3 from './img3.jpg'
import img4 from './img4.jpg'
import img5 from './img5.jpg'
import img6 from './img6.jpg'
import img7 from './img7.jpg'
import img8 from './img8.jpg'
const spaAvedaCharisma = [img1, img2, img3, img4, img5, img6, img7, img8]
| export default spaAvedaCharisma | |
run_generate_classes.py | # -*- coding: utf-8 -*-
import sys
from os.path import dirname, abspath, normpath, join, realpath
from os import listdir, remove, system
import json
from datetime import datetime
begin = len(normpath(abspath(join(dirname(__file__), "../.."))))
end = len(normpath(abspath(join(dirname(__file__), ".."))))
MAIN_DIR = dirname(realpath(__file__))
package_name = MAIN_DIR[begin + 1 : end]
# Add the directory to the python path
sys.path.append(MAIN_DIR[:begin])
| )
exec("from " + package_name + ".Generator.read_fct import read_all")
exec("from " + package_name + ".definitions import MAIN_DIR, DOC_DIR, INT_DIR")
# List of the main packages (to sort the classes)
PACKAGE_LIST = ["Geometry", "Machine", "Material", "Slot", "Import"]
def generate_code(root_path, gen_dict=None):
"""Generate pyleecan Classes code according to doc in root_path
Parameters
----------
root_path : str
Path to the main folder of Pyleecan
gen_dict : dict
Generation dictionary (contains all the csv data)
Returns
-------
None
"""
CLASS_DIR = join(root_path, "Classes")
FUNC_DIR = join(root_path, "Functions")
DOC_DIR = join(root_path, "Generator", "ClassesRef")
print("Reading classes csv in: " + DOC_DIR)
print("Saving generated files in: " + CLASS_DIR)
path = __file__[__file__.index(package_name) :]
path = path.replace("\\", "/")
# Deleting all the previous class
print("Deleting old class files...")
for file_name in listdir(CLASS_DIR):
if file_name[0] != "_":
remove(join(CLASS_DIR, file_name))
# A file to import every classes quickly
import_file = open(join(CLASS_DIR, "import_all.py"), "w")
import_file.write("# -*- coding: utf-8 -*-\n\n")
import_file.write('"""File generated by generate_code() - \n')
import_file.write('WARNING! All changes made in this file will be lost!\n"""\n\n')
# A file to select the constructor according to a string
load_file = open(join(FUNC_DIR, "load_switch.py"), "w")
load_file.write("# -*- coding: utf-8 -*-\n")
load_file.write('"""File generated by generate_code() - \n')
load_file.write('WARNING! All changes made in this file will be lost!\n"""\n\n')
load_file.write("from ..Classes.import_all import *\n\n")
load_file.write("load_switch = {\n")
# Read all the csv files
if gen_dict is None:
gen_dict = read_all(DOC_DIR)
# Generate all the class files (sorted to remove "commit noise")
for class_name, _ in iter(sorted(list(gen_dict.items()))):
import_file.write(
"from ..Classes." + class_name + " import " + class_name + "\n"
)
load_file.write(' "' + class_name + '": ' + class_name + ",\n")
print("Generation of " + class_name + " class")
generate_class(gen_dict, class_name, CLASS_DIR)
import_file.close()
load_file.write("}\n")
load_file.close()
print("Generation of load_switch.py")
print("Generation of import_all.py")
# Save gen_dict
class_dict_file = join(CLASS_DIR, "Class_Dict.json")
with open(class_dict_file, "w") as json_file:
json.dump(gen_dict, json_file, sort_keys=True, indent=4, separators=(",", ": "))
if __name__ == "__main__":
gen_dict = read_all(DOC_DIR, is_internal=False, in_path=INT_DIR)
generate_code(MAIN_DIR, gen_dict)
# Run black
try:
import black
system('"{}" -m black .'.format(sys.executable))
if black.__version__.split(".")[0] != "20":
print("\n############################################")
print(
"WARNING: The official version of black for pyleecan is 20, please update your black version"
)
print("############################################\n")
except ImportError:
print("/!\\ Please install and run black (version 20) /!\\")
now = datetime.now()
print("End at: ", now.strftime("%H:%M:%S")) | exec(
"from "
+ package_name
+ ".Generator.ClassGenerator.class_generator import generate_class" |
vote_set.go | package types
import (
"bytes"
"fmt"
"strings"
"sync"
"github.com/pkg/errors"
cmn "github.com/tendermint/tendermint/libs/common"
)
// UNSTABLE
// XXX: duplicate of p2p.ID to avoid dependence between packages.
// Perhaps we can have a minimal types package containing this (and other things?)
// that both `types` and `p2p` import ?
type P2PID string
/*
VoteSet helps collect signatures from validators at each height+round for a
predefined vote type.
投票集合主要是从验证者收集签名
We need VoteSet to be able to keep track of conflicting votes when validators
double-sign. Yet, we can't keep track of *all* the votes seen, as that could
be a DoS attack vector.
为了防止dos攻击,需要保证不能出现double-sign
投票集合存储地方
1.voteSet.votes
2.VoteSet.Votesbyblock
There are two storage areas for votes.
1. voteSet.votes
2. voteSet.votesByBlock
`.votes` is the "canonical" list of votes. It always has at least one vote,
if a vote from a validator had been seen at all. Usually it keeps track of
the first vote seen, but when a 2/3 majority is found, votes for that get priority
and are copied over from `.votesByBlock`.
`.votesByBlock` keeps track of a list of votes for a particular block. There
are two ways a &blockVotes{} gets created in `.votesByBlock`.
1. the first vote seen by a validator was for the particular block.
2. a peer claims to have seen 2/3 majority for the particular block.
Since the first vote from a validator will always get added in `.votesByBlock`
, all votes in `.votes` will have a corresponding entry in `.votesByBlock`.
When a &blockVotes{} in `.votesByBlock` reaches a 2/3 majority quorum, its
votes are copied into `.votes`.
All this is memory bounded because conflicting votes only get added if a peer
told us to track that block, each peer only gets to tell us 1 such block, and,
there's only a limited number of peers.
NOTE: Assumes that the sum total of voting power does not exceed MaxUInt64.
*/
type VoteSet struct {
chainID string//链ID
height int64//高度
round int//轮次
type_ SignedMsgType//签名信息类型
valSet *ValidatorSet//验证者集合
mtx sync.Mutex//锁
votesBitArray *cmn.BitArray//投票的二进制表示
votes []*Vote // 投票集合
sum int64 // 现在票权总值
maj23 *BlockID // 第一次达到2/3权值区块id
votesByBlock map[string]*blockVotes // 与votes相同,只是备份存储
peerMaj23s map[P2PID]BlockID // 其他节点到达2/3的区块id
}
// Constructs a new VoteSet struct used to accumulate votes for given height/round.
//maj23如果等于nil,说明还没区块通过
func NewVoteSet(chainID string, height int64, round int, type_ SignedMsgType, valSet *ValidatorSet) *VoteSet {
if height == 0 {
cmn.PanicSanity("Cannot make VoteSet for height == 0, doesn't make sense.")
}
return &VoteSet{
chainID: chainID,
height: height,
round: round,
type_: type_,
valSet: valSet,
votesBitArray: cmn.NewBitArray(valSet.Size()),
votes: make([]*Vote, valSet.Size()),
sum: 0,
maj23: nil,
votesByBlock: make(map[string]*blockVotes, valSet.Size()),
peerMaj23s: make(map[P2PID]BlockID),
}
}
func (voteSet *VoteSet) ChainID() string {
return voteSet.chainID
}
func (voteSet *VoteSet) Height() int64 {
if voteSet == nil {
return 0
}
return voteSet.height
}
func (voteSet *VoteSet) Round() int {
if voteSet == nil {
return -1
}
return voteSet.round
}
func (voteSet *VoteSet) Type() byte {
if voteSet == nil {
return 0x00
}
return byte(voteSet.type_)
}
func (voteSet *VoteSet) Size() int {
if voteSet == nil {
return 0
}
return voteSet.valSet.Size()
}
// Returns added=true if vote is valid and new.
// Otherwise returns err=ErrVote[
// UnexpectedStep | InvalidIndex | InvalidAddress |
// InvalidSignature | InvalidBlockHash | ConflictingVotes ]
// Duplicate votes return added=false, err=nil.
// Conflicting votes return added=*, err=ErrVoteConflictingVotes.
// NOTE: vote should not be mutated after adding.
// NOTE: VoteSet must not be nil
// NOTE: Vote must not be nil
func (voteSet *VoteSet) AddVote(vote *Vote) (added bool, err error) {
if voteSet == nil {
cmn.PanicSanity("AddVote() on nil VoteSet")
}
voteSet.mtx.Lock()
defer voteSet.mtx.Unlock()
return voteSet.addVote(vote)
}
// NOTE: Validates as much as possible before attempting to verify the signature.
func (voteSet *VoteSet) addVote(vote *Vote) (added bool, err error) {
if vote == nil {
return false, ErrVoteNil
}
//取出vote的具体值
valIndex := vote.ValidatorIndex
valAddr := vote.ValidatorAddress
blockKey := vote.BlockID.Key()
//投票者的基本index如果小于0,就是不在
// Ensure that validator index was set
if valIndex < 0 {
return false, errors.Wrap(ErrVoteInvalidValidatorIndex, "Index < 0")
} else if len(valAddr) == 0 {
return false, errors.Wrap(ErrVoteInvalidValidatorAddress, "Empty address")
}
//对投票基本信息进行核对
// Make sure the step matches.
if (vote.Height != voteSet.height) ||
(vote.Round != voteSet.round) ||
(vote.Type != voteSet.type_) {
return false, errors.Wrapf(ErrVoteUnexpectedStep, "Expected %d/%d/%d, but got %d/%d/%d",
voteSet.height, voteSet.round, voteSet.type_,
vote.Height, vote.Round, vote.Type)
}
// Ensure that signer is a validator.
//确保签名者在集合中
lookupAddr, val := voteSet.valSet.GetByIndex(valIndex)
if val == nil {
return false, errors.Wrapf(ErrVoteInvalidValidatorIndex,
"Cannot find validator %d in valSet of size %d", valIndex, voteSet.valSet.Size())
}
// Ensure that the signer has the right address.
// 比对地址
if !bytes.Equal(valAddr, lookupAddr) {
return false, errors.Wrapf(ErrVoteInvalidValidatorAddress,
"vote.ValidatorAddress (%X) does not match address (%X) for vote.ValidatorIndex (%d)\nEnsure the genesis file is correct across all validators.",
valAddr, lookupAddr, valIndex)
}
//如果已经获取过这个投票,就返回
// If we already know of this vote, return false.
if existing, ok := voteSet.getVote(valIndex, blockKey); ok {
if bytes.Equal(existing.Signature, vote.Signature) {
return false, nil // duplicate
}
return false, errors.Wrapf(ErrVoteNonDeterministicSignature, "Existing vote: %v; New vote: %v", existing, vote)
}
//核对签名的准确性
// Check signature.
if err := vote.Verify(voteSet.chainID, val.PubKey); err != nil {
return false, errors.Wrapf(err, "Failed to verify vote with ChainID %s and PubKey %s", voteSet.chainID, val.PubKey)
}
//添加投票
// Add vote and get conflicting vote if any.
added, conflicting := voteSet.addVerifiedVote(vote, blockKey, val.VotingPower)
if conflicting != nil {
return added, NewConflictingVoteError(val, conflicting, vote)
}
if !added {
cmn.PanicSanity("Expected to add non-conflicting vote")
}
return added, nil
}
// Returns (vote, true) if vote exists for valIndex and blockKey.
func (voteSet *VoteSet) getVote(valIndex int, blockKey string) (vote *Vote, ok bool) {
if existing := voteSet.votes[valIndex]; existing != nil && existing.BlockID.Key() == blockKey {
return existing, true
}
if existing := voteSet.votesByBlock[blockKey].getByIndex(valIndex); existing != nil {
return existing, true
}
return nil, false
}
// Assumes signature is valid.
// If conflicting vote exists, returns it.
// 投票存在冲突
func (voteSet *VoteSet) addVerifiedVote(vote *Vote, blockKey string, votingPower int64) (added bool, conflicting *Vote) {
valIndex := vote.ValidatorIndex
// Already exists in voteSet.votes?
//该投票已经存在这个投票池中
if existing := voteSet.votes[valIndex]; existing != nil {
if existing.BlockID.Equals(vote.BlockID) {
cmn.PanicSanity("addVerifiedVote does not expect duplicate votes")
} else {
conflicting = existing
}
// Replace vote if blockKey matches voteSet.maj23.
//可能有些投票临时改决定,投正确票
if voteSet.maj23 != nil && voteSet.maj23.Key() == blockKey {
voteSet.votes[valIndex] = vote
voteSet.votesBitArray.SetIndex(valIndex, true)
}else{
//fmt.Println("与现在投票集合key不合",blockKey)
}
// Otherwise don't add it to voteSet.votes
} else {
//投票池不存在该选票
// Add to voteSet.votes and incr .sum
voteSet.votes[valIndex] = vote
voteSet.votesBitArray.SetIndex(valIndex, true)
//增加票权
voteSet.sum += votingPower
}
//对block进行操作
votesByBlock, ok := voteSet.votesByBlock[blockKey]
if ok {
if conflicting != nil && !votesByBlock.peerMaj23 {
// 存在冲突
// There's a conflict and no peer claims that this block is special.
return false, conflicting
}
// We'll add the vote in a bit.
} else {
// .votesByBlock doesn't exist...
if conflicting != nil {
// votesblock不存在
// ... and there's a conflicting vote.
// We're not even tracking this blockKey, so just forget it.
return false, conflicting
}
// ... and there's no conflicting vote.
// Start tracking this blockKey
//对这个区块开启一个单独的空间
votesByBlock = newBlockVotes(false, voteSet.valSet.Size())
voteSet.votesByBlock[blockKey] = votesByBlock
// We'll add the vote in a bit.
}
// Before adding to votesByBlock, see if we'll exceed quorum
//看看是否超过多数
//
//fmt.Println("votesByBlock.sum",votesByBlock.sum)
origSum := votesByBlock.sum
//看看权重是否大于2/3了
quorum := voteSet.valSet.TotalVotingPower()*2/3 + 1
//fmt.Println("quorum",quorum)
// Add vote to votesByBlock
//添加权重
votesByBlock.addVerifiedVote(vote, votingPower)
// If we just crossed the quorum threshold and have 2/3 majority...
if origSum < quorum && quorum <= votesByBlock.sum {
// Only consider the first quorum reached
if voteSet.maj23 == nil {
maj23BlockID := vote.BlockID
voteSet.maj23 = &maj23BlockID
// And also copy votes over to voteSet.votes
for i, vote := range votesByBlock.votes {
if vote != nil {
voteSet.votes[i] = vote
}
}
}
}
return true, conflicting
}
// If a peer claims that it has 2/3 majority for given blockKey, call this.
// NOTE: if there are too many peers, or too much peer churn,
// this can cause memory issues.
// TODO: implement ability to remove peers too
// NOTE: VoteSet must not be nil
func (voteSet *VoteSet) SetPeerMaj23(peerID P2PID, blockID BlockID) error {
if voteSet == nil {
cmn.PanicSanity("SetPeerMaj23() on nil VoteSet")
}
voteSet.mtx.Lock()
defer voteSet.mtx.Unlock()
blockKey := blockID.Key()
// Make sure peer hasn't already told us something.
if existing, ok := voteSet.peerMaj23s[peerID]; ok {
if existing.Equals(blockID) {
return nil // Nothing to do
}
return fmt.Errorf("SetPeerMaj23: Received conflicting blockID from peer %v. Got %v, expected %v",
peerID, blockID, existing)
}
voteSet.peerMaj23s[peerID] = blockID
// Create .votesByBlock entry if needed.
votesByBlock, ok := voteSet.votesByBlock[blockKey]
if ok {
if votesByBlock.peerMaj23 {
return nil // Nothing to do
}
votesByBlock.peerMaj23 = true
// No need to copy votes, already there.
} else {
votesByBlock = newBlockVotes(true, voteSet.valSet.Size())
voteSet.votesByBlock[blockKey] = votesByBlock
// No need to copy votes, no votes to copy over.
}
return nil
}
func (voteSet *VoteSet) BitArray() *cmn.BitArray {
if voteSet == nil {
return nil
}
voteSet.mtx.Lock()
defer voteSet.mtx.Unlock()
return voteSet.votesBitArray.Copy()
}
func (voteSet *VoteSet) BitArrayByBlockID(blockID BlockID) *cmn.BitArray {
if voteSet == nil {
return nil
}
voteSet.mtx.Lock()
defer voteSet.mtx.Unlock()
votesByBlock, ok := voteSet.votesByBlock[blockID.Key()]
if ok {
return votesByBlock.bitArray.Copy()
}
return nil
}
// NOTE: if validator has conflicting votes, returns "canonical" vote
func (voteSet *VoteSet) GetByIndex(valIndex int) *Vote {
if voteSet == nil {
return nil
}
voteSet.mtx.Lock()
defer voteSet.mtx.Unlock()
return voteSet.votes[valIndex]
}
func (voteSet *VoteSet) GetByAddress(address []byte) *Vote {
if voteSet == nil {
return nil
}
voteSet.mtx.Lock()
defer voteSet.mtx.Unlock()
valIndex, val := voteSet.valSet.GetByAddress(address)
if val == nil {
cmn.PanicSanity("GetByAddress(address) returned nil")
}
return voteSet.votes[valIndex]
}
func (voteSet *VoteSet) HasTwoThirdsMajority() bool {
if voteSet == nil {
return false
}
voteSet.mtx.Lock()
defer voteSet.mtx.Unlock()
return voteSet.maj23 != nil
}
func (voteSet *VoteSet) IsCommit() bool {
if voteSet == nil {
return false
}
if voteSet.type_ != PrecommitType {
return false
}
voteSet.mtx.Lock()
defer voteSet.mtx.Unlock()
return voteSet.maj23 != nil
}
func (voteSet *VoteSet) HasTwoThirdsAny() bool {
if voteSet == nil {
return false
}
voteSet.mtx.Lock()
defer voteSet.mtx.Unlock()
return voteSet.sum > voteSet.valSet.TotalVotingPower()*2/3
}
func (voteSet *VoteSet) HasAll() bool {
voteSet.mtx.Lock()
defer voteSet.mtx.Unlock()
return voteSet.sum == voteSet.valSet.TotalVotingPower()
}
// If there was a +2/3 majority for blockID, return blockID and true.
// Else, return the empty BlockID{} and false.
func (voteSet *VoteSet) TwoThirdsMajority() (blockID BlockID, ok bool) {
if voteSet == nil {
return BlockID{}, false
}
voteSet.mtx.Lock()
defer voteSet.mtx.Unlock()
if voteSet.maj23 != nil {
//fmt.Println(voteSet.maj23)
return *voteSet.maj23, true
}
return BlockID{}, false
}
//--------------------------------------------------------------------------------
// Strings and JSON
func (voteSet *VoteSet) String() string {
if voteSet == nil {
return "nil-VoteSet"
}
return voteSet.StringIndented("")
}
func (voteSet *VoteSet) StringIndented(indent string) string {
voteSet.mtx.Lock()
defer voteSet.mtx.Unlock()
voteStrings := make([]string, len(voteSet.votes))
for i, vote := range voteSet.votes {
if vote == nil {
voteStrings[i] = "nil-Vote"
} else {
voteStrings[i] = vote.String()
}
}
return fmt.Sprintf(`VoteSet{
%s H:%v R:%v T:%v
%s %v
%s %v
%s %v
%s}`,
indent, voteSet.height, voteSet.round, voteSet.type_,
indent, strings.Join(voteStrings, "\n"+indent+" "),
indent, voteSet.votesBitArray,
indent, voteSet.peerMaj23s,
indent)
}
// Marshal the VoteSet to JSON. Same as String(), just in JSON,
// and without the height/round/type_ (since its already included in the votes).
func (voteSet *VoteSet) MarshalJSON() ([]byte, error) {
voteSet.mtx.Lock()
defer voteSet.mtx.Unlock()
return cdc.MarshalJSON(VoteSetJSON{
voteSet.voteStrings(),
voteSet.bitArrayString(),
voteSet.peerMaj23s,
})
}
// More human readable JSON of the vote set
// NOTE: insufficient for unmarshalling from (compressed votes)
// TODO: make the peerMaj23s nicer to read (eg just the block hash)
type VoteSetJSON struct {
Votes []string `json:"votes"`
VotesBitArray string `json:"votes_bit_array"`
PeerMaj23s map[P2PID]BlockID `json:"peer_maj_23s"`
}
// Return the bit-array of votes including
// the fraction of power that has voted like:
// "BA{29:xx__x__x_x___x__x_______xxx__} 856/1304 = 0.66"
func (voteSet *VoteSet) BitArrayString() string {
voteSet.mtx.Lock()
defer voteSet.mtx.Unlock()
return voteSet.bitArrayString()
}
func (voteSet *VoteSet) bitArrayString() string {
bAString := voteSet.votesBitArray.String()
voted, total, fracVoted := voteSet.sumTotalFrac()
return fmt.Sprintf("%s %d/%d = %.2f", bAString, voted, total, fracVoted)
}
// Returns a list of votes compressed to more readable strings.
func (voteSet *VoteSet) VoteStrings() []string {
voteSet.mtx.Lock()
defer voteSet.mtx.Unlock()
return voteSet.voteStrings()
}
func (voteSet *VoteSet) voteStrings() []string {
voteStrings := make([]string, len(voteSet.votes))
for i, vote := range voteSet.votes {
if vote == nil {
voteStrings[i] = "nil-Vote"
} else {
voteStrings[i] = vote.String()
}
}
return voteStrings
}
func (voteSet *VoteSet) StringShort() string {
if voteSet == nil {
return "nil-VoteSet"
}
voteSet.mtx.Lock()
defer voteSet.mtx.Unlock()
_, _, frac := voteSet.sumTotalFrac()
return fmt.Sprintf(`VoteSet{H:%v R:%v T:%v +2/3:%v(%v) %v %v}`,
voteSet.height, voteSet.round, voteSet.type_, voteSet.maj23, frac, voteSet.votesBitArray, voteSet.peerMaj23s)
}
// return the power voted, the total, and the fraction
func (voteSet *VoteSet) sumTotalFrac() (int64, int64, float64) {
voted, total := voteSet.sum, voteSet.valSet.TotalVotingPower()
fracVoted := float64(voted) / float64(total)
return voted, total, fracVoted
}
//--------------------------------------------------------------------------------
// Commit
func (voteSet *VoteSet) MakeCommit() *Commit {
if voteSet.type_ != PrecommitType {
cmn.PanicSanity("Cannot MakeCommit() unless VoteSet.Type is PrecommitType")
}
voteSet.mtx.Lock()
defer voteSet.mtx.Unlock()
// Make sure we have a 2/3 majority
if voteSet.maj23 == nil {
cmn.PanicSanity("Cannot MakeCommit() unless a blockhash has +2/3")
}
// For every validator, get the precommit
commitSigs := make([]*CommitSig, len(voteSet.votes))
for i, v := range voteSet.votes {
commitSigs[i] = v.CommitSig()
}
return NewCommit(*voteSet.maj23, commitSigs)
}
//--------------------------------------------------------------------------------
/*
Votes for a particular block
There are two ways a *blockVotes gets created for a blockKey.
1. first (non-conflicting) vote of a validator w/ blockKey (peerMaj23=false)
2. A peer claims to have a 2/3 majority w/ blockKey (peerMaj23=true)
*/
type blockVotes struct {
peerMaj23 bool // peer claims to have maj23
bitArray *cmn.BitArray // valIndex -> hasVote?
votes []*Vote // valIndex -> *Vote
sum int64 // vote sum
}
func newBlockVotes(peerMaj23 bool, numValidators int) *blockVotes {
return &blockVotes{
peerMaj23: peerMaj23,
bitArray: cmn.NewBitArray(numValidators),
votes: make([]*Vote, numValidators),
sum: 0,
}
}
func (vs *blockVotes) addVerifiedVote(vote *Vote, votingPower int64) {
//对权重进行叠加
valIndex := vote.ValidatorIndex
if existing := vs.votes[valIndex]; existing == nil {
vs.bitArray.SetIndex(valIndex, true)
vs.votes[valIndex] = vote
vs.sum += votingPower
}
}
func (vs *blockVotes) getByIndex(index int) *Vote {
if vs == nil {
return nil
}
return vs.v |
//--------------------------------------------------------------------------------
// Common interface between *consensus.VoteSet and types.Commit
type VoteSetReader interface {
Height() int64
Round() int
Type() byte
Size() int
BitArray() *cmn.BitArray
GetByIndex(int) *Vote
IsCommit() bool
}
| otes[index]
} |
ocr_client.py | # -*- coding: utf8 -*-
# Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
from tencentcloud.common.abstract_client import AbstractClient
from tencentcloud.ocr.v20181119 import models
class OcrClient(AbstractClient):
_apiVersion = '2018-11-19'
_endpoint = 'ocr.tencentcloudapi.com'
def GeneralBasicOCR(self, request):
"""通用印刷体识别接口用于提供图像整体文字的检测和识别服务,返回文字框位置与文字内容。支持多场景、任意版面下整图文字的识别,以及中英文、字母、数字和日文、韩文的识别。应用场景包括:印刷文档识别、网络图片识别、广告图文字识别、街景店招识别、菜单识别、视频标题识别、头像文字识别等。
:param request: 调用GeneralBasicOCR所需参数的结构体。
:type request: :class:`tencentcloud.ocr.v20181119.models.GeneralBasicOCRRequest`
:rtype: :class:`tencentcloud.ocr.v20181119.models.GeneralBasicOCRResponse`
"""
try:
params = request._serialize()
body = self.call("GeneralBasicOCR", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.GeneralBasicOCRResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def GeneralFastOCR(self, request):
"""通用印刷体识别(高速版)接口用于提供图像整体文字的检测和识别服务,返回文字框位置与文字内容。相比通用印刷体识别接口,识别速度更快、支持的QPS更高。
:param request: 调用GeneralFastOCR所需参数的结构体。
:type request: :class:`tencentcloud.ocr.v20181119.models.GeneralFastOCRRequest`
:rtype: :class:`tencentcloud.ocr.v20181119.models.GeneralFastOCRResponse`
"""
try:
params = request._serialize()
body = self.call("GeneralFastOCR", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.GeneralFastOCRResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def IDCardOCR(self, request):
"""身份证识别接口支持二代身份证正反面所有字段的识别,包括姓名、性别、民族、出生日期、住址、公民身份证号、签发机关、有效期限;具备身份证照片、人像照片的裁剪功能和翻拍件、复印件的识别告警功能。应用场景包括:银行开户、用户注册、人脸核身等各种身份证信息有效性核验场景。
:param request: 调用IDCardOCR所需参数的结构体。
:type request: :class:`tencentcloud.ocr.v20181119.models.IDCardOCRRequest`
:rtype: :class:`tencentcloud.ocr.v20181119.models.IDCardOCRResponse`
"""
try:
params = request._serialize()
body = self.call("IDCardOCR", params)
| response = json.loads(body)
if "Error" not in response["Response"]:
model = models.IDCardOCRResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message) |
|
config.go | // Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package istanbul
import (
"math/big"
"sync"
"github.com/naoina/toml"
)
type ProposerPolicyId uint64
const (
RoundRobin ProposerPolicyId = iota
Sticky
)
// ProposerPolicy represents the Validator Proposer Policy
type ProposerPolicy struct {
Id ProposerPolicyId // Could be RoundRobin or Sticky
By ValidatorSortByFunc // func that defines how the ValidatorSet should be sorted
registry []ValidatorSet // Holds the ValidatorSet for a given block height
registryMU *sync.Mutex // Mutex to lock access to changes to Registry
}
// NewRoundRobinProposerPolicy returns a RoundRobin ProposerPolicy with ValidatorSortByString as default sort function
func NewRoundRobinProposerPolicy() *ProposerPolicy |
// NewStickyProposerPolicy returns a Sticky ProposerPolicy with ValidatorSortByString as the default sort function
func NewStickyProposerPolicy() *ProposerPolicy {
return NewProposerPolicy(Sticky)
}
func NewProposerPolicy(id ProposerPolicyId) *ProposerPolicy {
return NewProposerPolicyByIdAndSortFunc(id, ValidatorSortByString())
}
func NewProposerPolicyByIdAndSortFunc(id ProposerPolicyId, by ValidatorSortByFunc) *ProposerPolicy {
return &ProposerPolicy{Id: id, By: by, registryMU: new(sync.Mutex)}
}
type proposerPolicyToml struct {
Id ProposerPolicyId
}
func (p *ProposerPolicy) MarshalTOML() ([]byte, error) {
pp := &proposerPolicyToml{Id: p.Id}
return toml.Marshal(pp)
}
func (p *ProposerPolicy) UnmarshalTOML(input []byte) error {
var pp proposerPolicyToml
err := toml.Unmarshal(input, &pp)
if err != nil {
return err
}
p.Id = pp.Id
p.By = ValidatorSortByString()
return nil
}
// Use sets the ValidatorSortByFunc for the given ProposerPolicy and sorts the validatorSets according to it
func (p *ProposerPolicy) Use(v ValidatorSortByFunc) {
p.By = v
for _, validatorSet := range p.registry {
validatorSet.SortValidators()
}
}
// RegisterValidatorSet stores the given ValidatorSet in the policy registry
func (p *ProposerPolicy) RegisterValidatorSet(valSet ValidatorSet) {
p.registryMU.Lock()
defer p.registryMU.Unlock()
if len(p.registry) == 0 {
p.registry = []ValidatorSet{valSet}
} else {
p.registry = append(p.registry, valSet)
}
}
// ClearRegistry removes any ValidatorSet from the ProposerPolicy registry
func (p *ProposerPolicy) ClearRegistry() {
p.registryMU.Lock()
defer p.registryMU.Unlock()
p.registry = nil
}
type Config struct {
RequestTimeout uint64 `toml:",omitempty"` // The timeout for each Istanbul round in milliseconds.
BlockPeriod uint64 `toml:",omitempty"` // Default minimum difference between two consecutive blocks' timestamps, in seconds
ProposerPolicy *ProposerPolicy `toml:",omitempty"` // The policy for proposer selection
Epoch uint64 `toml:",omitempty"` // The number of blocks after which to checkpoint and reset the pending votes
Ceil2Nby3Block *big.Int `toml:",omitempty"` // Number of confirmations required to move from one state to the next [2F + 1 to Ceil(2N/3)]
AllowedFutureBlockTime uint64 `toml:",omitempty"` // Max time (in seconds) from current time allowed for blocks, before they're considered future blocks
TestQBFTBlock *big.Int `toml:",omitempty"` // Fork block at which block confirmations are done using qbft consensus instead of ibft
}
var DefaultConfig = &Config{
RequestTimeout: 10000,
BlockPeriod: 1,
ProposerPolicy: NewRoundRobinProposerPolicy(),
Epoch: 30000,
Ceil2Nby3Block: big.NewInt(0),
AllowedFutureBlockTime: 0,
TestQBFTBlock: big.NewInt(0),
}
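// A hedged example (from a hypothetical caller; the values are illustrative
// only): copy DefaultConfig and override just the fields that differ for a
// particular deployment.
//
//	cfg := *DefaultConfig
//	cfg.BlockPeriod = 5
//	cfg.ProposerPolicy = NewStickyProposerPolicy()
//	cfg.Epoch = 10000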
// QBFTBlockNumber returns the qbftBlock fork block number, returns -1 if qbftBlock is not defined
func (c Config) QBFTBlockNumber() int64 {
if c.TestQBFTBlock == nil {
return -1
}
return c.TestQBFTBlock.Int64()
}
// IsQBFTConsensusAt checks if qbft consensus is enabled for the block height identified by the given header
func (c *Config) IsQBFTConsensusAt(blockNumber *big.Int) bool {
// If qbftBlock is not defined in genesis qbft consensus is not used
if c.TestQBFTBlock == nil {
return false
}
if c.TestQBFTBlock.Uint64() == 0 {
return true
}
if blockNumber.Cmp(c.TestQBFTBlock) >= 0 {
return true
}
return false
}
| {
return NewProposerPolicy(RoundRobin)
} |
main_keyboard_recorder_v2.py | import tkinter as tk | import time
import threading
from functools import partial
class Application(tk.Frame):
class MyButton(tk.Button):
def __init__(self, *args, **kwargs):
tk.Button.__init__(self, *args, **kwargs)
class MyLabel(tk.Label):
def __init__(self, *args, **kwargs):
tk.Label.__init__(self, *args, **kwargs)
def __init__(self, master=None):
super().__init__(master)
self.master = master
self.master.geometry('200x200')
bt = self.MyButton(fg='red', bg='yellow')
bt["text"] = 'Selecione arquivo'
bt["command"] = lambda: self.start(self.selec_arq)
bt.pack(side="top")
bt = self.MyButton(fg='black', bg='red')
bt["text"] = 'GRAVAR'
bt["command"] = lambda: self.start(self.gravando)
bt.pack(side="top")
# self.create_widgets()
bt = self.MyButton(fg='black', bg='green')
bt["text"] = 'REPRODUZIR'
bt["command"] = lambda: self.start(self.executa)
bt.pack(side="top")
entr = self.MyLabel()
entr['text'] = "Pressione F8 para parar a gravação e salvar o arquivo"
entr.pack()
# self.create_widgets()
# Thread helpers: keep the Tk mainloop responsive while recording/playback runs in a background thread
def refresh(self):
self.master.update()
self.master.after(1000, self.refresh)
def start(self, target):
self.refresh()
threading.Thread(target=target).start()
# #######
def selec_arq(self):
fld = filedialog.askopenfilename(defaultextension='txt', filetypes=(('text files', 'txt'), ), initialdir=path.dirname(__file__))
self.mk_fld(fld)
return fld
def cria_arq(self):
fld0 = filedialog.asksaveasfilename(title="Salve a gravação", filetypes=(('text files', 'txt'), ))
fld = f'{fld0}.txt' if path.splitext(fld0)[1] == '' else fld0
self.mk_fld(fld)
return fld
def mk_fld(self, fld):
try:
self.arq0atual_label.pack_forget()
except (AttributeError, NameError):
pass
if fld == '':
fld = str(time.time()).replace('.', '')
fld += '.txt'
fld_resume = fld.replace(fld[3:len(fld) - int(len(fld)/2)], '...')
self.arq0atual_label = tk.Label(text=f"arquivo atual: {fld_resume}")
self.arq0atual_label.pack()
self.arq0atual = fld
def mk_kboard_instance(self, select=False):
try:
narq = self.arq0atual
except AttributeError:
if select:
narq = self.selec_arq()
else:
narq = self.cria_arq()
print('narq:', narq)
dale = MyKeyboardV001(narq)
return dale
def gravando(self):
print('Gravando')
dale = self.mk_kboard_instance()
dale.listen()
dale.backup()
def executa(self):
dale = self.mk_kboard_instance(select=True)
dale.playitbackup()
messagebox.showinfo('FIM!!!', f'Arquivo {self.arq0atual} EXECUTADO COM SUCESSO. [enter] para continuar')
root = tk.Tk()
app = Application(master=root)
# dale = tk.Label(root, text="testedsadasdasdasdadasddsadas").pack()
# app.MyButton(text="Testo").pack()
app.mainloop() | from tkinter import filedialog, messagebox
from os import path
import pickle
from utils import MyKeyboardV001 |
test_http_sensor.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest.mock import patch
import requests
from airflow import DAG
from airflow.exceptions import AirflowException, AirflowSensorTimeout
from airflow.models import TaskInstance
from airflow.operators.http_operator import SimpleHttpOperator
from airflow.sensors.http_sensor import HttpSensor
from airflow.utils.timezone import datetime
from tests.compat import mock
DEFAULT_DATE = datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
TEST_DAG_ID = 'unit_test_dag'
class TestHttpSensor(unittest.TestCase):
def setUp(self):
args = {
'owner': 'airflow',
'start_date': DEFAULT_DATE
}
self.dag = DAG(TEST_DAG_ID, default_args=args)
@patch("airflow.hooks.http_hook.requests.Session.send")
def test_poke_exception(self, mock_session_send):
"""
An exception raised in the poke function should not be ignored.
"""
response = requests.Response()
response.status_code = 200
mock_session_send.return_value = response
def resp_check(resp):
raise AirflowException('AirflowException raised here!')
task = HttpSensor(
task_id='http_sensor_poke_exception',
http_conn_id='http_default',
endpoint='',
request_params={},
response_check=resp_check,
timeout=5,
poke_interval=1)
with self.assertRaisesRegex(AirflowException, 'AirflowException raised here!'):
task.execute(context={})
@patch("airflow.hooks.http_hook.requests.Session.send")
def test_head_method(self, mock_session_send):
def resp_check(_):
return True
task = HttpSensor(
dag=self.dag,
task_id='http_sensor_head_method',
http_conn_id='http_default',
endpoint='',
request_params={},
method='HEAD',
response_check=resp_check,
timeout=5,
poke_interval=1)
task.execute(context={})
args, kwargs = mock_session_send.call_args
received_request = args[0]
prep_request = requests.Request(
'HEAD',
'https://www.google.com',
{}).prepare()
self.assertEqual(prep_request.url, received_request.url)
self.assertEqual(prep_request.method, received_request.method)
@patch("airflow.hooks.http_hook.requests.Session.send")
def test_poke_context(self, mock_session_send):
response = requests.Response()
response.status_code = 200
mock_session_send.return_value = response
def resp_check(resp, execution_date):
if execution_date == DEFAULT_DATE:
return True
raise AirflowException('AirflowException raised here!')
task = HttpSensor(
task_id='http_sensor_poke_exception',
http_conn_id='http_default',
endpoint='',
request_params={},
response_check=resp_check,
timeout=5,
poke_interval=1,
dag=self.dag)
task_instance = TaskInstance(task=task, execution_date=DEFAULT_DATE)
task.execute(task_instance.get_template_context())
@patch("airflow.hooks.http_hook.requests.Session.send")
def test_logging_head_error_request(
self,
mock_session_send
):
def resp_check(_):
return True
response = requests.Response()
response.status_code = 404
response.reason = 'Not Found'
mock_session_send.return_value = response
task = HttpSensor(
dag=self.dag,
task_id='http_sensor_head_method',
http_conn_id='http_default',
endpoint='',
request_params={},
method='HEAD',
response_check=resp_check,
timeout=5,
poke_interval=1
)
with mock.patch.object(task.hook.log, 'error') as mock_errors:
with self.assertRaises(AirflowSensorTimeout):
task.execute(None)
self.assertTrue(mock_errors.called)
calls = [
mock.call('HTTP error: %s', 'Not Found'),
mock.call('HTTP error: %s', 'Not Found'),
mock.call('HTTP error: %s', 'Not Found'),
mock.call('HTTP error: %s', 'Not Found'),
mock.call('HTTP error: %s', 'Not Found'),
mock.call('HTTP error: %s', 'Not Found'),
]
mock_errors.assert_has_calls(calls)
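# FakeSession stands in for requests.Session in the tests below: it always
# returns a 200 response whose body contains "apache/airflow", optionally
# suffixed with the request's `date` parameter, so the operator and sensor
# tests can run without any network access.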
class FakeSession:
def __init__(self):
self.response = requests.Response()
self.response.status_code = 200
self.response._content = 'apache/airflow'.encode('ascii', 'ignore')
def send(self, *args, **kwargs):
return self.response
def prepare_request(self, request):
if 'date' in request.params:
self.response._content += (
'/' + request.params['date']
).encode('ascii', 'ignore')
return self.response
class TestHttpOpSensor(unittest.TestCase):
| def setUp(self):
args = {'owner': 'airflow', 'start_date': DEFAULT_DATE_ISO}
dag = DAG(TEST_DAG_ID, default_args=args)
self.dag = dag
@mock.patch('requests.Session', FakeSession)
def test_get(self):
t = SimpleHttpOperator(
task_id='get_op',
method='GET',
endpoint='/search',
data={"client": "ubuntu", "q": "airflow"},
headers={},
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
@mock.patch('requests.Session', FakeSession)
def test_get_response_check(self):
t = SimpleHttpOperator(
task_id='get_op',
method='GET',
endpoint='/search',
data={"client": "ubuntu", "q": "airflow"},
response_check=lambda response: ("apache/airflow" in response.text),
headers={},
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
@mock.patch('requests.Session', FakeSession)
def test_sensor(self):
sensor = HttpSensor(
task_id='http_sensor_check',
http_conn_id='http_default',
endpoint='/search',
request_params={"client": "ubuntu", "q": "airflow", 'date': '{{ds}}'},
headers={},
response_check=lambda response: (
"apache/airflow/" + DEFAULT_DATE.strftime('%Y-%m-%d')
in response.text),
poke_interval=5,
timeout=15,
dag=self.dag)
sensor.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True) |
|
write.rs | use crate::io;
use crate::path::Path;
use crate::task::spawn_blocking;
use crate::utils::Context as _;
/// Writes a slice of bytes as the new contents of a file.
///
/// This function will create a file if it does not exist, and will entirely replace its contents
/// if it does.
///
/// This function is an async version of [`std::fs::write`].
///
/// [`std::fs::write`]: https://doc.rust-lang.org/std/fs/fn.write.html
///
/// # Errors
///
/// An error will be returned in the following situations:
///
/// * The file's parent directory does not exist.
/// * The current process lacks permissions to write to the file.
/// * Some other I/O error occurred.
///
/// # Examples
///
/// ```no_run
/// # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
/// #
/// use async_std::fs;
///
/// fs::write("a.txt", b"Hello world!").await?;
/// #
/// # Ok(()) }) }
/// ```
pub async fn write<P: AsRef<Path>, C: AsRef<[u8]>>(path: P, contents: C) -> io::Result<()> | {
let path = path.as_ref().to_owned();
let contents = contents.as_ref().to_owned();
spawn_blocking(move || {
std::fs::write(&path, contents)
.context(|| format!("could not write to file `{}`", path.display()))
})
.await
} |
|
bibtex.rs | // Copyright 2016-2019 the Tectonic Project
// Licensed under the MIT License.
use std::collections::HashSet;
use tectonic::engines::NoopIoEventBackend;
use tectonic::io::stdstreams::GenuineStdoutIo;
use tectonic::io::{FilesystemIo, IoProvider, IoStack, MemoryIo};
use tectonic::status::NoopStatusBackend;
use tectonic::BibtexEngine;
#[path = "util/mod.rs"]
mod util;
use crate::util::{test_path, ExpectedInfo};
struct TestCase {
stem: String,
}
impl TestCase {
fn new(stem: &str) -> Self {
TestCase {
stem: stem.to_owned(),
}
}
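/// Runs the BibTeX engine on `<stem>.aux` from the test assets and compares
/// the generated .bbl and .blg files against the expected outputs.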
fn go(&mut self) |
}
#[test]
fn single_entry() {
TestCase::new("single_entry").go()
}
| {
util::set_test_root();
let mut p = test_path(&["bibtex"]);
p.push(&self.stem);
p.set_extension("aux");
let auxname = p.file_name().unwrap().to_str().unwrap().to_owned();
// MemoryIo layer that will accept the outputs.
let mut mem = MemoryIo::new(true);
let mut assets = FilesystemIo::new(&test_path(&["bibtex"]), false, false, HashSet::new());
let mut genio = GenuineStdoutIo::new();
let io_list: Vec<&mut dyn IoProvider> = vec![&mut genio, &mut mem, &mut assets];
let mut io = IoStack::new(io_list);
let mut events = NoopIoEventBackend::new();
let mut status = NoopStatusBackend::new();
BibtexEngine::new()
.process(&mut io, &mut events, &mut status, &auxname)
.unwrap();
// Check that outputs match expectations.
let expected_bbl = ExpectedInfo::read_with_extension(&mut p, "bbl");
let expected_blg = ExpectedInfo::read_with_extension(&mut p, "blg");
let files = mem.files.borrow();
expected_bbl.test_from_collection(&files);
expected_blg.test_from_collection(&files);
} |
validator_test.go | package types
import (
"fmt"
"math/rand"
"reflect"
"sort"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/crypto/ed25519"
tmtypes "github.com/tendermint/tendermint/types"
yaml "gopkg.in/yaml.v2"
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
)
func TestValidatorTestEquivalent(t *testing.T) {
val1 := NewValidator(valAddr1, pk1, Description{})
val2 := NewValidator(valAddr1, pk1, Description{})
ok := val1.TestEquivalent(val2)
require.True(t, ok)
val2 = NewValidator(valAddr2, pk2, Description{})
ok = val1.TestEquivalent(val2)
require.False(t, ok)
}
func TestUpdateDescription(t *testing.T) {
d1 := Description{
Website: "https://validator.cosmos",
Details: "Test validator",
}
d2 := Description{
Moniker: DoNotModifyDesc,
Identity: DoNotModifyDesc,
Website: DoNotModifyDesc,
Details: DoNotModifyDesc,
}
d3 := Description{
Moniker: "",
Identity: "",
Website: "",
Details: "",
}
d, err := d1.UpdateDescription(d2)
require.Nil(t, err)
require.Equal(t, d, d1)
d, err = d1.UpdateDescription(d3)
require.Nil(t, err)
require.Equal(t, d, d3)
}
func TestABCIValidatorUpdate(t *testing.T) |
func TestABCIValidatorUpdateZero(t *testing.T) {
validator := NewValidator(valAddr1, pk1, Description{})
abciVal := validator.ABCIValidatorUpdateZero()
require.Equal(t, tmtypes.TM2PB.PubKey(validator.ConsPubKey), abciVal.PubKey)
require.Equal(t, int64(0), abciVal.Power)
}
func TestShareTokens(t *testing.T) {
validator := Validator{
OperatorAddress: valAddr1,
ConsPubKey: pk1,
Status: sdk.Bonded,
Tokens: sdk.NewInt(100),
DelegatorShares: sdk.NewDec(100),
}
assert.True(sdk.DecEq(t, sdk.NewDec(50), validator.TokensFromShares(sdk.NewDec(50))))
validator.Tokens = sdk.NewInt(50)
assert.True(sdk.DecEq(t, sdk.NewDec(25), validator.TokensFromShares(sdk.NewDec(50))))
assert.True(sdk.DecEq(t, sdk.NewDec(5), validator.TokensFromShares(sdk.NewDec(10))))
}
func TestRemoveTokens(t *testing.T) {
valPubKey := pk1
valAddr := sdk.ValAddress(valPubKey.Address().Bytes())
validator := Validator{
OperatorAddress: valAddr,
ConsPubKey: valPubKey,
Status: sdk.Bonded,
Tokens: sdk.NewInt(100),
DelegatorShares: sdk.NewDec(100),
}
// remove tokens and check everything
validator = validator.RemoveTokens(sdk.NewInt(10))
require.Equal(t, int64(90), validator.Tokens.Int64())
// update validator from bonded -> unbonded
validator = validator.UpdateStatus(sdk.Unbonded)
require.Equal(t, sdk.Unbonded, validator.Status)
validator = validator.RemoveTokens(sdk.NewInt(10))
require.Panics(t, func() { validator.RemoveTokens(sdk.NewInt(-1)) })
require.Panics(t, func() { validator.RemoveTokens(sdk.NewInt(100)) })
}
func TestAddTokensValidatorBonded(t *testing.T) {
validator := NewValidator(sdk.ValAddress(pk1.Address().Bytes()), pk1, Description{})
validator = validator.UpdateStatus(sdk.Bonded)
validator, delShares := validator.AddTokensFromDel(sdk.NewInt(10))
assert.True(sdk.DecEq(t, sdk.NewDec(10), delShares))
assert.True(sdk.IntEq(t, sdk.NewInt(10), validator.BondedTokens()))
assert.True(sdk.DecEq(t, sdk.NewDec(10), validator.DelegatorShares))
}
func TestAddTokensValidatorUnbonding(t *testing.T) {
validator := NewValidator(sdk.ValAddress(pk1.Address().Bytes()), pk1, Description{})
validator = validator.UpdateStatus(sdk.Unbonding)
validator, delShares := validator.AddTokensFromDel(sdk.NewInt(10))
assert.True(sdk.DecEq(t, sdk.NewDec(10), delShares))
assert.Equal(t, sdk.Unbonding, validator.Status)
assert.True(sdk.IntEq(t, sdk.NewInt(10), validator.Tokens))
assert.True(sdk.DecEq(t, sdk.NewDec(10), validator.DelegatorShares))
}
func TestAddTokensValidatorUnbonded(t *testing.T) {
validator := NewValidator(sdk.ValAddress(pk1.Address().Bytes()), pk1, Description{})
validator = validator.UpdateStatus(sdk.Unbonded)
validator, delShares := validator.AddTokensFromDel(sdk.NewInt(10))
assert.True(sdk.DecEq(t, sdk.NewDec(10), delShares))
assert.Equal(t, sdk.Unbonded, validator.Status)
assert.True(sdk.IntEq(t, sdk.NewInt(10), validator.Tokens))
assert.True(sdk.DecEq(t, sdk.NewDec(10), validator.DelegatorShares))
}
// TODO refactor to make simpler like the AddToken tests above
func TestRemoveDelShares(t *testing.T) {
valA := Validator{
OperatorAddress: sdk.ValAddress(pk1.Address().Bytes()),
ConsPubKey: pk1,
Status: sdk.Bonded,
Tokens: sdk.NewInt(100),
DelegatorShares: sdk.NewDec(100),
}
// Remove delegator shares
valB, coinsB := valA.RemoveDelShares(sdk.NewDec(10))
require.Equal(t, int64(10), coinsB.Int64())
require.Equal(t, int64(90), valB.DelegatorShares.RoundInt64())
require.Equal(t, int64(90), valB.BondedTokens().Int64())
// specific case from random tests
poolTokens := sdk.NewInt(5102)
delShares := sdk.NewDec(115)
validator := Validator{
OperatorAddress: sdk.ValAddress(pk1.Address().Bytes()),
ConsPubKey: pk1,
Status: sdk.Bonded,
Tokens: poolTokens,
DelegatorShares: delShares,
}
shares := sdk.NewDec(29)
_, tokens := validator.RemoveDelShares(shares)
require.True(sdk.IntEq(t, sdk.NewInt(1286), tokens))
}
func TestAddTokensFromDel(t *testing.T) {
validator := NewValidator(sdk.ValAddress(pk1.Address().Bytes()), pk1, Description{})
validator, shares := validator.AddTokensFromDel(sdk.NewInt(6))
require.True(sdk.DecEq(t, sdk.NewDec(6), shares))
require.True(sdk.DecEq(t, sdk.NewDec(6), validator.DelegatorShares))
require.True(sdk.IntEq(t, sdk.NewInt(6), validator.Tokens))
validator, shares = validator.AddTokensFromDel(sdk.NewInt(3))
require.True(sdk.DecEq(t, sdk.NewDec(3), shares))
require.True(sdk.DecEq(t, sdk.NewDec(9), validator.DelegatorShares))
require.True(sdk.IntEq(t, sdk.NewInt(9), validator.Tokens))
}
func TestUpdateStatus(t *testing.T) {
validator := NewValidator(sdk.ValAddress(pk1.Address().Bytes()), pk1, Description{})
validator, _ = validator.AddTokensFromDel(sdk.NewInt(100))
require.Equal(t, sdk.Unbonded, validator.Status)
require.Equal(t, int64(100), validator.Tokens.Int64())
// Unbonded to Bonded
validator = validator.UpdateStatus(sdk.Bonded)
require.Equal(t, sdk.Bonded, validator.Status)
// Bonded to Unbonding
validator = validator.UpdateStatus(sdk.Unbonding)
require.Equal(t, sdk.Unbonding, validator.Status)
// Unbonding to Bonded
validator = validator.UpdateStatus(sdk.Bonded)
require.Equal(t, sdk.Bonded, validator.Status)
}
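// TestPossibleOverflow replays token/share values taken from a previously
// failing randomized test to ensure AddTokensFromDel never yields negative
// (overflowed) token or delegator-share amounts.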
func TestPossibleOverflow(t *testing.T) {
delShares := sdk.NewDec(391432570689183511).Quo(sdk.NewDec(40113011844664))
validator := Validator{
OperatorAddress: sdk.ValAddress(pk1.Address().Bytes()),
ConsPubKey: pk1,
Status: sdk.Bonded,
Tokens: sdk.NewInt(2159),
DelegatorShares: delShares,
}
newValidator, _ := validator.AddTokensFromDel(sdk.NewInt(71))
require.False(t, newValidator.DelegatorShares.IsNegative())
require.False(t, newValidator.Tokens.IsNegative())
}
func TestValidatorMarshalUnmarshalJSON(t *testing.T) {
validator := NewValidator(valAddr1, pk1, Description{})
js, err := codec.Cdc.MarshalJSON(validator)
require.NoError(t, err)
require.NotEmpty(t, js)
require.Contains(t, string(js), "\"consensus_pubkey\":\"cosmosvalconspu")
got := &Validator{}
err = codec.Cdc.UnmarshalJSON(js, got)
assert.NoError(t, err)
assert.Equal(t, validator, *got)
}
func TestValidatorSetInitialCommission(t *testing.T) {
val := NewValidator(valAddr1, pk1, Description{})
testCases := []struct {
validator Validator
commission Commission
expectedErr bool
}{
{val, NewCommission(sdk.ZeroDec(), sdk.ZeroDec(), sdk.ZeroDec()), false},
{val, NewCommission(sdk.ZeroDec(), sdk.NewDecWithPrec(-1, 1), sdk.ZeroDec()), true},
{val, NewCommission(sdk.ZeroDec(), sdk.NewDec(15000000000), sdk.ZeroDec()), true},
{val, NewCommission(sdk.NewDecWithPrec(-1, 1), sdk.ZeroDec(), sdk.ZeroDec()), true},
{val, NewCommission(sdk.NewDecWithPrec(2, 1), sdk.NewDecWithPrec(1, 1), sdk.ZeroDec()), true},
{val, NewCommission(sdk.ZeroDec(), sdk.ZeroDec(), sdk.NewDecWithPrec(-1, 1)), true},
{val, NewCommission(sdk.ZeroDec(), sdk.NewDecWithPrec(1, 1), sdk.NewDecWithPrec(2, 1)), true},
}
for i, tc := range testCases {
val, err := tc.validator.SetInitialCommission(tc.commission)
if tc.expectedErr {
require.Error(t, err,
"expected error for test case #%d with commission: %s", i, tc.commission,
)
} else {
require.NoError(t, err,
"unexpected error for test case #%d with commission: %s", i, tc.commission,
)
require.Equal(t, tc.commission, val.Commission,
"invalid validator commission for test case #%d with commission: %s", i, tc.commission,
)
}
}
}
func TestValidatorMarshalYAML(t *testing.T) {
validator := NewValidator(valAddr1, pk1, Description{})
bechifiedPub, err := sdk.Bech32ifyConsPub(validator.ConsPubKey)
require.NoError(t, err)
bs, err := yaml.Marshal(validator)
require.NoError(t, err)
want := fmt.Sprintf(`|
operatoraddress: %s
conspubkey: %s
jailed: false
status: 0
tokens: "0"
delegatorshares: "0.000000000000000000"
description:
moniker: ""
identity: ""
website: ""
security_contact: ""
details: ""
unbondingheight: 0
unbondingcompletiontime: 1970-01-01T00:00:00Z
commission:
commission_rates:
rate: "0.000000000000000000"
max_rate: "0.000000000000000000"
max_change_rate: "0.000000000000000000"
update_time: 1970-01-01T00:00:00Z
minselfdelegation: "1"
`, validator.OperatorAddress.String(), bechifiedPub)
require.Equal(t, want, string(bs))
}
// Check that sort will create deterministic ordering of validators
func TestValidatorsSortDeterminism(t *testing.T) {
vals := make([]Validator, 10)
sortedVals := make([]Validator, 10)
// Create random validator slice
for i := range vals {
pk := ed25519.GenPrivKey().PubKey()
vals[i] = NewValidator(sdk.ValAddress(pk.Address()), pk, Description{})
}
// Save sorted copy
sort.Sort(Validators(vals))
copy(sortedVals, vals)
// Randomly shuffle validators, sort, and check it is equal to original sort
for i := 0; i < 10; i++ {
rand.Shuffle(10, func(i, j int) {
it := vals[i]
vals[i] = vals[j]
vals[j] = it
})
Validators(vals).Sort()
require.True(t, reflect.DeepEqual(sortedVals, vals), "Validator sort returned different slices")
}
}
| {
validator := NewValidator(valAddr1, pk1, Description{})
abciVal := validator.ABCIValidatorUpdate()
require.Equal(t, tmtypes.TM2PB.PubKey(validator.ConsPubKey), abciVal.PubKey)
require.Equal(t, validator.BondedTokens().Int64(), abciVal.Power)
} |
shelf.component.spec.ts | import { async, ComponentFixture, TestBed } from '@angular/core/testing';
import { ShelfComponent } from './shelf.component';
describe('ShelfComponent', () => {
let component: ShelfComponent;
let fixture: ComponentFixture<ShelfComponent>;
beforeEach(async(() => {
TestBed.configureTestingModule({
declarations: [ ShelfComponent ]
})
.compileComponents();
}));
| component = fixture.componentInstance;
fixture.detectChanges();
});
it('should create', () => {
expect(component).toBeTruthy();
});
}); | beforeEach(() => {
fixture = TestBed.createComponent(ShelfComponent); |
index.js | let env = require('./env') | let readArc = require('./read-arc')
module.exports = {
env,
getPorts,
checkPort,
maybeHydrate,
readArc
} | let { getPorts, checkPort } = require('./ports')
let maybeHydrate = require('./maybe-hydrate') |
build.rs | #![allow(non_snake_case)]
use std::io::{Result,Error,ErrorKind};
use std::path::{PathBuf};
// use super::genv;
use crate::rock::misc;
use serde_json::Value;
use std::collections::HashMap;
use std::fs::{metadata};
use std::process::Child;
// use std::thread;
// use std::time::Duration;
use std::env;
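/// Returns true if `a`'s modification time is no later than `b`'s; callers use
/// this to decide whether an object file is stale relative to its source.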
fn cmpFileTime(a:&PathBuf,b:&PathBuf)->bool
{
let amd = metadata(a).unwrap().modified().unwrap();
let bmd = metadata(b).unwrap().modified().unwrap();
return amd <= bmd;
}
fn buildObjectFile(opt: &mut misc::OptionData) -> Result<Option<Child>> {
let mut obj:PathBuf;
let src:PathBuf;
let mut cross:PathBuf;
let mut cmd:String;
let mut args:String = String::new();
if !opt.ASMSource.is_empty()
{
src = opt.ASMSource.pop().unwrap();
obj = src.clone();
obj.set_extension("o");
if PathBuf::from(&opt.CROSSCOMPILE).is_dir()
{
cross = PathBuf::from(&opt.CROSSCOMPILE);
cross.push(&opt.AS);
cmd = String::from(cross.to_str().unwrap());
}
else
{
cmd = opt.CROSSCOMPILE.clone();
cmd.push_str(opt.AS.as_str());
}
args.push_str("-c");
if opt.Type == "ShareLib"
{
args.push(' ');
args.push_str("-fPIC");
}
args.push(' ');
args.push_str(src.to_str().unwrap());
args.push(' ');
args.push_str("-o");
args.push(' ');
args.push_str(obj.to_str().unwrap());
if !opt.AFLAGS.is_empty()
{
args.push(' ');
args.push_str(opt.AFLAGS.as_str());
}
if !opt.DEFS.is_empty()
{
args.push(' ');
args.push_str(opt.DEFS.as_str());
}
if !opt.INCLUDES.is_empty()
{
args.push(' ');
args.push_str(opt.INCLUDES.as_str());
}
}
else if !opt.CSource.is_empty()
{
src = opt.CSource.pop().unwrap();
obj = src.clone();
obj.set_extension("o");
if PathBuf::from(&opt.CROSSCOMPILE).is_dir()
{
cross = PathBuf::from(&opt.CROSSCOMPILE);
cross.push(&opt.CC);
cmd = String::from(cross.to_str().unwrap());
}
else
{
cmd = opt.CROSSCOMPILE.clone();
cmd.push_str(opt.CC.as_str());
}
args.push_str("-c");
if opt.Type == "ShareLib"
{
args.push(' ');
args.push_str("-fPIC");
}
args.push(' ');
args.push_str(src.to_str().unwrap());
args.push(' ');
args.push_str("-o");
args.push(' ');
args.push_str(obj.to_str().unwrap());
if !opt.CFLAGS.is_empty()
{
args.push(' ');
args.push_str(opt.CFLAGS.as_str());
}
if !opt.DEFS.is_empty()
{
args.push(' ');
args.push_str(opt.DEFS.as_str());
}
if !opt.LIBPATH.is_empty()
{
args.push(' ');
args.push_str(opt.LIBPATH.as_str());
}
if !opt.INCLUDES.is_empty()
{
args.push(' ');
args.push_str(opt.INCLUDES.as_str());
}
}
else if !opt.CXXSource.is_empty()
{
src = opt.CXXSource.pop().unwrap();
obj = src.clone();
obj.set_extension("o");
if PathBuf::from(&opt.CROSSCOMPILE).is_dir()
{
cross = PathBuf::from(&opt.CROSSCOMPILE);
cross.push(&opt.CXX);
cmd = String::from(cross.to_str().unwrap());
}
else
{
cmd = opt.CROSSCOMPILE.clone();
cmd.push_str(opt.CXX.as_str());
}
args.push_str("-c");
if opt.Type == "ShareLib"
{
args.push(' ');
args.push_str("-fPIC");
}
args.push(' ');
args.push_str(src.to_str().unwrap());
args.push(' ');
args.push_str("-o");
args.push(' ');
args.push_str(obj.to_str().unwrap());
if !opt.CXXFLAGS.is_empty()
{
args.push(' ');
args.push_str(opt.CXXFLAGS.as_str());
}
if !opt.DEFS.is_empty()
{
args.push(' ');
args.push_str(opt.DEFS.as_str());
}
if !opt.INCLUDES.is_empty()
{
args.push(' ');
args.push_str(opt.INCLUDES.as_str());
}
}
else
{
// return Ok(String::from("Ok"));
return Err(Error::from(ErrorKind::NotFound));
}
opt.Object.push(obj.clone());
if (opt.Rebuild == true) || (obj.exists()==false) || cmpFileTime(&obj,&src)
{
// println!("{:?} {:?}", cmd.as_str(),args);
match misc::execAsync(cmd.as_str(),args,opt.IsSilent)
{
Ok(x) =>{return Ok(Some(x))}
Err(e) =>{panic!("building {:?} error {:?}",src,e);}
}
// misc::execSync(cmd.as_str(),args);
// buildObjectFile(opt)?;
}
return Ok(None);
// return Err(Error::from(ErrorKind::NotFound));
// return Ok(String::from("Ok"));
}
fn buildObject(opt: &mut misc::OptionData) -> Result<String> {
let mut p:Vec<Child> = vec![];
loop
{
if (p.len() as u64) < opt.Jobs
{
match buildObjectFile(opt)
{
// building
Ok(Some(x)) =>{
p.push(x);
continue;
}
// no building
Ok(None) =>{
continue;
}
// no file
Err(_e) =>{
// break;
}
}
}
if p.is_empty()
{
break;
}
for _ in 0..opt.Jobs {
if p.is_empty()
{
break;
}
let mut v = p.pop().unwrap();
match v.try_wait()
{
Ok(Some(status)) =>{
if !status.success()
{
return Err(Error::from(ErrorKind::Other));
}
}
Ok(None) =>{
p.push(v);
}
Err(e) =>{
println!("{:?}", e);
}
}
}
}
// buildObjectFile(opt)?;
return Ok(String::from("Ok"));
}
fn buildProgram(opt: &misc::OptionData) -> Result<String> {
let mut cross:PathBuf;
let mut args:String = String::new();
let mut cmd:String;
let mut target:String;
let mut targetExt:String;
targetExt = opt.Target.clone();
target = opt.Target.clone();
if cfg!(target_os = "windows")
{
match PathBuf::from(&opt.Target).extension()
{
Some(x) =>{
target = target.replace(&format!(".{:?}",x), "");
}
None =>{
targetExt.push_str(".exe");
}
}
}
else
{
match PathBuf::from(&opt.Target).extension()
{
Some(x) =>{
target = target.replace(&format!(".{:?}",x), "");
}
None =>{
targetExt.push_str(".out");
}
}
}
if PathBuf::from(&opt.CROSSCOMPILE).is_dir()
{
cross = PathBuf::from(&opt.CROSSCOMPILE);
cross.push(&opt.LD);
cmd = String::from(cross.to_str().unwrap());
}
else
{
cmd = opt.CROSSCOMPILE.clone();
cmd.push_str(opt.LD.as_str());
}
args.push_str("-o");
args.push(' ');
args.push_str(targetExt.as_str());
for i in &opt.Object{
if !args.is_empty()
{
args.push(' ');
}
args.push_str(i.to_str().unwrap());
}
for i in &opt.DependObject{
if !args.is_empty()
{
args.push(' ');
}
args.push_str(i.to_str().unwrap());
}
if !opt.LDFLAGS.is_empty()
{
args.push(' ');
args.push_str(opt.LDFLAGS.as_str());
}
if opt.IsMap
{
let map = format!(" -Wl,-Map={}.map",opt.Target);
args.push_str(map.as_str());
}
if !opt.LIBPATH.is_empty()
{
args.push(' ');
args.push_str(opt.LIBPATH.as_str());
}
if !opt.LIBS.is_empty()
{
args.push(' ');
args.push_str(opt.LIBS.as_str());
}
misc::execSync(cmd.as_str(),args,opt.IsSilent)?;
if opt.IsAsm
{
let tcmd;
if PathBuf::from(&opt.CROSSCOMPILE).is_dir()
{
cross = PathBuf::from(&opt.CROSSCOMPILE);
cross.push(&opt.OBJDUMP);
tcmd = format!("{} -D -S {} > {}.asm",cross.to_str().unwrap(),target,opt.Target);
}
else
{
tcmd = format!("{}{} -D -S {} > {}.asm",opt.CROSSCOMPILE.clone(),opt.OBJDUMP.as_str(),targetExt,target);
}
misc::cmdSync(&vec![tcmd],&env::current_dir().unwrap());
}
if opt.IsStrip
{
let mut tcmd;
if PathBuf::from(&opt.CROSSCOMPILE).is_dir()
{
cross = PathBuf::from(&opt.CROSSCOMPILE);
cross.push(&opt.STRIP);
tcmd = String::from(cross.to_str().unwrap());
}
else
{
tcmd = opt.CROSSCOMPILE.clone();
tcmd.push_str(opt.STRIP.as_str());
}
let targs:String = format!("{}",targetExt);
misc::execSync(tcmd.as_str(),targs,opt.IsSilent)?;
}
if opt.IsBinary
{
let mut tcmd;
if PathBuf::from(&opt.CROSSCOMPILE).is_dir()
{
cross = PathBuf::from(&opt.CROSSCOMPILE);
cross.push(&opt.OBJCOPY);
tcmd = String::from(cross.to_str().unwrap());
}
else
{
tcmd = opt.CROSSCOMPILE.clone();
tcmd.push_str(opt.OBJCOPY.as_str());
}
let targs:String = format!("-O binary {} {}.bin",targetExt,target);
misc::execSync(tcmd.as_str(),targs,opt.IsSilent)?;
}
return Ok(String::from("Ok"));
}
fn buildStaticLib(opt: &misc::OptionData) -> Result<String>
{
let mut cross:PathBuf;
let mut args:String = String::new();
let mut cmd:String;
// let mut target:String;
let mut targetExt:String;
targetExt = opt.Target.clone();
// target = opt.Target.clone();
if cfg!(target_os = "windows")
{
match PathBuf::from(&opt.Target).extension()
{
Some(_x) =>{
// target = target.replace(&format!(".{:?}",x), "");
}
None =>{
targetExt.push_str(".a");
}
}
}
else
{
match PathBuf::from(&opt.Target).extension()
{
Some(_x) =>{
// target = target.replace(&format!(".{:?}",x), "");
}
None =>{
targetExt.push_str(".a");
}
}
}
if PathBuf::from(&opt.CROSSCOMPILE).is_dir()
{
cross = PathBuf::from(&opt.CROSSCOMPILE);
cross.push(&opt.AR);
cmd = String::from(cross.to_str().unwrap());
}
else
{
cmd = opt.CROSSCOMPILE.clone();
cmd.push_str(opt.AR.as_str());
}
args.push_str("r");
args.push(' ');
args.push_str(targetExt.as_str());
for i in &opt.Object{
if !args.is_empty()
{
args.push(' ');
}
args.push_str(i.to_str().unwrap());
}
for i in &opt.DependObject{
if !args.is_empty()
{
args.push(' ');
}
args.push_str(i.to_str().unwrap());
}
if !opt.LIBPATH.is_empty()
{
args.push(' ');
args.push_str(opt.LIBPATH.as_str());
}
if !opt.LIBS.is_empty()
{
args.push(' ');
args.push_str(opt.LIBS.as_str());
}
misc::execSync(cmd.as_str(),args,opt.IsSilent)?;
return Ok(String::from("Ok"));
}
fn buildSharelib(opt: &misc::OptionData) -> Result<String>
{
let mut cross:PathBuf;
let mut args:String = String::new();
let mut cmd:String;
let mut target:String;
let mut targetExt:String;
targetExt = opt.Target.clone();
target = opt.Target.clone();
if cfg!(target_os = "windows")
{
match PathBuf::from(&opt.Target).extension()
{
Some(x) =>{
target = target.replace(&format!(".{:?}",x), "");
}
None =>{
targetExt.push_str(".dll");
} | else
{
match PathBuf::from(&opt.Target).extension()
{
Some(x) =>{
target = target.replace(&format!(".{:?}",x), "");
}
None =>{
targetExt.push_str(".so");
}
}
}
if PathBuf::from(&opt.CROSSCOMPILE).is_dir()
{
cross = PathBuf::from(&opt.CROSSCOMPILE);
cross.push(&opt.LD);
cmd = String::from(cross.to_str().unwrap());
}
else
{
cmd = opt.CROSSCOMPILE.clone();
cmd.push_str(opt.LD.as_str());
}
args.push_str("-shared");
args.push(' ');
args.push_str("-o");
args.push(' ');
args.push_str(targetExt.as_str());
for i in &opt.Object{
if !args.is_empty()
{
args.push(' ');
}
args.push_str(i.to_str().unwrap());
}
for i in &opt.DependObject{
if !args.is_empty()
{
args.push(' ');
}
args.push_str(i.to_str().unwrap());
}
if !opt.LDFLAGS.is_empty()
{
args.push(' ');
args.push_str(opt.LDFLAGS.as_str());
}
if opt.IsMap
{
let map = format!(" -Wl,-Map={}.map",target);
args.push_str(map.as_str());
}
if !opt.LIBPATH.is_empty()
{
args.push(' ');
args.push_str(opt.LIBPATH.as_str());
}
if !opt.LIBS.is_empty()
{
args.push(' ');
args.push_str(opt.LIBS.as_str());
}
misc::execSync(cmd.as_str(),args,opt.IsSilent)?;
if opt.IsAsm
{
let tcmd;
if PathBuf::from(&opt.CROSSCOMPILE).is_dir()
{
cross = PathBuf::from(&opt.CROSSCOMPILE);
cross.push(&opt.OBJDUMP);
tcmd = format!("{} -D -S {} > {}.asm",cross.to_str().unwrap(),targetExt,target);
}
else
{
tcmd = format!("{}{} -D -S {} > {}.asm",opt.CROSSCOMPILE.clone(),opt.OBJDUMP.as_str(),targetExt,target);
}
misc::cmdSync(&vec![tcmd],&env::current_dir().unwrap());
}
if opt.IsStrip
{
let mut tcmd;
if PathBuf::from(&opt.CROSSCOMPILE).is_dir()
{
cross = PathBuf::from(&opt.CROSSCOMPILE);
cross.push(&opt.STRIP);
tcmd = String::from(cross.to_str().unwrap());
}
else
{
tcmd = opt.CROSSCOMPILE.clone();
tcmd.push_str(opt.STRIP.as_str());
}
let targs:String = format!("{}",targetExt);
misc::execSync(tcmd.as_str(),targs,opt.IsSilent)?;
}
return Ok(String::from("Ok"));
}
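/// Builds the rock described by `optName`: rocks listed in `DependRock` are
/// built first (recursively), the configured CmdStart/CmdStop hooks are run,
/// and objects are compiled and linked according to the `Type` field
/// (Program, StaticLib, ShareLib or Object).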
fn buildOption(optName: &PathBuf, fileMap: &HashMap<PathBuf, Value>) -> Result<String> {
// println!("buildOption Entry optName:{:?}", optName);
let fileJson: Value;
match fileMap.get(optName) {
Some(x) => {
fileJson = x.clone();
match x["DependRock"].as_array() {
Some(z) => {
for i in z {
match i.as_str() {
Some(s) => {
buildOption(&PathBuf::from(s), fileMap)?;
}
None => {}
}
}
}
None => {}
}
}
None => {
return Ok(String::from("Ok"));
}
}
let fileAbsDir = PathBuf::from(optName.parent().unwrap());
misc::cdDir(&fileAbsDir);
misc::cmdSync(&misc::valueToVec(&fileJson["CmdStart"]), &fileAbsDir);
let mut option: misc::OptionData = misc::expandOption(&fileJson);
if !option.IsSilent
{
println!("Building {:?}", optName);
println!("{:?}", option);
}
match option.Type.as_str() {
"Program" => {
buildObject(&mut option)?;
buildProgram(&option)?;
}
"StaticLib" => {
buildObject(&mut option)?;
buildStaticLib(&option)?;
}
"ShareLib" => {
buildObject(&mut option)?;
buildSharelib(&option)?;
}
"Object" => {
buildObject(&mut option)?;
}
_ => {
panic!("Type No Support!");
}
}
misc::cmdSync(&misc::valueToVec(&fileJson["CmdStop"]), &fileAbsDir);
return Ok(String::from("Ok"));
}
pub fn building(rockfile: PathBuf, rebuild: bool) -> Result<String> {
// let buildDir = rockfile.parent().unwrap().canonicalize().unwrap();
// misc::cdDir(buildDir);
// let file = File::open(rockfile)?;
// let reader = BufReader::new(file);
// let rootfileJson:Value = serde_json::from_reader(reader)?;
// let mut data:misc::OptionData = misc::getOptionFromJson(&rootfileJson);
// data.Rebuild = rebuild;
// println!("{:?}",data);
// println!("getAbsPath: {:?}", misc::getAbsPath(&rockfile));
// println!("getAbsPath: {:?}", misc::getAbsPath(&PathBuf::from("\\\\?\\D:\\work\\rust\\demo\\rock.json")));
// println!("getAbsPath: {:?}", misc::getAbsPath(&PathBuf::from("D:\\work\\rust\\demo\\rock.json")));
// println!("getAbsPath: {:?}", misc::getAbsDir(&PathBuf::from("..\\demo\\rock.json")));
let fileAbsPath = misc::getAbsPath(&rockfile);
let mut fileMap = HashMap::new();
misc::getOption(&fileAbsPath, &mut fileMap);
// println!("{:?}", fileMap);
misc::initEnvs(
&fileMap.get(&fileAbsPath).unwrap(),
rebuild,
&misc::getAbsDir(&rockfile),
);
buildOption(&fileAbsPath, &mut fileMap)?;
return Ok(String::from("Ok"));
} | }
} |
player.rs | use rltk::{VirtualKeyCode, Rltk, Point};
use specs::prelude::*;
use std::cmp::{max, min};
use super::{Position, Player, Viewshed, State, Map, RunState, CombatStats, WantsToMelee, Item,
gamelog::GameLog, WantsToPickupItem, TileType, Monster, HungerClock, HungerState};
pub fn try_move_player(delta_x: i32, delta_y: i32, ecs: &mut World) {
let mut positions = ecs.write_storage::<Position>();
let players = ecs.read_storage::<Player>();
let mut viewsheds = ecs.write_storage::<Viewshed>();
let entities = ecs.entities();
let combat_stats = ecs.read_storage::<CombatStats>();
let map = ecs.fetch::<Map>();
let mut wants_to_melee = ecs.write_storage::<WantsToMelee>();
for (entity, _player, pos, viewshed) in (&entities, &players, &mut positions, &mut viewsheds).join() {
if pos.x + delta_x < 1 || pos.x + delta_x > map.width-1 || pos.y + delta_y < 1 || pos.y + delta_y > map.height-1 { return; }
let destination_idx = map.xy_idx(pos.x + delta_x, pos.y + delta_y);
for potential_target in map.tile_content[destination_idx].iter() {
let target = combat_stats.get(*potential_target);
if let Some(_target) = target {
wants_to_melee.insert(entity, WantsToMelee{ target: *potential_target }).expect("Add target failed");
return;
}
}
if !map.blocked[destination_idx] {
pos.x = min(79 , max(0, pos.x + delta_x));
pos.y = min(49, max(0, pos.y + delta_y));
viewshed.dirty = true;
let mut ppos = ecs.write_resource::<Point>();
ppos.x = pos.x;
ppos.y = pos.y;
}
}
}
pub fn try_next_level(ecs: &mut World) -> bool {
let player_pos = ecs.fetch::<Point>();
let map = ecs.fetch::<Map>();
let player_idx = map.xy_idx(player_pos.x, player_pos.y);
if map.tiles[player_idx] == TileType::DownStairs {
true
} else {
let mut gamelog = ecs.fetch_mut::<GameLog>();
gamelog.entries.push("There is no way down from here.".to_string());
false
}
}
fn get_item(ecs: &mut World) {
let player_pos = ecs.fetch::<Point>();
let player_entity = ecs.fetch::<Entity>();
let entities = ecs.entities();
let items = ecs.read_storage::<Item>();
let positions = ecs.read_storage::<Position>();
let mut gamelog = ecs.fetch_mut::<GameLog>();
let mut target_item : Option<Entity> = None;
for (item_entity, _item, position) in (&entities, &items, &positions).join() {
if position.x == player_pos.x && position.y == player_pos.y {
target_item = Some(item_entity);
}
}
match target_item {
None => gamelog.entries.push("There is nothing here to pick up.".to_string()),
Some(item) => {
let mut pickup = ecs.write_storage::<WantsToPickupItem>();
pickup.insert(*player_entity, WantsToPickupItem{ collected_by: *player_entity, item }).expect("Unable to insert want to pickup");
}
}
}
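/// Skips the player's turn, healing 1 hp only when no monsters are visible
/// and the player is neither hungry nor starving.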
fn skip_turn(ecs: &mut World) -> RunState {
let player_entity = ecs.fetch::<Entity>();
let viewshed_components = ecs.read_storage::<Viewshed>();
let monsters = ecs.read_storage::<Monster>();
let worldmap_resource = ecs.fetch::<Map>();
let mut can_heal = true;
let viewshed = viewshed_components.get(*player_entity).unwrap();
for tile in viewshed.visible_tiles.iter() {
let idx = worldmap_resource.xy_idx(tile.x, tile.y);
for entity_id in worldmap_resource.tile_content[idx].iter() {
let mob = monsters.get(*entity_id);
match mob {
None => {}
Some(_) => { can_heal = false; }
}
}
}
let hunger_clocks = ecs.read_storage::<HungerClock>();
let hc = hunger_clocks.get(*player_entity);
if let Some(hc) = hc {
match hc.state {
HungerState::Hungry => can_heal = false,
HungerState::Starving => can_heal = false,
_ => {}
}
}
if can_heal {
let mut health_components = ecs.write_storage::<CombatStats>();
let player_hp = health_components.get_mut(*player_entity).unwrap();
player_hp.hp = i32::min(player_hp.hp + 1, player_hp.max_hp);
}
RunState::PlayerTurn
}
pub fn player_input(gs: &mut State, ctx: &mut Rltk) -> RunState | {
// Player movement
match ctx.key {
None => { return RunState::AwaitingInput } // Nothing happened
Some(key) => match key {
VirtualKeyCode::Left |
VirtualKeyCode::Numpad4 |
VirtualKeyCode::H => try_move_player(-1, 0, &mut gs.ecs),
VirtualKeyCode::Right |
VirtualKeyCode::Numpad6 |
VirtualKeyCode::L => try_move_player(1, 0, &mut gs.ecs),
VirtualKeyCode::Up |
VirtualKeyCode::Numpad8 |
VirtualKeyCode::K => try_move_player(0, -1, &mut gs.ecs),
VirtualKeyCode::Down |
VirtualKeyCode::Numpad2 |
VirtualKeyCode::J => try_move_player(0, 1, &mut gs.ecs),
// Diagonals
VirtualKeyCode::Numpad9 |
VirtualKeyCode::U => try_move_player(1, -1, &mut gs.ecs),
VirtualKeyCode::Numpad7 |
VirtualKeyCode::Y => try_move_player(-1, -1, &mut gs.ecs),
VirtualKeyCode::Numpad3 |
VirtualKeyCode::N => try_move_player(1, 1, &mut gs.ecs),
VirtualKeyCode::Numpad1 |
VirtualKeyCode::B => try_move_player(-1, 1, &mut gs.ecs),
// Skip Turn
VirtualKeyCode::Numpad5 => return skip_turn(&mut gs.ecs),
VirtualKeyCode::Space => return skip_turn(&mut gs.ecs),
// Level changes
VirtualKeyCode::Period => {
if try_next_level(&mut gs.ecs) {
return RunState::NextLevel;
}
}
// Picking up items
VirtualKeyCode::G => get_item(&mut gs.ecs),
VirtualKeyCode::I => return RunState::ShowInventory,
VirtualKeyCode::D => return RunState::ShowDropItem,
VirtualKeyCode::R => return RunState::ShowRemoveItem,
// Save and Quit
VirtualKeyCode::Escape => return RunState::SaveGame,
_ => { return RunState::AwaitingInput }
},
}
RunState::PlayerTurn
} |
|
cursor.rs | //! Cursor utilities
//!
//! This module contains bindings to the `libwayland-cursor.so` library.
//!
//! These utilities allow you to load cursor images in order to match
//! your cursors to the ones of the system.
//!
//! First of all, the function `load_theme` will allow you to load a
//! `CursorTheme`, which represents the full cursor theme.
//!
//! From this theme, you can load a specific `Cursor`, which can contain
//! several images if the cursor is animated. It also provides you with the
//! means of querying which frame of the animation should be displayed at
//! what time, as well as handles to the buffers containing these frames, to
//! attach them to a wayland surface.
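//!
//! A minimal usage sketch (assumptions: `shm` is an already-bound `WlShm`
//! handle obtained elsewhere, and the theme provides a cursor named "wait";
//! both are illustrative only):
//!
//! ```ignore
//! let theme = load_theme(None, 32, &shm);
//! let cursor = theme.get_cursor("wait").expect("cursor not in theme");
//! // Which frame should be shown 100 ms into the animation, and for how long?
//! let (frame, _remaining_ms) = cursor.frame_and_duration(100);
//! let buffer = cursor.frame_buffer(frame).expect("frame out of bounds");
//! // `buffer` derefs to a WlBuffer and can be attached to the pointer surface.
//! ```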
use protocol::wl_buffer::WlBuffer;
use protocol::wl_shm::WlShm;
use std::ffi::{CStr, CString};
use std::marker::PhantomData;
use std::ops::Deref;
use std::os::raw::c_int;
use std::ptr;
use wayland_sys::cursor::*;
use Proxy;
/// Checks if the wayland-cursor library is available and can be used
///
/// Trying to call any function of this module if the library cannot
/// be used will result in a panic.
pub fn is_available() -> bool {
is_lib_available()
}
/// Represents a cursor theme loaded from the system.
pub struct CursorTheme {
theme: *mut wl_cursor_theme,
}
unsafe impl Send for CursorTheme {}
/// Attempts to load a cursor theme.
///
/// If no name is given or the requested theme is not found, the default theme
/// will be loaded.
///
/// Other arguments are the requested size for the cursor images (ex: 16)
/// and a handle to the global `WlShm` object.
///
/// # Panics
///
/// - Panics if the `wayland-cursor` lib is not available
/// (see `is_available()` function) in this module.
/// - Panics in case of memory allocation failure.
/// - Panics if `name` contains an interior null.
pub fn load_theme(name: Option<&str>, size: u32, shm: &WlShm) -> CursorTheme {
let ptr = if let Some(theme) = name {
let cstr = CString::new(theme).expect("Theme name contained an interior null.");
unsafe {
ffi_dispatch!(
WAYLAND_CURSOR_HANDLE,
wl_cursor_theme_load,
cstr.as_ptr(),
size as c_int,
shm.as_ref().c_ptr()
)
}
} else {
unsafe {
ffi_dispatch!(
WAYLAND_CURSOR_HANDLE,
wl_cursor_theme_load,
ptr::null(),
size as c_int,
shm.as_ref().c_ptr()
)
}
};
assert!(!ptr.is_null(), "Memory allocation failure while loading a theme.");
CursorTheme { theme: ptr }
}
impl CursorTheme {
/// Retrieves a cursor from the theme.
///
/// Returns `None` if this cursor is not provided by the theme.
///
/// # Panics
///
/// Panics if the name contains an interior null.
pub fn get_cursor(&self, name: &str) -> Option<Cursor> {
let cstr = CString::new(name).expect("Cursor name contained an interior null.");
let ptr = unsafe {
ffi_dispatch!(
WAYLAND_CURSOR_HANDLE,
wl_cursor_theme_get_cursor,
self.theme,
cstr.as_ptr()
)
};
if ptr.is_null() {
None
} else {
Some(Cursor {
_theme: PhantomData,
cursor: ptr,
})
}
}
}
impl Drop for CursorTheme { | ffi_dispatch!(WAYLAND_CURSOR_HANDLE, wl_cursor_theme_destroy, self.theme);
}
}
}
/// A cursor from a theme. Can contain several images if animated.
pub struct Cursor<'a> {
_theme: PhantomData<&'a CursorTheme>,
cursor: *mut wl_cursor,
}
unsafe impl<'a> Send for Cursor<'a> {}
impl<'a> Cursor<'a> {
/// Returns the name of this cursor.
pub fn name(&self) -> String {
let name = unsafe { CStr::from_ptr((*self.cursor).name) };
name.to_string_lossy().into_owned()
}
/// Returns the number of images contained in this animated cursor
pub fn image_count(&self) -> usize {
let count = unsafe { (*self.cursor).image_count };
count as usize
}
/// Returns which frame of an animated cursor should be displayed at a given time.
///
/// The time is given in milliseconds after the beginning of the animation.
pub fn frame(&self, duration: u32) -> usize {
let frame = unsafe { ffi_dispatch!(WAYLAND_CURSOR_HANDLE, wl_cursor_frame, self.cursor, duration) };
frame as usize
}
/// Returns the frame number and its remaining duration.
///
/// Same as `frame()`, but also returns the amount of milliseconds this
/// frame should continue to be displayed.
pub fn frame_and_duration(&self, duration: u32) -> (usize, u32) {
let mut out_duration = 0u32;
let frame = unsafe {
ffi_dispatch!(
WAYLAND_CURSOR_HANDLE,
wl_cursor_frame_and_duration,
self.cursor,
duration,
&mut out_duration as *mut u32
)
} as usize;
(frame, out_duration)
}
/// Returns a `CursorImageBuffer` containing the given image of an animation.
///
/// It can be attached to a surface as a classic `WlBuffer`.
///
/// Returns `None` if the frame is out of bounds.
///
/// Note: destroying this buffer (using the `destroy` method) will corrupt
/// your theme data, so you might not want to do it.
pub fn frame_buffer(&self, frame: usize) -> Option<CursorImageBuffer> {
if frame >= self.image_count() {
None
} else {
unsafe {
let image = *(*self.cursor).images.offset(frame as isize);
let ptr = ffi_dispatch!(WAYLAND_CURSOR_HANDLE, wl_cursor_image_get_buffer, image);
let buffer = Proxy::from_c_ptr(ptr).into();
Some(CursorImageBuffer {
_cursor: PhantomData,
buffer,
})
}
}
}
/// Returns the metadata associated with a given frame of the animation.
///
/// The tuple contains: `(width, height, hotspot_x, hotspot_y, delay)`
///
/// Returns `None` if the frame is out of bounds.
pub fn frame_info(&self, frame: usize) -> Option<(u32, u32, u32, u32, u32)> {
if frame >= self.image_count() {
None
} else {
let image = unsafe { &**(*self.cursor).images.offset(frame as isize) };
Some((
image.width,
image.height,
image.hotspot_x,
image.hotspot_y,
image.delay,
))
}
}
}
/// A buffer containing a cursor image.
///
/// You can access the `WlBuffer` via `Deref`.
///
/// Note that this proxy will be considered as "unmanaged" by the crate, as such you should
/// not try to act on it beyond assigning it to `wl_surface`s.
pub struct CursorImageBuffer<'a> {
_cursor: PhantomData<&'a Cursor<'a>>,
buffer: WlBuffer,
}
unsafe impl<'a> Send for CursorImageBuffer<'a> {}
impl<'a> Deref for CursorImageBuffer<'a> {
type Target = WlBuffer;
fn deref(&self) -> &WlBuffer {
&self.buffer
}
} | fn drop(&mut self) {
unsafe { |
queue.go | // Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package diskqueue
import (
"errors"
"fmt"
"os"
"sync"
"github.com/elastic/beats/v7/libbeat/common"
"github.com/elastic/beats/v7/libbeat/feature"
"github.com/elastic/beats/v7/libbeat/logp"
"github.com/elastic/beats/v7/libbeat/publisher/queue"
)
// diskQueue is the internal type representing a disk-based implementation
// of queue.Queue.
type diskQueue struct {
logger *logp.Logger
settings Settings
// Metadata related to the segment files.
segments diskQueueSegments
// Metadata related to consumer acks / positions of the oldest remaining
// frame.
acks *diskQueueACKs
// The queue's helper loops, each of which is run in its own goroutine.
readerLoop *readerLoop
writerLoop *writerLoop
deleterLoop *deleterLoop
// Wait group for shutdown of the goroutines associated with this queue:
// reader loop, writer loop, deleter loop, and core loop (diskQueue.run()).
waitGroup sync.WaitGroup
// writing is true if the writer loop is processing a request, false
// otherwise.
writing bool
// If writing is true, then writeRequestSize equals the number of bytes it
// contained. Used to calculate how much free capacity the queue has left
// after all scheduled writes have been completed (see canAcceptFrameOfSize).
writeRequestSize uint64
// reading is true if the reader loop is processing a request, false
// otherwise.
reading bool
// deleting is true if the deleter loop is processing a request, false
// otherwise.
deleting bool
// The API channel used by diskQueueProducer to write events.
producerWriteRequestChan chan producerWriteRequest
// pendingFrames is a list of all incoming data frames that have been
// accepted by the queue and are waiting to be sent to the writer loop.
// Segment ids in this list always appear in sorted order, even between
// requests (that is, a frame added to this list always has segment id
// at least as high as every previous frame that has ever been added).
pendingFrames []segmentedFrame
// blockedProducers is a list of all producer write requests that are
// waiting for free space in the queue.
blockedProducers []producerWriteRequest
// The channel to signal our goroutines to shut down.
done chan struct{}
}
func | () {
queue.RegisterQueueType(
"disk",
queueFactory,
feature.MakeDetails(
"Disk queue",
"Buffer events on disk before sending to the output.",
feature.Stable))
}
// queueFactory matches the queue.Factory interface, and is used to add the
// disk queue to the registry.
func queueFactory(
ackListener queue.ACKListener, logger *logp.Logger, cfg *common.Config, _ int, // input queue size param is unused.
) (queue.Queue, error) {
settings, err := SettingsForUserConfig(cfg)
if err != nil {
return nil, fmt.Errorf("disk queue couldn't load user config: %w", err)
}
settings.WriteToDiskListener = ackListener
return NewQueue(logger, settings)
}
// NewQueue returns a disk-based queue configured with the given logger
// and settings, creating it if it doesn't exist.
func NewQueue(logger *logp.Logger, settings Settings) (*diskQueue, error) {
logger = logger.Named("diskqueue")
logger.Debugf(
"Initializing disk queue at path %v", settings.directoryPath())
if settings.MaxBufferSize > 0 &&
settings.MaxBufferSize < settings.MaxSegmentSize*2 {
return nil, fmt.Errorf(
"disk queue buffer size (%v) must be at least "+
"twice the segment size (%v)",
settings.MaxBufferSize, settings.MaxSegmentSize)
}
// Create the given directory path if it doesn't exist.
err := os.MkdirAll(settings.directoryPath(), os.ModePerm)
if err != nil {
return nil, fmt.Errorf("couldn't create disk queue directory: %w", err)
}
// Load the previous queue position, if any.
nextReadPosition, err := queuePositionFromPath(settings.stateFilePath())
if err != nil && !errors.Is(err, os.ErrNotExist) {
// Errors reading / writing the position are non-fatal -- we just log a
// warning and fall back on the oldest existing segment, if any.
logger.Warnf("Couldn't load most recent queue position: %v", err)
}
if nextReadPosition.frameIndex == 0 {
// If the previous state was written by an older version, it may lack
// the frameIndex field. In this case we reset the read offset within
// the segment, which may cause one-time retransmission of some events
// from a previous version, but ensures that our metrics are consistent.
// In the more common case that frameIndex is 0 because this segment
// simply hasn't been read yet, setting byteIndex to 0 is a no-op.
nextReadPosition.byteIndex = 0
}
positionFile, err := os.OpenFile(
settings.stateFilePath(), os.O_WRONLY|os.O_CREATE, 0600)
if err != nil {
// This is not the _worst_ error: we could try operating even without a
// position file. But it indicates a problem with the queue permissions on
// disk, which keeps us from tracking our position within the segment files
// and could also prevent us from creating new ones, so we treat this as a
// fatal error on startup rather than quietly providing degraded
// performance.
return nil, fmt.Errorf("couldn't write to state file: %v", err)
}
// Index any existing data segments to be placed in segments.reading.
initialSegments, err :=
scanExistingSegments(logger, settings.directoryPath())
if err != nil {
return nil, err
}
var nextSegmentID segmentID
if len(initialSegments) > 0 {
// Initialize nextSegmentID to the first ID after the existing segments.
lastID := initialSegments[len(initialSegments)-1].id
nextSegmentID = lastID + 1
}
// If any of the initial segments are older than the current queue
// position, move them directly to the acked list where they can be
// deleted.
ackedSegments := []*queueSegment{}
readSegmentID := nextReadPosition.segmentID
for len(initialSegments) > 0 && initialSegments[0].id < readSegmentID {
ackedSegments = append(ackedSegments, initialSegments[0])
initialSegments = initialSegments[1:]
}
// If the queue position is older than all existing segments, advance
// it to the beginning of the first one.
if len(initialSegments) > 0 && readSegmentID < initialSegments[0].id {
nextReadPosition = queuePosition{segmentID: initialSegments[0].id}
}
// We can compute the active frames right now but still need a way to report
// them to the global beat metrics. For now, just log the total.
// Note that for consistency with existing queue behavior, this excludes
// events that are still present on disk but were already sent and
// acknowledged on a previous run (we probably want to track these as well
// in the future.)
// TODO: pass in a context that queues can use to report these events.
activeFrameCount := 0
for _, segment := range initialSegments {
activeFrameCount += int(segment.frameCount)
}
activeFrameCount -= int(nextReadPosition.frameIndex)
logger.Infof("Found %d existing events on queue start", activeFrameCount)
queue := &diskQueue{
logger: logger,
settings: settings,
segments: diskQueueSegments{
reading: initialSegments,
acked: ackedSegments,
nextID: nextSegmentID,
nextReadPosition: nextReadPosition.byteIndex,
},
acks: newDiskQueueACKs(logger, nextReadPosition, positionFile),
readerLoop: newReaderLoop(settings),
writerLoop: newWriterLoop(logger, settings),
deleterLoop: newDeleterLoop(settings),
producerWriteRequestChan: make(chan producerWriteRequest),
done: make(chan struct{}),
}
// We wait for four goroutines on shutdown: core loop, reader loop,
// writer loop, deleter loop.
queue.waitGroup.Add(4)
// Start the goroutines and return the queue!
go func() {
queue.readerLoop.run()
queue.waitGroup.Done()
}()
go func() {
queue.writerLoop.run()
queue.waitGroup.Done()
}()
go func() {
queue.deleterLoop.run()
queue.waitGroup.Done()
}()
go func() {
queue.run()
queue.waitGroup.Done()
}()
return queue, nil
}
//
// diskQueue implementation of the queue.Queue interface
//
func (dq *diskQueue) Close() error {
// Closing the done channel signals to the core loop that it should
// shut down the other helper goroutines and wrap everything up.
close(dq.done)
dq.waitGroup.Wait()
return nil
}
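// BufferConfig reports MaxEvents 0: the disk queue does not enforce an in-memory event-count limit.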
func (dq *diskQueue) BufferConfig() queue.BufferConfig {
return queue.BufferConfig{MaxEvents: 0}
}
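// Producer returns a new disk queue producer with its own event encoder.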
func (dq *diskQueue) Producer(cfg queue.ProducerConfig) queue.Producer {
return &diskQueueProducer{
queue: dq,
config: cfg,
encoder: newEventEncoder(),
done: make(chan struct{}),
}
}
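// Consumer returns a new consumer for reading events back from this queue.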
func (dq *diskQueue) Consumer() queue.Consumer {
return &diskQueueConsumer{queue: dq, done: make(chan struct{})}
}
service_windows.go | /*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package command
import (
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"time"
"unsafe"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/services/server"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"golang.org/x/sys/windows"
"golang.org/x/sys/windows/svc"
"golang.org/x/sys/windows/svc/debug"
"golang.org/x/sys/windows/svc/mgr"
)
var (
serviceNameFlag string
registerServiceFlag bool
unregisterServiceFlag bool
runServiceFlag bool
kernel32 = windows.NewLazySystemDLL("kernel32.dll")
setStdHandle = kernel32.NewProc("SetStdHandle")
allocConsole = kernel32.NewProc("AllocConsole")
oldStderr windows.Handle
panicFile *os.File
service *handler
)
// serviceFlags returns an array of flags for configuring containerd to run
// as a Windows service under control of SCM.
func serviceFlags() []cli.Flag {
return []cli.Flag{
cli.StringFlag{
Name: "service-name",
Usage: "Set the Windows service name",
Value: "containerd",
},
cli.BoolFlag{
Name: "register-service",
Usage: "Register the service and exit",
},
cli.BoolFlag{
Name: "unregister-service",
Usage: "Unregister the service and exit",
},
cli.BoolFlag{
Name: "run-service",
Usage: "",
Hidden: true,
},
}
}
// applyPlatformFlags applies platform-specific flags.
func applyPlatformFlags(context *cli.Context) {
if s := context.GlobalString("service-name"); s != "" {
serviceNameFlag = s
}
for _, v := range []struct {
name string
d *bool
}{
{
name: "register-service",
d: ®isterServiceFlag,
},
{
name: "unregister-service",
d: &unregisterServiceFlag,
},
{
name: "run-service",
d: &runServiceFlag,
},
} {
*v.d = context.GlobalBool(v.name)
}
}
type handler struct {
fromsvc chan error
s *server.Server
done chan struct{} // Indicates back to app main to quit
}
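// getServicePath returns the absolute path of the current executable.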
func getServicePath() (string, error) {
p, err := exec.LookPath(os.Args[0])
if err != nil {
return "", err
}
return filepath.Abs(p)
}
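// registerService creates the containerd service in the Windows Service Control Manager and configures restart-on-failure actions.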
func registerService() error {
p, err := getServicePath()
if err != nil {
return err
}
m, err := mgr.Connect()
if err != nil {
return err
}
defer m.Disconnect()
c := mgr.Config{
ServiceType: windows.SERVICE_WIN32_OWN_PROCESS,
StartType: mgr.StartAutomatic,
ErrorControl: mgr.ErrorNormal,
DisplayName: "Containerd",
Description: "Container runtime",
}
// Configure the service to launch with the arguments that were just passed.
args := []string{"--run-service"}
for _, a := range os.Args[1:] {
if a != "--register-service" && a != "--unregister-service" {
args = append(args, a)
}
}
s, err := m.CreateService(serviceNameFlag, p, c, args...)
if err != nil {
return err
}
defer s.Close()
// See http://stackoverflow.com/questions/35151052/how-do-i-configure-failure-actions-of-a-windows-service-written-in-go
const (
scActionNone = 0
scActionRestart = 1
serviceConfigFailureActions = 2
)
type serviceFailureActions struct {
ResetPeriod uint32
RebootMsg *uint16
Command *uint16
ActionsCount uint32
Actions uintptr
}
type scAction struct {
Type uint32
Delay uint32
}
t := []scAction{
{Type: scActionRestart, Delay: uint32(15 * time.Second / time.Millisecond)},
{Type: scActionRestart, Delay: uint32(15 * time.Second / time.Millisecond)},
{Type: scActionNone},
}
lpInfo := serviceFailureActions{ResetPeriod: uint32(24 * time.Hour / time.Second), ActionsCount: uint32(3), Actions: uintptr(unsafe.Pointer(&t[0]))}
err = windows.ChangeServiceConfig2(s.Handle, serviceConfigFailureActions, (*byte)(unsafe.Pointer(&lpInfo)))
if err != nil {
return err
}
return nil
}
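// unregisterService deletes the containerd service from the Service Control Manager.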
func unregisterService() error {
m, err := mgr.Connect()
if err != nil {
return err
}
defer m.Disconnect()
s, err := m.OpenService(serviceNameFlag)
if err != nil {
return err
}
defer s.Close()
err = s.Delete()
if err != nil {
return err
}
return nil
}
// registerUnregisterService is an entrypoint early in the daemon startup
// to handle (un-)registering against Windows Service Control Manager (SCM).
// It returns an indication to stop on successful SCM operation, and an error.
func registerUnregisterService(root string) (bool, error) {
if unregisterServiceFlag {
if registerServiceFlag {
return true, errors.Wrap(errdefs.ErrInvalidArgument, "--register-service and --unregister-service cannot be used together")
}
return true, unregisterService()
}
if registerServiceFlag {
return true, registerService()
}
if runServiceFlag {
// Allocate a conhost for containerd here. We don't actually use this
// at all in containerd, but it will be inherited by any processes
// containerd executes, so they won't need to allocate their own
// conhosts. This is important for two reasons:
// - Creating a conhost slows down process launch.
// - We have seen reliability issues when launching many processes.
// Sometimes the process invocation will fail due to an error when
// creating the conhost.
//
// This needs to be done before initializing the panic file, as
// AllocConsole sets the stdio handles to point to the new conhost,
// and we want to make sure stderr goes to the panic file.
r, _, err := allocConsole.Call()
if r == 0 && err != nil {
return true, fmt.Errorf("error allocating conhost: %s", err)
}
if err := initPanicFile(filepath.Join(root, "panic.log")); err != nil {
return true, err
}
logrus.SetOutput(ioutil.Discard)
}
return false, nil
}
// launchService is the entry point for running the daemon under SCM.
func launchService(s *server.Server, done chan struct{}) error {
if !runServiceFlag {
return nil
}
h := &handler{
fromsvc: make(chan error),
s: s,
done: done,
}
interactive, err := svc.IsAnInteractiveSession()
if err != nil {
return err
}
service = h
go func() {
if interactive {
err = debug.Run(serviceNameFlag, h)
} else {
err = svc.Run(serviceNameFlag, h)
}
h.fromsvc <- err
}()
// Wait for the first signal from the service handler.
err = <-h.fromsvc
if err != nil {
return err
}
return nil
}
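// Execute implements svc.Handler: it reports status to the SCM and stops the server when a Stop or Shutdown control is received.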
func (h *handler) Execute(_ []string, r <-chan svc.ChangeRequest, s chan<- svc.Status) (bool, uint32) {
s <- svc.Status{State: svc.StartPending, Accepts: 0}
// Unblock launchService()
h.fromsvc <- nil
s <- svc.Status{State: svc.Running, Accepts: svc.AcceptStop | svc.AcceptShutdown | svc.Accepted(windows.SERVICE_ACCEPT_PARAMCHANGE)}
Loop:
for c := range r {
switch c.Cmd {
case svc.Interrogate:
s <- c.CurrentStatus
case svc.Stop, svc.Shutdown:
s <- svc.Status{State: svc.StopPending, Accepts: 0}
h.s.Stop()
break Loop
}
}
removePanicFile()
close(h.done)
return false, 0
}
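// initPanicFile points stderr at a panic log file so runtime panics are captured while running as a service.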
func initPanicFile(path string) error {
var err error
panicFile, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0)
if err != nil {
return err
}
st, err := panicFile.Stat()
if err != nil {
return err
}
// If there are contents in the file already, move the file out of the way
// and replace it.
if st.Size() > 0 {
panicFile.Close()
os.Rename(path, path+".old")
panicFile, err = os.Create(path)
if err != nil {
return err
}
}
// Update STD_ERROR_HANDLE to point to the panic file so that Go writes to
// it when it panics. Remember the old stderr to restore it before removing
// the panic file.
sh := windows.STD_ERROR_HANDLE
h, err := windows.GetStdHandle(uint32(sh))
if err != nil {
return err
}
oldStderr = h
r, _, err := setStdHandle.Call(uintptr(sh), panicFile.Fd())
if r == 0 && err != nil {
return err
}
// Reset os.Stderr to the panic file (so fmt.Fprintf(os.Stderr,...) actually gets redirected)
os.Stderr = os.NewFile(panicFile.Fd(), "/dev/stderr")
// Force threads that panic to write to stderr (the panicFile handle now), otherwise it will go into the ether
log.SetOutput(os.Stderr)
return nil
}
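// removePanicFile restores the original stderr handle and deletes the panic file if nothing was written to it.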
func removePanicFile() {
if st, err := panicFile.Stat(); err == nil {
if st.Size() == 0 {
sh := windows.STD_ERROR_HANDLE
setStdHandle.Call(uintptr(sh), uintptr(oldStderr))
panicFile.Close()
os.Remove(panicFile.Name())
}
}
}
JSBSimWriteXml.py | """
University of Minnesota
Aerospace Engineering and Mechanics - UAV Lab
Copyright 2019 Regents of the University of Minnesota.
See: LICENSE.md for complete license details.
Author: Louis Mueller, Chris Regan
"""
import os.path
from xml.etree import ElementTree as ET
import numpy as np
ft2m = 0.3048
psf2pa = 47.88026
#%% Save the XML in pretty-ish print
def SaveXml(elem, saveFile):
from xml.dom import minidom
uglyXml = ET.tostring(elem, 'utf-8')
prettyXml = minidom.parseString(uglyXml).toprettyxml(indent=' ', newl = '\r\n')
os.makedirs(os.path.dirname(saveFile), exist_ok=True)
with open(saveFile, 'w') as saveXML:
saveXML.write(prettyXml)
saveXML.close()
#%% Function
def Aircraft(oFdm, convertFdm2Jsb, saveJsbPath, aircraftName):
# Start JSB-ML with etree
elemAircraft = ET.Element('fdm_config', version = '2.0', release = 'Alpha')
# Create the Pilot input as a separate XML file, and direct the Aircraft definition to use it
fcsFile = 'FlightControl.xml'
ET.SubElement(elemAircraft, 'flight_control', file = fcsFile)
SaveXml(FlightControl(oFdm), os.path.join(saveJsbPath, fcsFile))
# Effectors as a separate XML file, and direct the Aircraft definition to use it
effFile = 'Effectors.xml'
ET.SubElement(elemAircraft, 'system', file = effFile)
SaveXml(Effectors(oFdm), os.path.join(saveJsbPath, effFile))
# Create the Mass Properties input as a separate XML file, and direct the Aircraft definition to use it
massFile = 'Mass.xml'
ET.SubElement(elemAircraft, 'mass_balance', file = massFile)
SaveXml(MassBalance(oFdm), os.path.join(saveJsbPath, massFile))
# Create the Gear input as a separate XML file, and direct the Aircraft definition to use it
gearFile = 'Gear.xml'
ET.SubElement(elemAircraft, 'ground_reactions', file = gearFile)
SaveXml(GroundReactions(oFdm), os.path.join(saveJsbPath, gearFile))
# Create the Propulsion input as a separate XML file, and direct the Aircraft definition to use it
propFile = 'Propulsion.xml'
ET.SubElement(elemAircraft, 'propulsion', file = propFile)
SaveXml(Propulsion(oFdm), os.path.join(saveJsbPath, propFile))
# Metrics and Aerodynamics as separate XML files, and direct the Aircraft definition to use them
# Group the Metrics and Aero by similar naming; the dimensionalization inherent to Aero is provided by the Metrics
metricsFile = 'Metrics.xml'
ET.SubElement(elemAircraft, 'metrics', file = metricsFile)
SaveXml(Metrics(oFdm), os.path.join(saveJsbPath, metricsFile))
aeroFile = 'Aero.xml'
ET.SubElement(elemAircraft, 'aerodynamics', file = aeroFile)
SaveXml(Aerodynamics(oFdm, convertFdm2Jsb), os.path.join(saveJsbPath, aeroFile))
# Launcher as a separate XML file, and direct the Aircraft definition to use it
if 'Winch' in oFdm.keys() :
winchFile = 'Winch.xml'
ET.SubElement(elemAircraft, 'external_reactions', file = winchFile)
SaveXml(Winch(oFdm), os.path.join(saveJsbPath, winchFile))
# Imu as a separate XML file, and direct the Aircraft definition to use it
if 'Imu' in oFdm['Sensor'].keys() :
imuFile = 'SensorImu.xml'
ET.SubElement(elemAircraft, 'system', file = imuFile)
SaveXml(SensorImu(oFdm), os.path.join(saveJsbPath, imuFile))
# Gps as a separate XML file, and direct the Aircraft definition to use it
if 'Gps' in oFdm['Sensor'].keys() :
gpsFile = 'SensorGps.xml'
ET.SubElement(elemAircraft, 'system', file = gpsFile)
SaveXml(SensorGps(oFdm), os.path.join(saveJsbPath, gpsFile))
# Pitot as a separate XML file, and direct the Aircraft definition to use it
if 'Pitot' in oFdm['Sensor'].keys() :
pitotFile = 'SensorPitot.xml'
ET.SubElement(elemAircraft, 'system', file = pitotFile)
SaveXml(SensorPitot(oFdm), os.path.join(saveJsbPath, pitotFile))
# 5Hole as a separate XML file, and direct the Aircraft definition to use it
if '5Hole' in oFdm['Sensor'].keys() :
fiveHoleFile = 'Sensor5Hole.xml'
ET.SubElement(elemAircraft, 'system', file = fiveHoleFile)
SaveXml(Sensor5Hole(oFdm), os.path.join(saveJsbPath, fiveHoleFile))
# Write the Aircraft XML file
saveFile = os.path.join(saveJsbPath, aircraftName + '.xml')
SaveXml(elemAircraft, saveFile)
return(elemAircraft)
#%% Table Generator, Wrapper
def TableGen(elemParent, tableArray, tableSignals, tableBreakPts):
s = tableArray.shape
iAxisRemList = []
for iAxis in range(0, len(s)):
if s[iAxis] == 1:
iAxisRemList.append(iAxis)
# for iRem in iAxisRemList: # XXX
# tableArray = tableArray.squeeze(axis=iRem)
# del tableSignals[iRem]
# del tableBreakPts[iRem]
if len(tableArray.shape)==3:
table = TableGen3D(elemParent, tableArray, tableSignals, tableBreakPts)
elif len(tableArray.shape)==2:
table = TableGen2D(elemParent, tableArray, tableSignals, tableBreakPts)
elif (len(tableArray.shape)==1) & (tableArray.size > 1):
table = TableGen1D(elemParent, tableArray, tableSignals, tableBreakPts)
else:
table = ET.SubElement(elemParent, 'value').text = str(tableArray)
return table
#%% Table Generator, 3D
def TableGen3D(elemParent, tableArray, tableSignals, tableBreakPts):
table = ET.SubElement(elemParent, 'table')
#table = ET.Element('table')
ET.SubElement(table, 'independentVar', lookup = 'row').text = tableSignals[0]
ET.SubElement(table, 'independentVar', lookup = 'column').text = tableSignals[1]
ET.SubElement(table, 'independentVar', lookup = 'table').text = tableSignals[2]
indentSpace = ' '*4
indentLvl = 4
numRows, numColumns, numTables = np.shape(tableArray)
columnHeader = indentSpace*(indentLvl)
for columnVal in tableBreakPts[1]:
columnHeader += ' '*6 + str(columnVal)
for iTable in range(0, numTables):
tableStr = ['\n' + columnHeader]
for iRow in range(0, numRows):
rowStr = str(tableArray[iRow, :, iTable]).replace('[','').replace(']','').replace('\n', '')
tableStr.append(indentLvl*indentSpace + str(tableBreakPts[0][iRow]) + indentSpace + rowStr)
tableStr = '\n'.join(tableStr) + '\n' + indentLvl*indentSpace # Join the list of lines into a single '\n'-separated string
ET.SubElement(table, 'tableData', breakPoint = str(tableBreakPts[2][iTable])).text = tableStr
return table
#%% Table Generator, 2D
def TableGen2D(elemParent, tableArray, tableSignals, tableBreakPts):
table = ET.SubElement(elemParent, 'table')
ET.SubElement(table, 'independentVar', lookup = 'row').text = tableSignals[0]
ET.SubElement(table, 'independentVar', lookup = 'column').text = tableSignals[1]
indentSpace = ' '*4
indentLvl = 4
tableArray = tableArray.transpose()
numRows, numColumns = np.shape(tableArray)
columnHeader = indentSpace*(indentLvl)
for columnVal in tableBreakPts[1]:
columnHeader += ' '*6 + str(columnVal)
tableStr = ['\n' + columnHeader]
for iRow in range(0, numRows):
rowStr = str(tableArray[iRow]).replace('[','').replace(']','').replace('\n', '')
tableStr.append(indentLvl*indentSpace + str(tableBreakPts[0][iRow]) + indentSpace + rowStr)
tableStr = '\n'.join(tableStr) + '\n' + indentLvl*indentSpace # Join the list of lines into a single '\n'-separated string
ET.SubElement(table, 'tableData').text = tableStr
return table
#%% Table Generator, 1D
def TableGen1D(elemParent, tableArray, tableSignals, tableBreakPts):
table = ET.SubElement(elemParent, 'table')
ET.SubElement(table, 'independentVar', lookup = 'row').text = tableSignals
indentSpace = ' '*4
indentLvl = 4
numRows = np.shape(tableArray)[0]
tableStr = ['\n']
for iRow in range(0, numRows):
rowStr = str(tableArray[iRow]).replace('[','').replace(']','').replace('\n', '')
tableStr.append(indentLvl*indentSpace + str(tableBreakPts[iRow]) + indentSpace + rowStr)
tableStr = '\n'.join(tableStr) + '\n' + indentLvl*indentSpace # Join the list of lines into a single '\n'-separated string
ET.SubElement(table, 'tableData').text = tableStr
return table
#%%
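# MassBalance builds the JSBSim <mass_balance> element: empty weight, CG location, and inertia tensor from oFdm['MassProp'].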
def MassBalance(oFdm):
mass_balance = ET.Element('mass_balance')
# Mass
ET.SubElement(mass_balance, 'emptywt', unit = 'KG').text = str(oFdm['MassProp']['mass_kg'])
# CG
location = ET.SubElement(mass_balance, 'location', name = 'CG', unit = 'M')
ET.SubElement(location, 'x').text = str(oFdm['MassProp']['rCG_S_m'][0])
ET.SubElement(location, 'y').text = str(oFdm['MassProp']['rCG_S_m'][1])
ET.SubElement(location, 'z').text = str(oFdm['MassProp']['rCG_S_m'][2])
# Inertia
ET.SubElement(mass_balance, 'ixx', unit = 'KG*M2').text = str(oFdm['MassProp']['inertia_kgm2'][0,0])
ET.SubElement(mass_balance, 'iyy', unit = 'KG*M2').text = str(oFdm['MassProp']['inertia_kgm2'][1,1])
ET.SubElement(mass_balance, 'izz', unit = 'KG*M2').text = str(oFdm['MassProp']['inertia_kgm2'][2,2])
ET.SubElement(mass_balance, 'ixy', unit = 'KG*M2').text = str(oFdm['MassProp']['inertia_kgm2'][0,1])
ET.SubElement(mass_balance, 'ixz', unit = 'KG*M2').text = str(oFdm['MassProp']['inertia_kgm2'][0,2])
ET.SubElement(mass_balance, 'iyz', unit = 'KG*M2').text = str(oFdm['MassProp']['inertia_kgm2'][1,2])
return(mass_balance)
#%%
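# GroundReactions builds the <ground_reactions> element with one BOGEY contact per entry in oFdm['Gear'].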
def GroundReactions(oFdm):
ground_reactions = ET.Element('ground_reactions')
# Loop Each Gear
for gear in oFdm['Gear'].keys():
contact = ET.SubElement(ground_reactions, 'contact', type = 'BOGEY', name = gear)
location = ET.SubElement(contact, 'location', unit = 'M')
ET.SubElement(location, 'x').text = str(oFdm['Gear'][gear]['rGear_S_m'][0])
ET.SubElement(location, 'y').text = str(oFdm['Gear'][gear]['rGear_S_m'][1])
ET.SubElement(location, 'z').text = str(oFdm['Gear'][gear]['rGear_S_m'][2])
ET.SubElement(contact, 'static_friction').text = str(oFdm['Gear'][gear]['FricStatic'])
ET.SubElement(contact, 'dynamic_friction').text = str(oFdm['Gear'][gear]['FricDynamic'])
ET.SubElement(contact, 'rolling_friction').text = str(oFdm['Gear'][gear]['FricRoll'])
ET.SubElement(contact, 'spring_coeff', unit = 'N/M').text = str(oFdm['Gear'][gear]['kSpring_Npm'])
ET.SubElement(contact, 'damping_coeff', unit = 'N/M/SEC').text = str(oFdm['Gear'][gear]['dampSpring_Nspm'])
ET.SubElement(contact, 'max_steer', unit = 'DEG').text = '0.0'
return(ground_reactions)
#%%
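# Metrics builds the <metrics> element: reference area, span, chord, and the AERORP/EYEPOINT/VRP locations.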
def Metrics(oFdm):
metrics = ET.Element('metrics')
# Dimensions
ET.SubElement(metrics, 'wingarea', unit = 'M2').text = str(oFdm['Aero']['Ref']['S_m2'])
ET.SubElement(metrics, 'wingspan', unit = 'M').text = str(oFdm['Aero']['Ref']['b_m'])
ET.SubElement(metrics, 'chord', unit = 'M').text = str(oFdm['Aero']['Ref']['cBar_m'])
location = ET.SubElement(metrics, 'location', name = 'AERORP', unit = 'M')
ET.SubElement(location, 'x').text = str(oFdm['Aero']['Ref']['rAero_S_m'][0])
ET.SubElement(location, 'y').text = str(oFdm['Aero']['Ref']['rAero_S_m'][1])
ET.SubElement(location, 'z').text = str(oFdm['Aero']['Ref']['rAero_S_m'][2])
location = ET.SubElement(metrics, 'location', name = 'EYEPOINT', unit = 'M')
ET.SubElement(location, 'x').text = str(oFdm['Aero']['Ref']['rAero_S_m'][0])
ET.SubElement(location, 'y').text = str(oFdm['Aero']['Ref']['rAero_S_m'][1])
ET.SubElement(location, 'z').text = str(oFdm['Aero']['Ref']['rAero_S_m'][2])
location = ET.SubElement(metrics, 'location', name = 'VRP', unit = 'M')
ET.SubElement(location, 'x').text = '0.0'
ET.SubElement(location, 'y').text = '0.0'
ET.SubElement(location, 'z').text = '0.0'
return(metrics)
#%%
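# Aerodynamics builds the <aerodynamics> element: coefficient and derivative lookup tables, products with their dependent signals, summations, and dimensionalization into forces and moments.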
def Aerodynamics(oFdm, convertFdm2Jsb):
import copy
# Aero Coef definitions
coefNamesFdm = convertFdm2Jsb['Coef']['oFdm']
# Aero Deriv dependencies definitions
depNamesFdm = convertFdm2Jsb['Dep']['oFdm']
depNamesJsb = convertFdm2Jsb['Dep']['jsb']
depScale = convertFdm2Jsb['Dep']['scale']
coefNamesFdm = convertFdm2Jsb['Coef']['oFdm']
# Aero Breakpoint Table definitions
indVarTable = convertFdm2Jsb['TableDef']['jsb']
breakPtsTable = convertFdm2Jsb['TableDef']['brkPts']
# Aero Table data to use
aeroTable = oFdm['Aero']['Coef']
# Define the conversion from oFdm to JSB-ML # FIXIT - switch to a CDo+CDi drag computation
coefTable = {'CL': {'axis': 'LIFT', 'scale': None, 'type': 'force', 'deriv': 'dCL'}, \
'CD': {'axis': 'DRAG', 'scale': None, 'type': 'force', 'deriv': 'dCD'}, \
'CY': {'axis': 'SIDE', 'scale': None, 'type': 'force', 'deriv': 'dCY'}, \
'CMl': {'axis': 'ROLL', 'scale': 'metrics/bw-ft', 'type': 'moment', 'deriv': 'dCMl'}, \
'CMm': {'axis': 'PITCH', 'scale': 'metrics/cbarw-ft', 'type': 'moment', 'deriv': 'dCMm'}, \
'CMn': {'axis': 'YAW', 'scale': 'metrics/bw-ft', 'type': 'moment', 'deriv': 'dCMn'}}
aerodynamics = ET.Element('aerodynamics')
#
# Create each coefficient individually, just the table look-up
coefNames = coefTable.keys()
for iCoef, coef in enumerate(coefNames):
convertCoef = coefTable[coef]
# For each coefficient: create just the table look-up, then the Multiplication, then the summation
for iDep, dep in enumerate(coefNamesFdm):
function = ET.SubElement(aerodynamics, 'function', name = str('aero/coefficient/' + coef + '__' + dep))
ET.SubElement(function, 'description').text = str(coef + '__' + dep)
# Use the Table Generator to create the properly formatted Table for JSB-ML
tableArray = aeroTable[coef][dep]
tableSignals = indVarTable
tableBreakPts = breakPtsTable
table = TableGen(function, copy.deepcopy(tableArray), copy.deepcopy(tableSignals), copy.deepcopy(tableBreakPts))
# For each derivative: create just the table look-up, then the Multiplication, then the summation
deriv = convertCoef['deriv']
for iDep, dep in enumerate(depNamesFdm):
function = ET.SubElement(aerodynamics, 'function', name = str('aero/coefficient/' + deriv + '__' + dep))
ET.SubElement(function, 'description').text = str(deriv + '__' + dep)
# Use the Table Generator to create the properly formatted Table for JSB-ML
tableArray = aeroTable[deriv][dep]
tableSignals = indVarTable
tableBreakPts = breakPtsTable
table = TableGen(function, copy.deepcopy(tableArray), copy.deepcopy(tableSignals), copy.deepcopy(tableBreakPts))
# Multiply each derivative by its dependent variable
function = ET.SubElement(aerodynamics, 'function', name = str('aero/coefficient/' + coef + '__' + dep))
ET.SubElement(function, 'description').text = str(coef + '__' + dep + ' = ' + deriv + '__' + dep + ' * ' + dep)
#print(coef + '__' + dep + ' = ' + deriv + '__' + dep + ' * ' + dep)
product = ET.SubElement(function, 'product')
ET.SubElement(product, 'property').text = 'aero/coefficient/' + deriv + '__' + dep
#print(deriv + '__' + dep)
depSignal = depNamesJsb[iDep]
#print(depSignal)
if depSignal != None:
ET.SubElement(product, 'property').text = depSignal # Dependent Variable/Signal
scale = depScale[iDep]
if scale != None:
if isinstance(scale, str):
ET.SubElement(product, 'property').text = str(scale) # Dependent Variable Scaling
else:
ET.SubElement(product, 'value').text = str(scale) # Dependent Variable Scaling
# Sum the Coefficients
function = ET.SubElement(aerodynamics, 'function', name = str('aero/coefficient/' + coef))
ET.SubElement(function, 'description').text = str(coef + ' summation')
#print(coef + ' summation')
summation = ET.SubElement(function, 'sum')
for iDep, dep in enumerate(coefNamesFdm):
ET.SubElement(summation, 'property').text = 'aero/coefficient/' + coef + '__' + dep
#print(coef + '__' + dep)
for iDep, dep in enumerate(depNamesFdm):
ET.SubElement(summation, 'property').text = 'aero/coefficient/' + coef + '__' + dep
#print(coef + '__' + dep)
#
# Dimensionalize the Coefficients into Forces and Moments
for iCoef, coef in enumerate(coefNames):
convertCoef = coefTable[coef]
axis = ET.SubElement(aerodynamics, 'axis', name = convertCoef['axis'])
function = ET.SubElement(axis, 'function', name = str('aero/' + convertCoef['type'] + '/' + convertCoef['axis'] + '__' + coef))
ET.SubElement(function, 'description').text = str(convertCoef['axis'] + ' from ' + coef)
product = ET.SubElement(function, 'product')
ET.SubElement(product, 'property').text = 'aero/qbar-area' # qBar * sRef
if convertCoef['scale'] != None:
ET.SubElement(product, 'property').text = convertCoef['scale'] # Coefficient Scaling
ET.SubElement(product, 'property').text = 'aero/coefficient/' + coef
return(aerodynamics)
#%%
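# Propulsion builds the <propulsion> element: one engine per entry in oFdm['Prop'], each with a thruster location, orientation, sense, and p_factor.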
def Propulsion(oFdm):
propulsion = ET.Element('propulsion')
for key in oFdm['Prop'].keys():
prop = oFdm['Prop'][key]
# Motor/Engine
engine = ET.SubElement(propulsion, 'engine', file = prop['nameMotor'])
# location = ET.SubElement(engine, 'location', unit = 'M')
# ET.SubElement(location, 'x').text = str(prop['rMotor_S_m'][0])
# ET.SubElement(location, 'y').text = str(prop['rMotor_S_m'][1])
# ET.SubElement(location, 'z').text = str(prop['rMotor_S_m'][2])
# orient = ET.SubElement(engine, 'orient', unit = 'DEG')
# ET.SubElement(orient, 'roll').text = str(prop['sMotor_deg'][0])
# ET.SubElement(orient, 'pitch').text = str(prop['sMotor_deg'][1])
# ET.SubElement(orient, 'yaw').text = str(prop['sMotor_deg'][2])
# Thruster/Prop as an element of the Engine
thruster = ET.SubElement(engine, 'thruster', file = prop['nameProp'])
location = ET.SubElement(thruster, 'location', unit = 'M')
ET.SubElement(location, 'x').text = str(prop['rProp_S_m'][0])
ET.SubElement(location, 'y').text = str(prop['rProp_S_m'][1])
ET.SubElement(location, 'z').text = str(prop['rProp_S_m'][2])
orient = ET.SubElement(thruster, 'orient', unit = 'DEG')
ET.SubElement(orient, 'roll').text = str(prop['sProp_deg'][0])
ET.SubElement(orient, 'pitch').text = str(prop['sProp_deg'][1])
ET.SubElement(orient, 'yaw').text = str(prop['sProp_deg'][2])
ET.SubElement(thruster, 'sense').text = str(prop['sense']) # 1 = CW as viewed from cockpit, -1 = CCW
ET.SubElement(thruster, 'p_factor').text = str(prop['p_factor'])
return(propulsion)
#%% FCS
def FlightControl(oFdm):
# Define all the Pilot input definitions
# Pilot Inputs, use the FG normalized sticks
fcsPilotDef = {}
fcsPilotDef['summer'] = {}
fcsPilotDef['gain'] = {}
fcsPilotDef['summer']['pilotRoll_norm'] = {}
fcsPilotDef['summer']['pilotRoll_norm']['inputList'] = ['fcs/aileron-cmd-norm', 'fcs/roll-trim-cmd-norm']
fcsPilotDef['summer']['pilotRoll_norm']['min'] = -1.0
fcsPilotDef['summer']['pilotRoll_norm']['max'] = 1.0
fcsPilotDef['gain']['cmdRoll_rps'] = {}
fcsPilotDef['gain']['cmdRoll_rps']['input'] = 'fcs/pilotRoll_norm'
fcsPilotDef['gain']['cmdRoll_rps']['gain'] = oFdm['FCS']['Pilot']['kRoll']
fcsPilotDef['summer']['pilotPitch_norm'] = {}
fcsPilotDef['summer']['pilotPitch_norm']['inputList'] = ['fcs/elevator-cmd-norm', 'fcs/pitch-trim-cmd-norm']
fcsPilotDef['summer']['pilotPitch_norm']['min'] = -1.0
fcsPilotDef['summer']['pilotPitch_norm']['max'] = 1.0
fcsPilotDef['gain']['cmdPitch_rps'] = {}
fcsPilotDef['gain']['cmdPitch_rps']['input'] = 'fcs/pilotPitch_norm'
fcsPilotDef['gain']['cmdPitch_rps']['gain'] = oFdm['FCS']['Pilot']['kPitch']
fcsPilotDef['summer']['pilotYaw_norm'] = {}
fcsPilotDef['summer']['pilotYaw_norm']['inputList'] = ['fcs/rudder-cmd-norm', 'fcs/yaw-trim-cmd-norm']
fcsPilotDef['summer']['pilotYaw_norm']['min'] = -1.0
fcsPilotDef['summer']['pilotYaw_norm']['max'] = 1.0
fcsPilotDef['gain']['cmdYaw_rps'] = {}
fcsPilotDef['gain']['cmdYaw_rps']['input'] = 'fcs/pilotYaw_norm'
fcsPilotDef['gain']['cmdYaw_rps']['gain'] = oFdm['FCS']['Pilot']['kYaw']
fcsPilotDef['summer']['pilotFlap_norm'] = {}
fcsPilotDef['summer']['pilotFlap_norm']['inputList'] = ['fcs/flap-cmd-norm']
fcsPilotDef['summer']['pilotFlap_norm']['min'] = -1.0
fcsPilotDef['summer']['pilotFlap_norm']['max'] = 1.0
fcsPilotDef['gain']['cmdFlap_rad'] = {}
fcsPilotDef['gain']['cmdFlap_rad']['input'] = 'fcs/pilotFlap_norm'
fcsPilotDef['gain']['cmdFlap_rad']['gain'] = oFdm['FCS']['Pilot']['kFlap']
# Create the JSB-ML
elemFCS = ET.Element('flight_control', name = 'Generic Flight Control')
pilot = ET.SubElement(elemFCS, 'channel', name = 'Pilot_Inputs')
for type in fcsPilotDef:
if type == 'summer':
for key in fcsPilotDef['summer'].keys():
entry = fcsPilotDef['summer'][key]
summer = ET.SubElement(pilot, 'summer', name = key)
for input in entry['inputList']:
ET.SubElement(summer, 'input').text = input
if ('min' in entry.keys()) or ('max' in entry.keys()):
clipto = ET.SubElement(summer, 'clipto')
if ('min' in entry.keys()): ET.SubElement(clipto, 'min').text = str(entry['min'])
if ('max' in entry.keys()): ET.SubElement(clipto, 'max').text = str(entry['max'])
ET.SubElement(summer, 'output').text = 'fcs/' + key
if type == 'gain':
for key in fcsPilotDef['gain'].keys():
entry = fcsPilotDef['gain'][key]
gain = ET.SubElement(pilot, 'pure_gain', name = key)
ET.SubElement(gain, 'input').text = entry['input']
ET.SubElement(gain, 'gain').text = str(entry['gain'])
if ('min' in entry.keys()) or ('max' in entry.keys()):
clipto = ET.SubElement(gain, 'clipto')
if ('min' in entry.keys()): ET.SubElement(clipto, 'min').text = str(entry['min'])
if ('max' in entry.keys()): ET.SubElement(clipto, 'max').text = str(entry['max'])
ET.SubElement(gain, 'output').text = 'fcs/' + key
# Control System Surface Mixer
mixer = ET.SubElement(elemFCS, 'channel', name = 'Control Mixer')
fcsMixerDef = oFdm['FCS']['Mixer']
for iSurf, surf in enumerate(fcsMixerDef['surfNames']):
cmdSurf = 'cmd' + surf + '_rad'
keyList = []
for iInput, input in enumerate(fcsMixerDef['inputs']):
val = fcsMixerDef['surfMix'][iSurf][iInput]
key = input + '_2_' + surf
if val != 0.0:
keyList.append(key)
gain = ET.SubElement(mixer, 'pure_gain', name = key.replace('fcs/',''))
ET.SubElement(gain, 'input').text = 'fcs/' + input
ET.SubElement(gain, 'gain').text = str(val)
ET.SubElement(gain, 'output').text = 'fcs/' + key
if any(keyList):
summer = ET.SubElement(mixer, 'summer', name = cmdSurf)
for key in keyList:
ET.SubElement(summer, 'input').text = 'fcs/' + key
ET.SubElement(summer, 'output').text = 'fcs/' + cmdSurf
# Inputs for External Commands, this just add property to create the node in the tree
for iSurf, surf in enumerate(fcsMixerDef['surfNames']):
cmdSurfExt = 'cmd' + surf + '_ext_rad'
prop = ET.SubElement(elemFCS, 'property').text = 'fcs/' + cmdSurfExt
name = 'Motor'
cmdMotorExt = 'cmd' + name + '_ext_nd'
motor = ET.SubElement(elemFCS, 'property').text = 'fcs/' + cmdMotorExt # Add the Motor external command
# Inputs for External Commands, this just add property to create the node in the tree
extern = ET.SubElement(elemFCS, 'channel', name = 'External Input Summations')
for iSurf, surf in enumerate(fcsMixerDef['surfNames']):
cmdSurf = 'cmd' + surf + '_rad'
cmdSurfExt = 'cmd' + surf + '_ext_rad'
summer = ET.SubElement(extern, 'summer')
ET.SubElement(summer, 'input').text = 'fcs/' + cmdSurf
ET.SubElement(summer, 'input').text = 'fcs/' + cmdSurfExt
ET.SubElement(summer, 'output').text = 'fcs/' + cmdSurf
name = 'Motor'
cmdMotor = 'cmd' + name + '_nd'
cmdMotorExt = 'cmd' + name + '_ext_nd'
summer = ET.SubElement(extern, 'summer')
ET.SubElement(summer, 'input').text = 'fcs/throttle-cmd-norm'
ET.SubElement(summer, 'input').text = 'fcs/' + cmdMotorExt
ET.SubElement(summer, 'output').text = 'fcs/throttle-pos-norm'
return(elemFCS)
#%% Effectors, for each surface define the 2nd order TF, and an 'actuator'
def Effectors(oFdm):
sysEffDef = oFdm['Act']
effectors = ET.Element('system', name = 'Effectors')
channel = ET.SubElement(effectors, 'channel', name = 'Actuator Models')
for surf in sysEffDef.keys():
cmdSurf = 'cmd' + surf + '_rad'
posSurf = 'pos' + surf + '_rad'
entry = sysEffDef[surf]
# Actuator - delay and freeplay
actuator = ET.SubElement(channel, 'actuator', name = 'act' + surf)
ET.SubElement(actuator, 'input').text = 'fcs/' + cmdSurf
ET.SubElement(actuator, 'lag').text = str(entry['lag_nd'])
ET.SubElement(actuator, 'hysteresis_width').text = str(entry['freeplay_rad'])
ET.SubElement(actuator, 'delay').text = str(entry['delay_s'])
if ('min' in entry.keys()) or ('max' in entry.keys()):
clipto = ET.SubElement(actuator, 'clipto')
if ('min' in entry.keys()): ET.SubElement(clipto, 'min').text = str(entry['min'])
if ('max' in entry.keys()): ET.SubElement(clipto, 'max').text = str(entry['max'])
ET.SubElement(actuator, 'output').text = 'fcs/' + posSurf
return(effectors)
#%%
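# Winch builds the <external_reactions> element defining the tow hitch force location and direction.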
def Winch(oFdm):
external_reactions = ET.Element('external_reactions')
# Winch
force = ET.SubElement(external_reactions, 'force', name='hitch' , frame = 'BODY', unit='N')
location = ET.SubElement(force, 'location', unit = 'M')
ET.SubElement(location, 'x').text = str(oFdm['Winch']['rHook_S_m'][0])
ET.SubElement(location, 'y').text = str(oFdm['Winch']['rHook_S_m'][1])
ET.SubElement(location, 'z').text = str(oFdm['Winch']['rHook_S_m'][2])
direction = ET.SubElement(force, 'direction')
ET.SubElement(direction, 'x').text = str(oFdm['Winch']['sHook_deg'][0])
ET.SubElement(direction, 'y').text = str(oFdm['Winch']['sHook_deg'][1])
ET.SubElement(direction, 'z').text = str(oFdm['Winch']['sHook_deg'][2])
return(external_reactions)
#%% IMU
def SensorImu(oFdm):
imu = ET.Element('system', name = 'Sensor - IMU')
# Create time in us
function = ET.SubElement(imu, 'function', name = 'sensor/imu/time_us')
product = ET.SubElement(function, 'product')
ET.SubElement(product, 'property').text = 'simulation/sim-time-sec'
ET.SubElement(product, 'value').text = str(1e6)
# Accelerometers
if 'Accel' in oFdm['Sensor']['Imu'].keys() :
channel = ET.SubElement(imu, 'channel', name = 'Temp Accelerometers')
axisList = ['X', 'Y', 'Z']
for axisName in axisList:
accel = ET.SubElement(channel, 'accelerometer', name = 'Accel' + axisName)
ET.SubElement(accel, 'axis').text = axisName
location = ET.SubElement(accel, 'location', unit = 'M')
ET.SubElement(location, 'x').text = str(oFdm['Sensor']['Imu']['Accel']['r_S_m'][0])
ET.SubElement(location, 'y').text = str(oFdm['Sensor']['Imu']['Accel']['r_S_m'][1])
ET.SubElement(location, 'z').text = str(oFdm['Sensor']['Imu']['Accel']['r_S_m'][2])
orientation = ET.SubElement(accel, 'orientation', unit='DEG')
ET.SubElement(orientation, 'roll').text = str(oFdm['Sensor']['Imu']['Accel']['s_deg'][0])
ET.SubElement(orientation, 'pitch').text = str(oFdm['Sensor']['Imu']['Accel']['s_deg'][1])
ET.SubElement(orientation, 'yaw').text = str(oFdm['Sensor']['Imu']['Accel']['s_deg'][2])
ET.SubElement(accel, 'output').text = 'sensor/imu/accel' + axisName + '_true_fps2'
# Convert Units Accelerometer to mps2
for axisName in axisList:
function = ET.SubElement(imu, 'function', name = 'sensor/imu/accel' + axisName + '_true_mps2')
product = ET.SubElement(function, 'product')
ET.SubElement(product, 'property').text = 'sensor/imu/accel' + axisName + '_true_fps2'
ET.SubElement(product, 'value').text = str(ft2m)
# Accelerometer Error Model
channel = ET.SubElement(imu, 'channel', name = 'Accelerometer Error Model')
errMod = oFdm['Sensor']['Imu']['Accel']
for iAxis, axisName in enumerate(axisList):
sensor = ET.SubElement(channel, 'sensor', name = 'Accel' + axisName)
ET.SubElement(sensor, 'input').text = 'sensor/imu/accel' + axisName + '_true_mps2'
ET.SubElement(sensor, 'lag').text = str(errMod['lag'][iAxis])
ET.SubElement(sensor, 'noise', variation='ABSOLUTE', distribution = 'GAUSSIAN').text = str((1.0 / 3.0) * errMod['noiseVar'][iAxis])
ET.SubElement(sensor, 'drift_rate').text = str(errMod['drift_ps'][iAxis])
ET.SubElement(sensor, 'gain').text = str(errMod['gain_nd'][iAxis])
ET.SubElement(sensor, 'bias').text = str(errMod['bias'][iAxis])
ET.SubElement(sensor, 'delay').text = str(errMod['delay_s'][iAxis])
ET.SubElement(sensor, 'output').text = 'sensor/imu/accel' + axisName + '_mps2'
# Gyros
if 'Gyro' in oFdm['Sensor']['Imu'].keys() :
errMod = oFdm['Sensor']['Imu']['Gyro']
channel = ET.SubElement(imu, 'channel', name = 'Gyros')
for iAxis, axisName in enumerate(axisList):
gyro = ET.SubElement(channel, 'gyro', name = 'Gyro' + axisName)
ET.SubElement(gyro, 'axis').text = axisName
location = ET.SubElement(gyro, 'location', unit = 'M')
ET.SubElement(location, 'x').text = str(errMod['r_S_m'][0])
ET.SubElement(location, 'y').text = str(errMod['r_S_m'][1])
ET.SubElement(location, 'z').text = str(errMod['r_S_m'][2])
orientation = ET.SubElement(gyro, 'orientation', unit='DEG')
ET.SubElement(orientation, 'roll').text = str(errMod['s_deg'][0])
ET.SubElement(orientation, 'pitch').text = str(errMod['s_deg'][1])
ET.SubElement(orientation, 'yaw').text = str(errMod['s_deg'][2])
ET.SubElement(gyro, 'lag').text = str(errMod['lag'][iAxis])
ET.SubElement(gyro, 'noise', variation='ABSOLUTE', distribution = 'GAUSSIAN').text = str((1.0 / 3.0) * errMod['noiseVar'][iAxis])
ET.SubElement(gyro, 'drift_rate').text = str(errMod['drift_ps'][iAxis])
ET.SubElement(gyro, 'gain').text = str(errMod['gain_nd'][iAxis])
ET.SubElement(gyro, 'bias').text = str(errMod['bias'][iAxis])
ET.SubElement(gyro, 'delay').text = str(errMod['delay_s'][iAxis])
ET.SubElement(gyro, 'output').text = 'sensor/imu/gyro' + axisName + '_rps'
# Magnetometers
if 'Mag' in oFdm['Sensor']['Imu'].keys() :
errMod = oFdm['Sensor']['Imu']['Mag']
channel = ET.SubElement(imu, 'channel', name = 'Magnetometers')
for iAxis, axisName in enumerate(axisList):
mag = ET.SubElement(channel, 'magnetometer', name = 'Mag' + axisName)
ET.SubElement(mag, 'axis').text = axisName
location = ET.SubElement(mag, 'location', unit = 'M')
ET.SubElement(location, 'x').text = str(errMod['r_S_m'][0])
ET.SubElement(location, 'y').text = str(errMod['r_S_m'][1])
ET.SubElement(location, 'z').text = str(errMod['r_S_m'][2])
orientation = ET.SubElement(mag, 'orientation', unit='DEG')
ET.SubElement(orientation, 'roll').text = str(errMod['s_deg'][0])
ET.SubElement(orientation, 'pitch').text = str(errMod['s_deg'][1])
ET.SubElement(orientation, 'yaw').text = str(errMod['s_deg'][2])
ET.SubElement(mag, 'lag').text = str(errMod['lag'][iAxis])
ET.SubElement(mag, 'noise', variation='ABSOLUTE', distribution = 'GAUSSIAN').text = str((1.0 / 3.0) * errMod['noiseVar'][iAxis])
ET.SubElement(mag, 'drift_rate').text = str(errMod['drift_ps'][iAxis])
ET.SubElement(mag, 'gain').text = str(errMod['gain_nd'][iAxis])
ET.SubElement(mag, 'bias').text = str(errMod['bias'][iAxis])
ET.SubElement(mag, 'delay').text = str(errMod['delay_s'][iAxis])
ET.SubElement(mag, 'output').text = 'sensor/imu/mag' + axisName + '_nT'
# Magnetometer unit conversion
for axisName in axisList:
function = ET.SubElement(imu, 'function', name = 'sensor/imu/mag' + axisName + '_uT')
product = ET.SubElement(function, 'product')
ET.SubElement(product, 'property').text = 'sensor/imu/mag' + axisName + '_nT'
ET.SubElement(product, 'value').text = str(0.001)
return(imu)
#%% GPS
def SensorGps(oFdm):
gps = ET.Element('system', name = 'Sensor - GPS')
# Create time in us
function = ET.SubElement(gps, 'function', name = 'sensor/gps/time_us')
product = ET.SubElement(function, 'product')
ET.SubElement(product, 'property').text = 'simulation/sim-time-sec'
ET.SubElement(product, 'value').text = str(1e6)
# GPS Position
function = ET.SubElement(gps, 'function', name = 'sensor/gps/lat_true_rad')
ET.SubElement(function, 'property').text = 'position/lat-geod-rad'
function = ET.SubElement(gps, 'function', name = 'sensor/gps/long_true_rad')
ET.SubElement(function, 'property').text = 'position/long-gc-rad'
function = ET.SubElement(gps, 'function', name = 'sensor/gps/alt_true_m')
product = ET.SubElement(function, 'product')
ET.SubElement(product, 'property').text = 'position/h-sl-ft'
ET.SubElement(product, 'value').text = str(ft2m)
# GPS Velocity
function = ET.SubElement(gps, 'function', name = 'sensor/gps/vNorth_true_mps')
product = ET.SubElement(function, 'product')
ET.SubElement(product, 'property').text = 'velocities/v-north-fps'
ET.SubElement(product, 'value').text = str(ft2m)
function = ET.SubElement(gps, 'function', name = 'sensor/gps/vEast_true_mps')
product = ET.SubElement(function, 'product')
ET.SubElement(product, 'property').text = 'velocities/v-east-fps'
ET.SubElement(product, 'value').text = str(ft2m)
function = ET.SubElement(gps, 'function', name = 'sensor/gps/vDown_true_mps')
product = ET.SubElement(function, 'product')
ET.SubElement(product, 'property').text = 'velocities/v-down-fps'
ET.SubElement(product, 'value').text = str(ft2m)
# GPS Error Model
channel = ET.SubElement(gps, 'channel', name = 'GPS Error Models')
axisList = ['lat_rad', 'long_rad', 'alt_m']
errMod = oFdm['Sensor']['Gps']['Pos']
for iAxis, axisName in enumerate(axisList):
sensor = ET.SubElement(channel, 'sensor', name = axisName)
ET.SubElement(sensor, 'input').text = 'sensor/gps/' + axisName.replace('_', '_true_')
ET.SubElement(sensor, 'lag').text = str(errMod['lag'][iAxis])
ET.SubElement(sensor, 'noise', variation='ABSOLUTE', distribution = 'GAUSSIAN').text = str((1.0 / 3.0) * errMod['noiseVar'][iAxis])
ET.SubElement(sensor, 'drift_rate').text = str(errMod['drift_ps'][iAxis])
ET.SubElement(sensor, 'gain').text = str(errMod['gain_nd'][iAxis])
ET.SubElement(sensor, 'bias').text = str(errMod['bias'][iAxis])
ET.SubElement(sensor, 'delay').text = str(errMod['delay_s'][iAxis])
ET.SubElement(sensor, 'output').text = 'sensor/gps/' + axisName
axisList = ['vNorth_mps', 'vEast_mps', 'vDown_mps']
errMod = oFdm['Sensor']['Gps']['Vel']
for iAxis, axisName in enumerate(axisList):
sensor = ET.SubElement(channel, 'sensor', name = axisName)
ET.SubElement(sensor, 'input').text = 'sensor/gps/' + axisName.replace('_', '_true_')
ET.SubElement(sensor, 'lag').text = str(errMod['lag'][iAxis])
ET.SubElement(sensor, 'noise', variation='ABSOLUTE', distribution = 'GAUSSIAN').text = str((1.0 / 3.0) * errMod['noiseVar'][iAxis])
ET.SubElement(sensor, 'drift_rate').text = str(errMod['drift_ps'][iAxis])
ET.SubElement(sensor, 'gain').text = str(errMod['gain_nd'][iAxis])
ET.SubElement(sensor, 'bias').text = str(errMod['bias'][iAxis])
ET.SubElement(sensor, 'delay').text = str(errMod['delay_s'][iAxis])
ET.SubElement(sensor, 'output').text = 'sensor/gps/' + axisName
return(gps)
#%%
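# SensorPitot builds a pitot-static system: true static/tip pressure and temperature signals plus an error-model channel.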
def SensorPitot(oFdm):
pitot = ET.Element('system', name = 'Sensor - Pitot-Static Probe')
# Create time in us
function = ET.SubElement(pitot, 'function', name = 'sensor/pitot/time_us')
product = ET.SubElement(function, 'product')
ET.SubElement(product, 'property').text = 'simulation/sim-time-sec'
ET.SubElement(product, 'value').text = str(1e6)
# Airdata Static
function = ET.SubElement(pitot, 'function', name = 'sensor/pitot/presStatic_true_Pa')
product = ET.SubElement(function, 'product')
ET.SubElement(product, 'property').text = 'atmosphere/P-psf'
ET.SubElement(product, 'value').text = str(psf2pa)
# Airdata Tip (Dynamic ~= Impact)
function = ET.SubElement(pitot, 'function', name = 'sensor/pitot/presTip_true_Pa')
product = ET.SubElement(function, 'product')
ET.SubElement(product, 'property').text = 'aero/qbar-psf'
ET.SubElement(product, 'value').text = str(psf2pa)
# Airdata Temperature
function = ET.SubElement(pitot, 'function', name = 'sensor/pitot/temp_true_C')
product = ET.SubElement(function, 'product')
summation = ET.SubElement(product, 'sum')
ET.SubElement(summation, 'property').text = 'atmosphere/T-R'
ET.SubElement(summation, 'value').text = str(-491.67)
ET.SubElement(product, 'value').text = str(5.0/9.0)
# Pitot Error Model
channel = ET.SubElement(pitot, 'channel', name = 'Pitot Error Models')
axisList = ['presStatic_Pa', 'presTip_Pa', 'temp_C']
errMod = oFdm['Sensor']['Gps']['Vel']
for iAxis, axisName in enumerate(axisList):
sensor = ET.SubElement(channel, 'sensor', name = axisName)
ET.SubElement(sensor, 'input').text = 'sensor/pitot/' + axisName.replace('_', '_true_')
ET.SubElement(sensor, 'lag').text = str(errMod['lag'][iAxis])
ET.SubElement(sensor, 'noise', variation='ABSOLUTE', distribution = 'GAUSSIAN').text = str((1.0 / 3.0) * errMod['noiseVar'][iAxis])
ET.SubElement(sensor, 'drift_rate').text = str(errMod['drift_ps'][iAxis])
ET.SubElement(sensor, 'gain').text = str(errMod['gain_nd'][iAxis])
ET.SubElement(sensor, 'bias').text = str(errMod['bias'][iAxis])
ET.SubElement(sensor, 'delay').text = str(errMod['delay_s'][iAxis])
ET.SubElement(sensor, 'output').text = 'sensor/pitot/' + axisName
return(pitot)
#%%
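# Sensor5Hole builds a five-hole probe system using either the K1 (per-port) or K2 (differential) calibration coefficients.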
def Sensor5Hole(oFdm):
fiveHole = ET.Element('system', name = 'Sensor - 5Hole Probe')
# Determine whether method #1 or method #2
if 'alphaK1' in oFdm['Sensor']['5Hole'] and 'betaK1' in oFdm['Sensor']['5Hole']:
method = 1
elif 'alphaK2' in oFdm['Sensor']['5Hole'] and 'betaK2' in oFdm['Sensor']['5Hole']:
method = 2
else:
print('5Hole Probe: Need either (alphaK1 and betaK1) or (alphaK2 and betaK2)')
# Create time in us
function = ET.SubElement(fiveHole, 'function', name = 'sensor/fiveHole/time_us')
product = ET.SubElement(function, 'product')
ET.SubElement(product, 'property').text = 'simulation/sim-time-sec'
ET.SubElement(product, 'value').text = str(1e6)
# Airdata Static
function = ET.SubElement(fiveHole, 'function', name = 'sensor/fiveHole/presStatic_true_Pa')
product = ET.SubElement(function, 'product')
ET.SubElement(product, 'property').text = 'atmosphere/P-psf'
ET.SubElement(product, 'value').text = str(psf2pa)
# Airdata Tip (Dynamic ~= Impact)
function = ET.SubElement(fiveHole, 'function', name = 'sensor/fiveHole/presTip_true_Pa')
product = ET.SubElement(function, 'product')
ET.SubElement(product, 'property').text = 'aero/qbar-psf'
ET.SubElement(product, 'value').text = str(psf2pa)
# Airdata Temperature
function = ET.SubElement(fiveHole, 'function', name = 'sensor/fiveHole/temp_true_C')
product = ET.SubElement(function, 'product')
summation = ET.SubElement(product, 'sum')
ET.SubElement(summation, 'property').text = 'atmosphere/T-R'
ET.SubElement(summation, 'value').text = str(-491.67)
ET.SubElement(product, 'value').text = str(5.0/9.0)
# [Method 1]
if method == 1:
axisList = ['presStatic_Pa', 'presTip_Pa', 'presAlphaBot_Pa', 'presAlphaTop_Pa', 'presBetaRight_Pa', 'presBetaLeft_Pa', 'temp_C']
# Alpha Difference (presAlphaBot - presAlphaTop)
function = ET.SubElement(fiveHole, 'function', name = 'sensor/fiveHole/presAlphaBot_true_Pa')
product = ET.SubElement(function, 'product')
ET.SubElement(product, 'property').text = 'aero/alpha-deg'
ET.SubElement(product, 'property').text = 'aero/qbar-psf'
ET.SubElement(product, 'value').text = str(psf2pa)
ET.SubElement(product, 'value').text = str(oFdm['Sensor']['5Hole']['alphaK1'][0])
function = ET.SubElement(fiveHole, 'function', name = 'sensor/fiveHole/presAlphaTop_true_Pa')
product = ET.SubElement(function, 'product')
ET.SubElement(product, 'property').text = 'aero/alpha-deg'
ET.SubElement(product, 'property').text = 'aero/qbar-psf'
ET.SubElement(product, 'value').text = str(psf2pa)
ET.SubElement(product, 'value').text = str(oFdm['Sensor']['5Hole']['alphaK1'][1])
# [Method 1] Beta Difference (presBetaRight - presBetaLeft)
function = ET.SubElement(fiveHole, 'function', name = 'sensor/fiveHole/presBetaRight_true_Pa')
product = ET.SubElement(function, 'product')
ET.SubElement(product, 'property').text = 'aero/beta-deg'
ET.SubElement(product, 'property').text = 'aero/qbar-psf'
ET.SubElement(product, 'value').text = str(psf2pa)
ET.SubElement(product, 'value').text = str(oFdm['Sensor']['5Hole']['betaK1'][0])
function = ET.SubElement(fiveHole, 'function', name = 'sensor/fiveHole/presBetaLeft_true_Pa')
product = ET.SubElement(function, 'product')
ET.SubElement(product, 'property').text = 'aero/beta-deg'
ET.SubElement(product, 'property').text = 'aero/qbar-psf'
ET.SubElement(product, 'value').text = str(psf2pa)
ET.SubElement(product, 'value').text = str(oFdm['Sensor']['5Hole']['betaK1'][1])
# [Method 2]
elif method == 2:
axisList = ['presStatic_Pa', 'presTip_Pa', 'presAlpha_Pa', 'presBeta_Pa', 'temp_C']
# Alpha Difference (presAlphaBot - presAlphaTop)
function = ET.SubElement(fiveHole, 'function', name = 'sensor/fiveHole/presAlpha_true_Pa')
product = ET.SubElement(function, 'product')
ET.SubElement(product, 'property').text = 'aero/alpha-deg'
ET.SubElement(product, 'property').text = 'aero/qbar-psf'
ET.SubElement(product, 'value').text = str(psf2pa)
ET.SubElement(product, 'value').text = str(oFdm['Sensor']['5Hole']['alphaK2'])
# [Method 2] Beta Difference (presBetaRight - presBetaLeft)
function = ET.SubElement(fiveHole, 'function', name = 'sensor/fiveHole/presBeta_true_Pa')
product = ET.SubElement(function, 'product')
ET.SubElement(product, 'property').text = 'aero/beta-deg'
ET.SubElement(product, 'property').text = 'aero/qbar-psf'
ET.SubElement(product, 'value').text = str(psf2pa)
ET.SubElement(product, 'value').text = str(oFdm['Sensor']['5Hole']['betaK2'])
# 5Hole Error Model
channel = ET.SubElement(fiveHole, 'channel', name = '5Hole Error Models')
errMod = oFdm['Sensor']['5Hole']
for iAxis, axisName in enumerate(axisList):
sensor = ET.SubElement(channel, 'sensor', name = axisName)
ET.SubElement(sensor, 'input').text = 'sensor/fiveHole/' + axisName.replace('_', '_true_')
ET.SubElement(sensor, 'lag').text = str(errMod['lag'][iAxis])
ET.SubElement(sensor, 'noise', variation='ABSOLUTE', distribution = 'GAUSSIAN').text = str((1.0 / 3.0) * errMod['noiseVar'][iAxis])
ET.SubElement(sensor, 'drift_rate').text = str(errMod['drift_ps'][iAxis])
ET.SubElement(sensor, 'gain').text = str(errMod['gain_nd'][iAxis])
ET.SubElement(sensor, 'bias').text = str(errMod['bias'][iAxis])
ET.SubElement(sensor, 'delay').text = str(errMod['delay_s'][iAxis])
ET.SubElement(sensor, 'output').text = 'sensor/fiveHole/' + axisName
return(fiveHole)
observers.py | #
# Copyright (c), 2016-2020, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author Davide Brunato <[email protected]>
#
"""
Observers for XMLSchema classes.
"""
from functools import wraps
from ..names import XSD_NAMESPACE, XSD_ANY_TYPE
from ..validators import XMLSchema10, XMLSchema11, XsdGroup, XsdAttributeGroup, XsdComplexType
class SchemaObserver:
"""
Observer that registers created components. Run the 'clear' method after each usage.
"""
components = []
dummy_components = []
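# observed_builder wraps a builder class or callable so that every component it creates is recorded in 'components' or 'dummy_components'.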
@classmethod
def observed_builder(cls, builder):
if isinstance(builder, type):
class BuilderProxy(builder):
def __init__(self, *args, **kwargs):
super(BuilderProxy, self).__init__(*args, **kwargs)
if not cls.is_dummy_component(self):
cls.components.append(self)
else:
cls.dummy_components.append(self)
BuilderProxy.__name__ = builder.__name__
return BuilderProxy
elif callable(builder):
@wraps(builder)
def builder_proxy(*args, **kwargs):
obj = builder(*args, **kwargs)
if not cls.is_dummy_component(obj):
cls.components.append(obj)
else:
cls.dummy_components.append(obj)
return obj
return builder_proxy
@classmethod
def clear(cls):
del cls.components[:]
del cls.dummy_components[:]
@classmethod
def is_dummy_component(cls, component):
# Dummy components are empty attribute groups and xs:anyType
# definitions not related to XSD namespace.
if component.parent in cls.dummy_components:
return True
elif isinstance(component, XsdAttributeGroup):
return not component
elif isinstance(component, XsdComplexType):
return component.name == XSD_ANY_TYPE and \
component.target_namespace != XSD_NAMESPACE
elif isinstance(component, XsdGroup) and component.parent is not None:
return component.parent.name == XSD_ANY_TYPE and \
component.target_namespace != XSD_NAMESPACE
return False
class ObservedXMLSchema10(XMLSchema10):
BUILDERS = {
k: SchemaObserver.observed_builder(getattr(XMLSchema10.BUILDERS, k))
for k in getattr(XMLSchema10.BUILDERS, '_fields')
}
class ObservedXMLSchema11(XMLSchema11):
BUILDERS = {
k: SchemaObserver.observed_builder(getattr(XMLSchema11.BUILDERS, k))
for k in getattr(XMLSchema11.BUILDERS, '_fields')
}
TaskEditPage.tsx | // Libraries
import _ from 'lodash'
import React, {PureComponent, ChangeEvent} from 'react'
import {InjectedRouter} from 'react-router'
import {connect} from 'react-redux'
// Components
import TaskForm from 'src/tasks/components/TaskForm'
import TaskHeader from 'src/tasks/components/TaskHeader'
import {Page} from '@influxdata/clockface'
import FluxEditor from 'src/shared/components/FluxEditor'
// Actions
import {
updateScript,
selectTaskByID,
setCurrentScript,
cancel,
setTaskOption,
clearTask,
setAllTaskOptionsByID,
} from 'src/tasks/actions'
// Utils
import {pageTitleSuffixer} from 'src/shared/utils/pageTitles'
// Types
import {
TaskOptions,
TaskOptionKeys,
TaskSchedule,
} from 'src/utils/taskOptionsToFluxScript'
import {AppState, Task} from 'src/types'
interface OwnProps {
router: InjectedRouter
params: {id: string}
}
interface StateProps {
taskOptions: TaskOptions
currentTask: Task
currentScript: string
}
interface DispatchProps {
setTaskOption: typeof setTaskOption
setCurrentScript: typeof setCurrentScript
updateScript: typeof updateScript
cancel: typeof cancel
selectTaskByID: typeof selectTaskByID
clearTask: typeof clearTask
setAllTaskOptionsByID: typeof setAllTaskOptionsByID
}
type Props = OwnProps & StateProps & DispatchProps
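// TaskEditPage loads a task by route id, lets the user edit its options and Flux script, and saves or cancels the edit.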
class TaskEditPage extends PureComponent<Props> {
constructor(props) {
super(props)
}
public componentDidMount() {
const {
params: {id},
} = this.props
this.props.selectTaskByID(id)
this.props.setAllTaskOptionsByID(id)
}
public componentWillUnmount() {
this.props.clearTask()
}
public render(): JSX.Element {
const {currentScript, taskOptions} = this.props
return (
<Page titleTag={pageTitleSuffixer([`Edit ${taskOptions.name}`])}>
<TaskHeader
title="Edit Task"
canSubmit={this.isFormValid}
onCancel={this.handleCancel}
onSave={this.handleSave}
/>
<Page.Contents fullWidth={true} scrollable={false}>
<div className="task-form">
<div className="task-form--options">
<TaskForm
canSubmit={this.isFormValid}
taskOptions={taskOptions}
onChangeInput={this.handleChangeInput}
onChangeScheduleType={this.handleChangeScheduleType}
/>
</div>
<div className="task-form--editor">
<FluxEditor
script={currentScript}
onChangeScript={this.handleChangeScript}
visibility="visible"
suggestions={[]}
/>
</div>
</div>
</Page.Contents>
</Page>
)
}
private get isFormValid(): boolean {
const {
taskOptions: {name, cron, interval},
currentScript,
} = this.props
const hasSchedule = !!cron || !!interval
return hasSchedule && !!name && !!currentScript
}
private handleChangeScript = (script: string) => {
this.props.setCurrentScript(script)
}
private handleChangeScheduleType = (schedule: TaskSchedule) => {
this.props.setTaskOption({key: 'taskScheduleType', value: schedule})
}
private handleSave = () => {
this.props.updateScript()
}
private handleCancel = () => {
this.props.cancel()
}
private handleChangeInput = (e: ChangeEvent<HTMLInputElement>) => {
const {name, value} = e.target
const key = name as TaskOptionKeys
this.props.setTaskOption({key, value})
}
}
const mstp = ({tasks}: AppState): StateProps => {
return {
taskOptions: tasks.taskOptions,
currentScript: tasks.currentScript,
currentTask: tasks.currentTask,
}
}
const mdtp: DispatchProps = {
setTaskOption,
setCurrentScript,
updateScript,
cancel,
selectTaskByID,
setAllTaskOptionsByID,
clearTask,
}
export default connect<StateProps, DispatchProps, {}>(
mstp,
mdtp
)(TaskEditPage)
| TaskEditPage |
gtfs_reader.rs | fn | () {
/* Gtfs::new will try to guess if you provide a path, a local zip file or a remote zip file.
You can also use Gtfs::from_path, Gtfs::from_url
*/
let gtfs = gtfs_structures::GtfsReader::default()
.read_stop_times(false)
.read("fixtures/basic")
.expect("impossible to read gtfs");
gtfs.print_stats();
println!("there are {} stops in the gtfs", gtfs.stops.len());
let route_1 = gtfs.routes.get("1").expect("no route 1");
println!("{}: {:?}", route_1.short_name, route_1);
}
| main |
tag_test.go | package xhash
import "testing"
func TestHump2underline(t *testing.T) | {
name := "UserAnswer"
result := Hump2underline(name)
if result != "user_answer" {
t.Errorf("conver err name=%s result=%s", name, result)
}
name = "Hump2underline"
result = Hump2underline(name)
if result != "hump2underline" {
t.Errorf("conver err name=%s result=%s", name, result)
}
} |
|
fetch.go | package cli
import (
"context"
"log"
"os"
"time"
pb "github.com/assafvayner/grpc-files/fileservice"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
)
func Fetch(resource, destination string) {
conn, err := grpc.Dial("localhost:8808", grpc.WithTransportCredentials(insecure.NewCredentials()))
if err != nil {
log.Fatal("error on grpc dial", err)
}
defer conn.Close()
client := pb.NewRetreiverClient(conn)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
req := pb.FetchRequest{
Path: resource,
}
res, err := client.Fetch(ctx, &req)
if err != nil {
log.Fatal(err)
}
if res.GetStatus().GetCode() != 0 {
log.Fatal("Error from server:\n\t" + res.GetStatus().GetMessage())
}
| if err != nil && !os.IsExist(err) {
log.Fatal("could not create destination directory")
}
file := res.GetFile()
if file != nil {
os.WriteFile(destination+"/"+file.GetName(), file.GetData(), 0777)
log.Printf(destination + "/" + file.GetName())
os.Exit(0)
}
dir := res.GetDirectory()
if dir == nil {
log.Fatalf("Failed to retrieve %s\n", resource)
}
AddDirectory(destination, dir)
os.Exit(0)
}
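// AddDirectory writes the files contained in dir under path, then recurses into its subdirectories.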
func AddDirectory(path string, dir *pb.Directory) {
fullpath := path + "/" + dir.GetName()
os.MkdirAll(fullpath, 0777)
log.Println(fullpath + "/")
for _, file := range dir.GetFiles() {
os.WriteFile(fullpath+"/"+file.GetName(), file.GetData(), 0777)
log.Printf("\t" + fullpath + "/" + file.GetName())
}
for _, subdir := range dir.GetDirectories() {
AddDirectory(fullpath, subdir)
}
} | err = os.MkdirAll(destination, 0777) |
p2p-fullblocktest.py | #!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.comptool import TestManager, TestInstance, RejectResult
from test_framework.blocktools import *
import time
from test_framework.key import CECKey
from test_framework.script import CScript, SignatureHash, SIGHASH_ALL, OP_TRUE, OP_FALSE
class PreviousSpendableOutput(object):
def __init__(self, tx = CTransaction(), n = -1):
self.tx = tx
self.n = n # the output we're spending
'''
This reimplements tests from the alisinabitj/FullBlockTestGenerator used
by the pull-tester.
We use the testing framework in which we expect a particular answer from
each test.
'''
class FullBlockTest(ComparisonTestFramework):
''' Can either run this test as 1 node with expected answers, or two and compare them.
Change the "outcome" variable from each TestInstance object to only do the comparison. '''
def __init__(self):
self.num_nodes = 1
self.block_heights = {}
self.coinbase_key = CECKey()
self.coinbase_key.set_secretbytes(bytes("horsebattery"))
self.coinbase_pubkey = self.coinbase_key.get_pubkey()
self.block_time = int(time.time())+1
self.tip = None
self.blocks = {}
def run_test(self):
test = TestManager(self, self.options.tmpdir)
test.add_all_connections(self.nodes)
NetworkThread().start() # Start up network handling in another thread
test.run()
def add_transactions_to_block(self, block, tx_list):
[ tx.rehash() for tx in tx_list ]
block.vtx.extend(tx_list)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
return block
# Create a block on top of self.tip, and advance self.tip to point to the new block
# if spend is specified, then 1 satoshi will be spent from that to an anyone-can-spend output,
# and rest will go to fees.
def | (self, number, spend=None, additional_coinbase_value=0, script=None):
if self.tip == None:
base_block_hash = self.genesis_hash
else:
base_block_hash = self.tip.sha256
# First create the coinbase
height = self.block_heights[base_block_hash] + 1
coinbase = create_coinbase(height, self.coinbase_pubkey)
coinbase.vout[0].nValue += additional_coinbase_value
if (spend != None):
coinbase.vout[0].nValue += spend.tx.vout[spend.n].nValue - 1 # all but one satoshi to fees
coinbase.rehash()
block = create_block(base_block_hash, coinbase, self.block_time)
if (spend != None):
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(spend.tx.sha256, spend.n), "", 0xffffffff)) # no signature yet
# This copies the java comparison tool testing behavior: the first
# txout has a garbage scriptPubKey, "to make sure we're not
# pre-verifying too much" (?)
tx.vout.append(CTxOut(0, CScript([random.randint(0,255), height & 255])))
if script == None:
tx.vout.append(CTxOut(1, CScript([OP_TRUE])))
else:
tx.vout.append(CTxOut(1, script))
# Now sign it if necessary
scriptSig = ""
scriptPubKey = bytearray(spend.tx.vout[spend.n].scriptPubKey)
if (scriptPubKey[0] == OP_TRUE): # looks like an anyone-can-spend
scriptSig = CScript([OP_TRUE])
else:
# We have to actually sign it
(sighash, err) = SignatureHash(spend.tx.vout[spend.n].scriptPubKey, tx, 0, SIGHASH_ALL)
scriptSig = CScript([self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))])
tx.vin[0].scriptSig = scriptSig
# Now add the transaction to the block
block = self.add_transactions_to_block(block, [tx])
block.solve()
self.tip = block
self.block_heights[block.sha256] = height
self.block_time += 1
assert number not in self.blocks
self.blocks[number] = block
return block
def get_tests(self):
self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16)
self.block_heights[self.genesis_hash] = 0
spendable_outputs = []
# save the current tip so it can be spent by a later block
def save_spendable_output():
spendable_outputs.append(self.tip)
# get an output that we previous marked as spendable
def get_spendable_output():
return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0)
# returns a test case that asserts that the current tip was accepted
def accepted():
return TestInstance([[self.tip, True]])
# returns a test case that asserts that the current tip was rejected
def rejected(reject = None):
if reject is None:
return TestInstance([[self.tip, False]])
else:
return TestInstance([[self.tip, reject]])
# move the tip back to a previous block
def tip(number):
self.tip = self.blocks[number]
# add transactions to a block produced by next_block
def update_block(block_number, new_transactions):
block = self.blocks[block_number]
old_hash = block.sha256
self.add_transactions_to_block(block, new_transactions)
block.solve()
# Update the internal state just like in next_block
self.tip = block
self.block_heights[block.sha256] = self.block_heights[old_hash]
del self.block_heights[old_hash]
self.blocks[block_number] = block
return block
# creates a new block and advances the tip to that block
block = self.next_block
# Create a new block
block(0)
save_spendable_output()
yield accepted()
# Now we need that block to mature so we can spend the coinbase.
test = TestInstance(sync_every_block=False)
for i in range(99):
block(1000 + i)
test.blocks_and_transactions.append([self.tip, True])
save_spendable_output()
yield test
# Start by building a couple of blocks on top (which output is spent is
# in parentheses):
# genesis -> b1 (0) -> b2 (1)
out0 = get_spendable_output()
block(1, spend=out0)
save_spendable_output()
yield accepted()
out1 = get_spendable_output()
b2 = block(2, spend=out1)
yield accepted()
# so fork like this:
#
# genesis -> b1 (0) -> b2 (1)
# \-> b3 (1)
#
# Nothing should happen at this point. We saw b2 first so it takes priority.
tip(1)
b3 = block(3, spend=out1)
txout_b3 = PreviousSpendableOutput(b3.vtx[1], 1)
yield rejected()
# Now we add another block to make the alternative chain longer.
#
# genesis -> b1 (0) -> b2 (1)
# \-> b3 (1) -> b4 (2)
out2 = get_spendable_output()
block(4, spend=out2)
yield accepted()
# ... and back to the first chain.
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b3 (1) -> b4 (2)
tip(2)
block(5, spend=out2)
save_spendable_output()
yield rejected()
out3 = get_spendable_output()
block(6, spend=out3)
yield accepted()
# Try to create a fork that double-spends
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b7 (2) -> b8 (4)
# \-> b3 (1) -> b4 (2)
tip(5)
block(7, spend=out2)
yield rejected()
out4 = get_spendable_output()
block(8, spend=out4)
yield rejected()
# Try to create a block that has too much fee
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b9 (4)
# \-> b3 (1) -> b4 (2)
tip(6)
block(9, spend=out4, additional_coinbase_value=1)
yield rejected(RejectResult(16, 'bad-cb-amount'))
# Create a fork that ends in a block with too much fee (the one that causes the reorg)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b10 (3) -> b11 (4)
# \-> b3 (1) -> b4 (2)
tip(5)
block(10, spend=out3)
yield rejected()
block(11, spend=out4, additional_coinbase_value=1)
yield rejected(RejectResult(16, 'bad-cb-amount'))
# Try again, but with a valid fork first
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b14 (5)
# (b12 added last)
# \-> b3 (1) -> b4 (2)
tip(5)
b12 = block(12, spend=out3)
save_spendable_output()
#yield TestInstance([[b12, False]])
b13 = block(13, spend=out4)
# Deliver the block header for b12, and the block b13.
# b13 should be accepted but the tip won't advance until b12 is delivered.
yield TestInstance([[CBlockHeader(b12), None], [b13, False]])
save_spendable_output()
out5 = get_spendable_output()
# b14 is invalid, but the node won't know that until it tries to connect
# Tip still can't advance because b12 is missing
block(14, spend=out5, additional_coinbase_value=1)
yield rejected()
yield TestInstance([[b12, True, b13.sha256]]) # New tip should be b13.
# Add a block with MAX_BLOCK_SIGOPS and one with one more sigop
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b16 (6)
# \-> b3 (1) -> b4 (2)
# Test that a block with a lot of checksigs is okay
lots_of_checksigs = CScript([OP_CHECKSIG] * (1000000 / 50 - 1))
tip(13)
block(15, spend=out5, script=lots_of_checksigs)
yield accepted()
# Test that a block with too many checksigs is rejected
out6 = get_spendable_output()
too_many_checksigs = CScript([OP_CHECKSIG] * (1000000 / 50))
block(16, spend=out6, script=too_many_checksigs)
yield rejected(RejectResult(16, 'bad-blk-sigops'))
# Attempt to spend a transaction created on a different fork
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b17 (b3.vtx[1])
# \-> b3 (1) -> b4 (2)
tip(15)
block(17, spend=txout_b3)
yield rejected(RejectResult(16, 'bad-txns-inputs-missingorspent'))
# Attempt to spend a transaction created on a different fork (on a fork this time)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5)
# \-> b18 (b3.vtx[1]) -> b19 (6)
# \-> b3 (1) -> b4 (2)
tip(13)
block(18, spend=txout_b3)
yield rejected()
block(19, spend=out6)
yield rejected()
# Attempt to spend a coinbase at depth too low
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b20 (7)
# \-> b3 (1) -> b4 (2)
tip(15)
out7 = get_spendable_output()
block(20, spend=out7)
yield rejected(RejectResult(16, 'bad-txns-premature-spend-of-coinbase'))
# Attempt to spend a coinbase at depth too low (on a fork this time)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5)
# \-> b21 (6) -> b22 (5)
# \-> b3 (1) -> b4 (2)
tip(13)
block(21, spend=out6)
yield rejected()
block(22, spend=out5)
yield rejected()
# Create a block on either side of MAX_BLOCK_SIZE and make sure its accepted/rejected
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6)
# \-> b24 (6) -> b25 (7)
# \-> b3 (1) -> b4 (2)
tip(15)
b23 = block(23, spend=out6)
old_hash = b23.sha256
tx = CTransaction()
script_length = MAX_BLOCK_SIZE - len(b23.serialize()) - 69
script_output = CScript([chr(0)*script_length])
tx.vout.append(CTxOut(0, script_output))
tx.vin.append(CTxIn(COutPoint(b23.vtx[1].sha256, 1)))
b23 = update_block(23, [tx])
# Make sure the math above worked out to produce a max-sized block
assert_equal(len(b23.serialize()), MAX_BLOCK_SIZE)
yield accepted()
# Make the next block one byte bigger and check that it fails
tip(15)
b24 = block(24, spend=out6)
script_length = MAX_BLOCK_SIZE - len(b24.serialize()) - 69
script_output = CScript([chr(0)*(script_length+1)])
tx.vout = [CTxOut(0, script_output)]
b24 = update_block(24, [tx])
assert_equal(len(b24.serialize()), MAX_BLOCK_SIZE+1)
yield rejected(RejectResult(16, 'bad-blk-length'))
b25 = block(25, spend=out7)
yield rejected()
# Create blocks with a coinbase input script size out of range
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7)
# \-> ... (6) -> ... (7)
# \-> b3 (1) -> b4 (2)
tip(15)
b26 = block(26, spend=out6)
b26.vtx[0].vin[0].scriptSig = chr(0)
b26.vtx[0].rehash()
# update_block causes the merkle root to get updated, even with no new
# transactions, and updates the required state.
b26 = update_block(26, [])
yield rejected(RejectResult(16, 'bad-cb-length'))
# Extend the b26 chain to make sure alisinabitd isn't accepting b26
b27 = block(27, spend=out7)
yield rejected()
# Now try a too-large-coinbase script
tip(15)
b28 = block(28, spend=out6)
b28.vtx[0].vin[0].scriptSig = chr(0)*101
b28.vtx[0].rehash()
b28 = update_block(28, [])
yield rejected(RejectResult(16, 'bad-cb-length'))
# Extend the b28 chain to make sure alisinabitd isn't accepting b28
b29 = block(29, spend=out7)
# TODO: Should get a reject message back with "bad-prevblk", except
# there's a bug that prevents this from being detected. Just note
# failure for now, and add the reject result later.
yield rejected()
# b30 has a max-sized coinbase scriptSig.
tip(23)
b30 = block(30)
b30.vtx[0].vin[0].scriptSig = chr(0)*100
b30.vtx[0].rehash()
b30 = update_block(30, [])
yield accepted()
if __name__ == '__main__':
FullBlockTest().main()
| next_block |
BrowserChrome.ts | /**
* @file BrowserChrome 浏览器-chrome
* @author Auto Generated by IconPark
*/
/* tslint:disable: max-line-length */
/* eslint-disable max-len */
import {ISvgIconProps, IconWrapper} from '../runtime'; | + '<svg width="' + props.size + '" height="' + props.size + '" viewBox="0 0 48 48" fill="none" xmlns="http://www.w3.org/2000/svg">'
+ '<path d="M24 15C28.9706 15 33 19.0294 33 24C33 28.9706 28.9706 33 24 33C19.0294 33 15 28.9706 15 24C15 19.0294 19.0294 15 24 15ZM24 15H41.8654M17 42.7408L29.6439 31M6 15.2717L16.8751 29.552M24 44C35.0457 44 44 35.0457 44 24C44 12.9543 35.0457 4 24 4C12.9543 4 4 12.9543 4 24C4 35.0457 12.9543 44 24 44Z" stroke="' + props.colors[0] + '" stroke-width="' + props.strokeWidth + '" stroke-linecap="' + props.strokeLinecap + '" stroke-linejoin="' + props.strokeLinejoin + '"/>'
+ '</svg>'
)); |
export default IconWrapper('browser-chrome', (props: ISvgIconProps) => (
'<?xml version="1.0" encoding="UTF-8"?>' |
wallet-exchange.fragment.graphql.ts | import { gql } from 'urql'
export const WALLET_EXCHANGE_FRAGMENT = gql`
fragment walletExchangeFragment on WalletExchangeType {
id
exchange
balance | }
` | account |
binarysearch.rs | use criterion::black_box;
use criterion::criterion_group;
use criterion::criterion_main;
use criterion::Criterion;
use tinystr::{TinyStr16, TinyStr4, TinyStr8};
static STRINGS_4: &[&str] = &[
"en", "es", "it", "zh", "de", "arab", "pl", "fr", "sr", "nb", "mk", "uk", "hans", "und", "ug",
"mn", "lif", "gan", "yue", "unr", "tuq", "klx", "kk", "cyrl",
];
macro_rules! bench_block {
($r:ty, $group:expr, $name:expr) => {
let keys: Vec<$r> = STRINGS_4.iter().map(|s| s.parse::<$r>().unwrap()).collect();
// Create about 36000 entries, with 2, 3 and 4 characters.
// Some keys will not be present in this data.
let mut strings = Vec::new();
for i in 'a'..='z' {
for j in 'a'..='z' {
let raw = [i as u8, j as u8];
strings.push(<$r>::from_bytes(&raw).unwrap());
for k in 'a'..='z' {
let raw = [i as u8, j as u8, k as u8];
strings.push(<$r>::from_bytes(&raw).unwrap());
let raw = [i as u8, j as u8, i as u8, k as u8];
strings.push(<$r>::from_bytes(&raw).unwrap());
}
}
}
strings.sort_unstable();
$group.bench_function($name, |b| {
b.iter(|| {
for key in keys.iter() {
let _ = black_box(strings.binary_search_by_key(&key, |l| l));
}
})
});
};
}
fn binarysearch_bench(c: &mut Criterion) |
criterion_group!(benches, binarysearch_bench);
criterion_main!(benches);
| {
let mut group = c.benchmark_group("binarysearch");
bench_block!(TinyStr4, group, "tinystr4");
bench_block!(TinyStr8, group, "tinystr8");
bench_block!(TinyStr16, group, "tinystr16");
group.finish();
} |
test_macsec.py | from swsscommon import swsscommon
import conftest
import sys
import functools
import typing
import re
import time
| def to_string(value):
if isinstance(value, bool):
return "true" if value else "false"
return str(value)
class Table(object):
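# Thin dict-like wrapper around a DVS database table: set/get/delete entries and wait for them to (dis)appear.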
def __init__(self, database: conftest.DVSDatabase, table_name: str):
self.db = database
self.table_name = table_name
def convert_key(self, key: str):
return key
def __setitem__(self, key: str, pairs: dict):
pairs_str = {}
for k, v in pairs.items():
pairs_str[to_string(k)] = to_string(v)
key = self.convert_key(key)
if self.__getitem__(key) is None:
self.db.create_entry(self.table_name, key, pairs_str)
else:
self.db.update_entry(self.table_name, key, pairs_str)
def __getitem__(self, key: str):
key = self.convert_key(key)
return self.db.get_entry(self.table_name, key)
def __delitem__(self, key: str):
key = self.convert_key(key)
self.db.delete_entry(self.table_name, key)
def wait(self, key: str):
key = self.convert_key(key)
# return True
return self.db.wait_for_entry(self.table_name, key)
def wait_delete(self, key: str):
key = self.convert_key(key)
# return True
return self.db.wait_for_deleted_entry(self.table_name, key)
class ProduceStateTable(object):
def __init__(self, database: conftest.DVSDatabase, table_name: str):
self.table = swsscommon.ProducerStateTable(
database.db_connection,
table_name)
def __setitem__(self, key: str, pairs: typing.Union[dict, list, tuple]):
pairs_str = []
if isinstance(pairs, dict):
pairs = pairs.items()
for k, v in pairs:
pairs_str.append((to_string(k), to_string(v)))
self.table.set(key, pairs_str)
def __delitem__(self, key: str):
self.table.delete(key)
class AppDBTable(ProduceStateTable):
SEPARATOR = ":"
def __init__(self, dvs: conftest.DockerVirtualSwitch, table_name: str):
super(AppDBTable, self).__init__(dvs.get_app_db(), table_name)
class StateDBTable(Table):
SEPARATOR = "|"
def __init__(self, dvs: conftest.DockerVirtualSwitch, table_name: str):
super(StateDBTable, self).__init__(dvs.get_state_db(), table_name)
def convert_key(self, key: str):
return key.translate(
str.maketrans(
AppDBTable.SEPARATOR,
StateDBTable.SEPARATOR))
def gen_sci(macsec_system_identifier: str, macsec_port_identifier: int) -> str:
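# SCI = system MAC address (separators stripped) followed by the zero-padded port id,
# parsed as hex, byte-swapped on little-endian hosts, and returned as a decimal string.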
macsec_system_identifier = macsec_system_identifier.translate(
str.maketrans("", "", ":.-"))
sci = "{}{}".format(
macsec_system_identifier,
str(macsec_port_identifier).zfill(4))
sci = int(sci, 16)
if sys.byteorder == "little":
sci = int.from_bytes(sci.to_bytes(8, 'big'), 'little', signed=False)
return str(sci)
def gen_sc_key(
separator: str,
port_name: str,
macsec_system_identifier: str,
macsec_port_identifier: int) -> str:
sci = gen_sci(macsec_system_identifier, macsec_port_identifier)
key = "{}{}{}".format(
port_name,
separator,
sci)
return key
def gen_sa_key(
separator: str,
port_name: str,
macsec_system_identifier: str,
macsec_port_identifier: int,
an: int):
sc_key = gen_sc_key(
separator,
port_name,
macsec_system_identifier,
macsec_port_identifier)
key = "{}{}{}".format(sc_key, separator, an)
return key
def macsec_sc(separator: str = AppDBTable.SEPARATOR):
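# Decorator: collapse (port_name, MAC, port_id) arguments into a single SC key string.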
def inner(func: typing.Callable) -> typing.Callable:
@functools.wraps(func)
def wrap_func(
self,
port_name: str,
macsec_system_identifier: str,
macsec_port_identifier: int,
*args,
**kwargs) -> typing.Any:
key = gen_sc_key(
separator,
port_name,
macsec_system_identifier,
macsec_port_identifier)
return func(self, key, *args, **kwargs)
return wrap_func
return inner
def macsec_sa(separator: str = AppDBTable.SEPARATOR):
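# Decorator: collapse (port_name, MAC, port_id, an) arguments into a single SA key string.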
def inner(func: typing.Callable) -> typing.Callable:
@functools.wraps(func)
def wrap_func(
self,
port_name: str,
macsec_system_identifier: str,
macsec_port_identifier: int,
an: int,
*args,
**kwargs) -> typing.Any:
key = gen_sa_key(
separator,
port_name,
macsec_system_identifier,
macsec_port_identifier,
an)
return func(self, key, *args, **kwargs)
return wrap_func
return inner
class WPASupplicantMock(object):
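# Mimics the wpa_supplicant side: writes MACsec port/SC/SA entries into APP_DB and
# waits for the corresponding STATE_DB entries to appear or disappear.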
def __init__(self, dvs: conftest.DockerVirtualSwitch):
self.dvs = dvs
self.app_port_table = AppDBTable(
self.dvs, swsscommon.APP_MACSEC_PORT_TABLE_NAME)
self.app_receive_sc_table = AppDBTable(
self.dvs, swsscommon.APP_MACSEC_INGRESS_SC_TABLE_NAME)
self.app_transmit_sc_table = AppDBTable(
self.dvs, swsscommon.APP_MACSEC_EGRESS_SC_TABLE_NAME)
self.app_receive_sa_table = AppDBTable(
self.dvs, swsscommon.APP_MACSEC_INGRESS_SA_TABLE_NAME)
self.app_transmit_sa_table = AppDBTable(
self.dvs, swsscommon.APP_MACSEC_EGRESS_SA_TABLE_NAME)
self.state_port_table = StateDBTable(
self.dvs, swsscommon.STATE_MACSEC_PORT_TABLE_NAME)
self.state_receive_sc_table = StateDBTable(
self.dvs, swsscommon.STATE_MACSEC_INGRESS_SC_TABLE_NAME)
self.state_transmit_sc_table = StateDBTable(
self.dvs, swsscommon.STATE_MACSEC_EGRESS_SC_TABLE_NAME)
self.state_receive_sa_table = StateDBTable(
self.dvs, swsscommon.STATE_MACSEC_INGRESS_SA_TABLE_NAME)
self.state_transmit_sa_table = StateDBTable(
self.dvs, swsscommon.STATE_MACSEC_EGRESS_SA_TABLE_NAME)
def init_macsec_port(self, port_name: str):
self.app_port_table[port_name] = {
"enable": False,
"cipher_suite": "GCM-AES-128",
}
self.state_port_table.wait(port_name)
def deinit_macsec_port(self, port_name: str):
del self.app_port_table[port_name]
self.state_port_table.wait_delete(port_name)
def config_macsec_port(
self,
port_name: str,
config: typing.Dict[str, typing.Any]):
self.app_port_table[port_name] = config
def set_macsec_control(self, port_name: str, enable: bool):
self.app_port_table[port_name] = {"enable": True}
@macsec_sc()
def create_receive_sc(self, sci: str):
self.app_receive_sc_table[sci] = {"NULL": "NULL"}
self.state_receive_sc_table.wait(sci)
@macsec_sc()
def delete_receive_sc(self, sci: str):
del self.app_receive_sc_table[sci]
self.state_receive_sc_table.wait_delete(sci)
@macsec_sc()
def create_transmit_sc(self, sci: str):
self.app_transmit_sc_table[sci] = {"encoding_an": 0}
self.state_transmit_sc_table.wait(sci)
@macsec_sc()
def delete_transmit_sc(self, sci: str):
del self.app_transmit_sc_table[sci]
self.state_transmit_sc_table.wait_delete(sci)
def check_valid_sa_parameter(
self,
sak: str,
auth_key: str,
lowest_acceptable_pn: int,
ssci: int,
salt: str) -> bool:
# Check SAK is hex string
int(sak, 16)
assert len(sak) == 32 or len(sak) == 64, "Wrong length {} sak {}".format(len(sak), sak)
# Check auth_key is valid
int(auth_key, 16)
assert len(auth_key) == 32, "Wrong length {} auth_key {}".format(len(auth_key), auth_key)
# Check lowest acceptable packet number is valid
assert lowest_acceptable_pn > 0, "Wrong packet number {}".format(lowest_acceptable_pn)
return True
@macsec_sa()
def create_receive_sa(
self,
sai: str,
sak: str,
auth_key: str,
lowest_acceptable_pn: int,
ssci: int,
salt: str):
assert self.check_valid_sa_parameter(sak, auth_key, lowest_acceptable_pn, ssci, salt), "Wrong parameter to MACsec receive SA"
self.app_receive_sa_table[sai] = {
"active": False, "sak": sak, "auth_key": auth_key,
"lowest_acceptable_pn": lowest_acceptable_pn,
"ssci": ssci, "salt": salt}
@macsec_sa()
def delete_receive_sa(self, sai: str):
del self.app_receive_sa_table[sai]
self.state_receive_sa_table.wait_delete(sai)
@macsec_sa()
def set_enable_receive_sa(self, sai: str, enable: bool):
self.app_receive_sa_table[sai] = {"active": enable}
if enable:
self.state_receive_sa_table.wait(sai)
@macsec_sa()
def create_transmit_sa(
self,
sai: str,
sak: str,
auth_key: str,
init_pn: int,
ssci: int,
salt: str):
assert self.check_valid_sa_parameter(sak, auth_key, init_pn, ssci, salt), "Wrong parameter to MACsec transmit SA"
self.app_transmit_sa_table[sai] = {
"sak": sak, "auth_key": auth_key,
"next_pn": init_pn, "ssci": ssci, "salt": salt}
@macsec_sa()
def delete_transmit_sa(self, sai: str):
del self.app_transmit_sa_table[sai]
self.state_transmit_sa_table.wait_delete(sai)
@macsec_sc()
def set_enable_transmit_sa(self, sci: str, an: int, enable: bool):
if enable:
self.app_transmit_sc_table[sci] = {"encoding_an": an}
assert(
self.state_transmit_sa_table.wait(
"{}{}{}".format(
sci,
StateDBTable.SEPARATOR,
an)))
class MACsecInspector(object):
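# Reads back the kernel MACsec state of the virtual switch via "ip macsec show".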
def __init__(self, dvs: conftest.DockerVirtualSwitch):
self.dvs = dvs
def __load_macsec_info(self, port_name: str) -> (bool, str):
return self.dvs.runcmd("ip macsec show {}".format(port_name))
def get_macsec_port(self, port_name: str) -> str:
exitcode, info = self.__load_macsec_info(port_name)
if exitcode != 0 or not info:
return ""
print(info)
return info
def get_macsec_sc(
self,
port_name: str,
macsec_system_identifier: str,
macsec_port_identifier: int) -> str:
info = self.get_macsec_port(port_name)
if not info:
return ""
macsec_system_identifier = macsec_system_identifier.translate(
str.maketrans("", "", ":.-"))
sci = "{}{}".format(
macsec_system_identifier,
str(macsec_port_identifier).zfill(4))
sc_pattern = r"(TXSC|RXSC):\s*{}[ \w,]+\n?(?:\s*\d:[,\w ]+\n?)*".format(
sci)
info = re.search(sc_pattern, info, re.IGNORECASE)
if not info:
return ""
print(info.group(0))
return info.group(0)
def get_macsec_sa(
self,
port_name: str,
macsec_system_identifier: str,
macsec_port_identifier: str,
an: int) -> str:
info = self.get_macsec_sc(
port_name,
macsec_system_identifier,
macsec_port_identifier)
if not info:
return ""
sa_pattern = r"\s*{}:\s*PN\s*\d+[,\w ]+\n?".format(an)
info = re.search(sa_pattern, info, re.IGNORECASE)
if not info:
return ""
print(info.group(0))
return info.group(0)
class TestMACsec(object):
def init_macsec(
self,
wpa: WPASupplicantMock,
port_name: str,
local_mac_address: str,
macsec_port_identifier: int):
wpa.init_macsec_port(port_name)
wpa.config_macsec_port(port_name, {"enable_protect": True})
wpa.config_macsec_port(port_name, {"enable_encrypt": True})
wpa.config_macsec_port(
port_name,
{
"enable_replay_protect": True,
"replay_window": 0
})
wpa.set_macsec_control(port_name, False)
wpa.create_transmit_sc(
port_name,
local_mac_address,
macsec_port_identifier)
def establish_macsec(
self,
wpa: WPASupplicantMock,
port_name: str,
local_mac_address: str,
peer_mac_address: str,
macsec_port_identifier: int,
an: int,
sak: str,
packet_number: int,
auth_key: str,
ssci: int,
salt: str):
wpa.create_receive_sc(
port_name,
peer_mac_address,
macsec_port_identifier)
wpa.create_receive_sa(
port_name,
peer_mac_address,
macsec_port_identifier,
an,
sak,
auth_key,
packet_number,
ssci,
salt)
wpa.create_transmit_sa(
port_name,
local_mac_address,
macsec_port_identifier,
an,
sak,
auth_key,
packet_number,
ssci,
salt)
wpa.set_enable_receive_sa(
port_name,
peer_mac_address,
macsec_port_identifier,
an,
True)
wpa.set_macsec_control(port_name, True)
wpa.set_enable_transmit_sa(
port_name,
local_mac_address,
macsec_port_identifier,
an,
True)
def rekey_macsec(
self,
wpa: WPASupplicantMock,
port_name: str,
local_mac_address: str,
peer_mac_address: str,
macsec_port_identifier: int,
an: int,
last_an: int,
sak: str,
packet_number: int,
auth_key: str,
ssci: int,
salt: str):
wpa.create_receive_sa(
port_name,
peer_mac_address,
macsec_port_identifier,
an,
sak,
auth_key,
packet_number,
ssci,
salt)
wpa.create_transmit_sa(
port_name,
local_mac_address,
macsec_port_identifier,
an,
sak,
auth_key,
packet_number,
ssci,
salt)
wpa.set_enable_receive_sa(
port_name,
peer_mac_address,
macsec_port_identifier,
an,
True)
wpa.set_macsec_control(port_name, True)
wpa.set_enable_transmit_sa(
port_name,
local_mac_address,
macsec_port_identifier,
an,
True)
wpa.set_enable_transmit_sa(
port_name,
local_mac_address,
macsec_port_identifier,
last_an,
False)
wpa.delete_transmit_sa(
port_name,
local_mac_address,
macsec_port_identifier,
last_an)
wpa.set_enable_receive_sa(
port_name,
peer_mac_address,
macsec_port_identifier,
last_an,
False)
wpa.delete_receive_sa(
port_name,
peer_mac_address,
macsec_port_identifier,
last_an)
def deinit_macsec(
self,
wpa: WPASupplicantMock,
inspector: MACsecInspector,
port_name: str,
macsec_port: str,
local_mac_address: str,
peer_mac_address: str,
macsec_port_identifier: int,
last_an: int):
wpa.set_enable_receive_sa(
port_name,
peer_mac_address,
macsec_port_identifier,
last_an,
False)
wpa.delete_receive_sa(
port_name,
peer_mac_address,
macsec_port_identifier,
last_an)
assert(
not inspector.get_macsec_sa(
macsec_port,
peer_mac_address,
macsec_port_identifier,
last_an))
wpa.delete_receive_sc(
port_name,
peer_mac_address,
macsec_port_identifier)
assert(
not inspector.get_macsec_sc(
macsec_port,
peer_mac_address,
macsec_port_identifier))
wpa.set_enable_transmit_sa(
port_name,
local_mac_address,
macsec_port_identifier,
last_an,
False)
wpa.delete_transmit_sa(
port_name,
local_mac_address,
macsec_port_identifier,
last_an)
assert(
not inspector.get_macsec_sa(
macsec_port,
local_mac_address,
macsec_port_identifier,
last_an))
wpa.delete_transmit_sc(
port_name,
local_mac_address,
macsec_port_identifier)
assert(
not inspector.get_macsec_sc(
macsec_port,
local_mac_address,
macsec_port_identifier))
wpa.deinit_macsec_port(port_name)
def test_macsec_term_orch(self, dvs: conftest.DockerVirtualSwitch, testlog):
port_name = "Ethernet0"
local_mac_address = "00-15-5D-78-FF-C1"
peer_mac_address = "00-15-5D-78-FF-C2"
macsec_port_identifier = 1
macsec_port = "macsec_eth1"
sak = "0" * 32
auth_key = "0" * 32
packet_number = 1
ssci = 1
salt = "0" * 24
wpa = WPASupplicantMock(dvs)
inspector = MACsecInspector(dvs)
self.init_macsec(
wpa,
port_name,
local_mac_address,
macsec_port_identifier)
self.establish_macsec(
wpa,
port_name,
local_mac_address,
peer_mac_address,
macsec_port_identifier,
0,
sak,
packet_number,
auth_key,
ssci,
salt)
assert(inspector.get_macsec_port(macsec_port))
assert(
inspector.get_macsec_sc(
macsec_port,
local_mac_address,
macsec_port_identifier))
assert(
inspector.get_macsec_sc(
macsec_port,
peer_mac_address,
macsec_port_identifier))
assert(
inspector.get_macsec_sa(
macsec_port,
local_mac_address,
macsec_port_identifier,
0))
assert(
inspector.get_macsec_sa(
macsec_port,
peer_mac_address,
macsec_port_identifier,
0))
self.rekey_macsec(
wpa,
port_name,
local_mac_address,
peer_mac_address,
macsec_port_identifier,
1,
0,
sak,
packet_number,
auth_key,
ssci,
salt)
assert(
inspector.get_macsec_sa(
macsec_port,
local_mac_address,
macsec_port_identifier,
1))
assert(
inspector.get_macsec_sa(
macsec_port,
peer_mac_address,
macsec_port_identifier,
1))
assert(
not inspector.get_macsec_sa(
macsec_port,
local_mac_address,
macsec_port_identifier,
0))
assert(
not inspector.get_macsec_sa(
macsec_port,
peer_mac_address,
macsec_port_identifier,
0))
# Exit MACsec port
self.deinit_macsec(
wpa,
inspector,
port_name,
macsec_port,
local_mac_address,
peer_mac_address,
macsec_port_identifier,
1)
assert(not inspector.get_macsec_port(macsec_port))
def test_macsec_attribute_change(self, dvs: conftest.DockerVirtualSwitch, testlog):
port_name = "Ethernet0"
local_mac_address = "00-15-5D-78-FF-C1"
peer_mac_address = "00-15-5D-78-FF-C2"
macsec_port_identifier = 1
macsec_port = "macsec_eth1"
sak = "0" * 32
auth_key = "0" * 32
packet_number = 1
ssci = 1
salt = "0" * 24
wpa = WPASupplicantMock(dvs)
inspector = MACsecInspector(dvs)
self.init_macsec(
wpa,
port_name,
local_mac_address,
macsec_port_identifier)
wpa.set_macsec_control(port_name, True)
wpa.config_macsec_port(port_name, {"enable_encrypt": False})
wpa.config_macsec_port(port_name, {"cipher_suite": "GCM-AES-256"})
self.establish_macsec(
wpa,
port_name,
local_mac_address,
peer_mac_address,
macsec_port_identifier,
0,
sak,
packet_number,
auth_key,
ssci,
salt)
macsec_info = inspector.get_macsec_port(macsec_port)
assert("encrypt off" in macsec_info)
assert("GCM-AES-256" in macsec_info)
self.deinit_macsec(
wpa,
inspector,
port_name,
macsec_port,
local_mac_address,
peer_mac_address,
macsec_port_identifier,
0)
# Add a dummy always-pass test at the end as a workaround
# for an issue where a Flaky failure on the final test invokes module tear-down
# before retrying
def test_nonflaky_dummy():
pass | |
delete_disconnected_nodes.py | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
def | (gd):
# delete all nodes with no inputs and outputs
empty_nodes = []
for k, v in gd.items():
if (
len(gd[k].inputs) == 0
and len(gd[k].outputs) == 0
and len(gd[k].control_inputs) == 0
and len(gd[k].control_outputs) == 0
and gd[k].op != "Placeholder"
):
empty_nodes.append(k)
for k in empty_nodes:
del gd[k]
| delete_disconnected_nodes |
solute.rs | // trait Solution{
// fn two_sum(nums: Vec<i32>, target: i32) -> Vec<i32> ;
// }
// use std::rc::Rc;
// use std::cell::RefCell;
// use std::collections::HashMap;
// use std::collections::BinaryHeap;
#[derive(Debug)]
pub struct Solution {}
impl Solution {
pub fn backspace_compare(s: String, t: String) -> bool |
}
| {
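// Rebuild each string, treating '#' as a backspace (pop), then compare the results.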
let mut rs=String::new();
let mut rt=String::new();
for i in s.chars() {
if i=='#'{
rs.pop();
}else{
rs.push(i);
}
}
for j in t.chars() {
if j=='#'{
rt.pop();
}else{
rt.push(j);
}
}
// println!("{:?} {:?}",rs,rt);
return rs==rt;
} |
message_codec.rs | use byteorder::{ByteOrder, NetworkEndian, WriteBytesExt};
use bytes::{BufMut, BytesMut};
use std::fmt::Debug;
use tokio_util::codec::{Decoder, Encoder};
use crate::communication::{CodecError, InterProcessMessage, MessageMetadata};
const HEADER_SIZE: usize = 8;
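/// Which part of the incoming frame the decoder expects next.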
#[derive(Debug)]
enum DecodeStatus {
Header,
Metadata {
metadata_size: usize,
data_size: usize,
},
Data {
data_size: usize,
},
}
/// Encodes messages into bytes, and decodes bytes into an [`InterProcessMessage`].
///
/// For each message, the codec first writes the size of its message header,
/// then the message header, and finally the content of the message.
#[derive(Debug)]
pub struct MessageCodec {
/// Current part of the message to decode.
status: DecodeStatus,
msg_metadata: Option<MessageMetadata>,
}
impl MessageCodec {
pub fn new() -> MessageCodec {
MessageCodec {
status: DecodeStatus::Header,
msg_metadata: None,
}
}
}
impl Decoder for MessageCodec {
type Item = InterProcessMessage;
type Error = CodecError;
/// Decodes a sequence of bytes into an InterProcessMessage.
///
/// Reads the header size, then the header, and finally the message.
/// Reserves memory for the entire message to reduce upon reading the header
/// costly memory allocations.
fn decode(&mut self, buf: &mut BytesMut) -> Result<Option<InterProcessMessage>, CodecError> {
match self.status {
// Decode the header and reserve
DecodeStatus::Header => {
if buf.len() >= HEADER_SIZE {
let header = buf.split_to(HEADER_SIZE);
let metadata_size = NetworkEndian::read_u32(&header[0..4]) as usize;
let data_size = NetworkEndian::read_u32(&header[4..8]) as usize;
self.status = DecodeStatus::Metadata {
metadata_size,
data_size,
};
// Reserve space in the buffer for the rest of the message and the next header.
buf.reserve(metadata_size + data_size + HEADER_SIZE);
self.decode(buf)
} else {
Ok(None)
}
}
// Decode the metadata.
DecodeStatus::Metadata {
metadata_size,
data_size,
} => {
if buf.len() >= metadata_size {
let metadata_bytes = buf.split_to(metadata_size);
let metadata: MessageMetadata =
bincode::deserialize(&metadata_bytes).map_err(CodecError::BincodeError)?;
self.msg_metadata = Some(metadata);
self.status = DecodeStatus::Data { data_size };
self.decode(buf)
} else {
Ok(None)
}
}
// Decode the data.
DecodeStatus::Data { data_size } => {
if buf.len() >= data_size {
let bytes = buf.split_to(data_size);
let msg = InterProcessMessage::new_serialized(
bytes,
self.msg_metadata.take().unwrap(),
);
self.status = DecodeStatus::Header;
Ok(Some(msg))
} else {
Ok(None)
}
}
}
}
} | /// Encodes a InterProcessMessage into a buffer.
///
/// First writes the header_size, then the header, and finally the
/// serialized message.
fn encode(&mut self, msg: InterProcessMessage, buf: &mut BytesMut) -> Result<(), CodecError> {
// Serialize and write the header.
let (metadata, data) = match msg {
InterProcessMessage::Deserialized { metadata, data } => (metadata, data),
InterProcessMessage::Serialized {
metadata: _,
bytes: _,
} => unreachable!(),
};
// Allocate memory in the buffer for serialized metadata and data
// to reduce memory allocations.
let metadata_size = bincode::serialized_size(&metadata).map_err(CodecError::from)?;
let data_size = data.serialized_size().unwrap();
buf.reserve(HEADER_SIZE + metadata_size as usize + data_size);
// Serialize directly into the buffer.
let mut writer = buf.writer();
writer.write_u32::<NetworkEndian>(metadata_size as u32)?;
writer.write_u32::<NetworkEndian>(data_size as u32)?;
bincode::serialize_into(&mut writer, &metadata).map_err(CodecError::from)?;
data.encode_into(buf).unwrap();
Ok(())
}
}
impl Default for MessageCodec {
fn default() -> Self {
Self::new()
}
} |
impl Encoder<InterProcessMessage> for MessageCodec {
type Error = CodecError;
|