build_reduced_graph.rs
//! Reduced graph building. //! //! Here we build the "reduced graph": the graph of the module tree without //! any imports resolved. use crate::macros::{InvocationData, ParentScope, LegacyScope}; use crate::resolve_imports::ImportDirective; use crate::resolve_imports::ImportDirectiveSubclass::{self, GlobImport, SingleImport}; use crate::{Module, ModuleData, ModuleKind, NameBinding, NameBindingKind, Segment, ToNameBinding}; use crate::{ModuleOrUniformRoot, PerNS, Resolver, ResolverArenas, ExternPreludeEntry}; use crate::Namespace::{self, TypeNS, ValueNS, MacroNS}; use crate::{resolve_error, resolve_struct_error, ResolutionError}; use rustc::bug; use rustc::hir::def::{self, *}; use rustc::hir::def_id::{CrateNum, CRATE_DEF_INDEX, LOCAL_CRATE, DefId}; use rustc::ty; use rustc::middle::cstore::CrateStore; use rustc_metadata::cstore::LoadedMacro; use std::cell::Cell; use std::ptr; use rustc_data_structures::sync::Lrc; use errors::Applicability; use syntax::ast::{Name, Ident}; use syntax::attr; use syntax::ast::{self, Block, ForeignItem, ForeignItemKind, Item, ItemKind, NodeId}; use syntax::ast::{MetaItemKind, StmtKind, TraitItem, TraitItemKind, Variant}; use syntax::ext::base::{MacroKind, SyntaxExtension}; use syntax::ext::base::Determinacy::Undetermined; use syntax::ext::hygiene::Mark; use syntax::ext::tt::macro_rules; use syntax::feature_gate::is_builtin_attr; use syntax::parse::token::{self, Token}; use syntax::span_err; use syntax::std_inject::injected_crate_name; use syntax::symbol::{keywords, sym}; use syntax::visit::{self, Visitor}; use syntax_pos::{Span, DUMMY_SP}; use log::debug; type Res = def::Res<NodeId>; impl<'a> ToNameBinding<'a> for (Module<'a>, ty::Visibility, Span, Mark) { fn to_name_binding(self, arenas: &'a ResolverArenas<'a>) -> &'a NameBinding<'a> { arenas.alloc_name_binding(NameBinding { kind: NameBindingKind::Module(self.0), ambiguity: None, vis: self.1, span: self.2, expansion: self.3, }) } } impl<'a> ToNameBinding<'a> for (Res, ty::Visibility, Span, Mark) { fn to_name_binding(self, arenas: &'a ResolverArenas<'a>) -> &'a NameBinding<'a> { arenas.alloc_name_binding(NameBinding { kind: NameBindingKind::Res(self.0, false), ambiguity: None, vis: self.1, span: self.2, expansion: self.3, }) } } pub(crate) struct IsMacroExport; impl<'a> ToNameBinding<'a> for (Res, ty::Visibility, Span, Mark, IsMacroExport) { fn to_name_binding(self, arenas: &'a ResolverArenas<'a>) -> &'a NameBinding<'a> { arenas.alloc_name_binding(NameBinding { kind: NameBindingKind::Res(self.0, true), ambiguity: None, vis: self.1, span: self.2, expansion: self.3, }) } } impl<'a> Resolver<'a> { /// Defines `name` in namespace `ns` of module `parent` to be `def` if it is not yet defined; /// otherwise, reports an error. 
pub fn define<T>(&mut self, parent: Module<'a>, ident: Ident, ns: Namespace, def: T) where T: ToNameBinding<'a>, { let binding = def.to_name_binding(self.arenas); if let Err(old_binding) = self.try_define(parent, ident, ns, binding) { self.report_conflict(parent, ident, ns, old_binding, &binding); } } fn block_needs_anonymous_module(&mut self, block: &Block) -> bool { // If any statements are items, we need to create an anonymous module block.stmts.iter().any(|statement| match statement.node { StmtKind::Item(_) | StmtKind::Mac(_) => true, _ => false, }) } fn insert_field_names(&mut self, def_id: DefId, field_names: Vec<Name>) { if !field_names.is_empty() { self.field_names.insert(def_id, field_names); } } fn build_reduced_graph_for_use_tree( &mut self, // This particular use tree use_tree: &ast::UseTree, id: NodeId, parent_prefix: &[Segment], nested: bool, // The whole `use` item parent_scope: ParentScope<'a>, item: &Item, vis: ty::Visibility, root_span: Span, ) { debug!("build_reduced_graph_for_use_tree(parent_prefix={:?}, use_tree={:?}, nested={})", parent_prefix, use_tree, nested); let mut prefix_iter = parent_prefix.iter().cloned() .chain(use_tree.prefix.segments.iter().map(|seg| seg.into())).peekable(); // On 2015 edition imports are resolved as crate-relative by default, // so prefixes are prepended with crate root segment if necessary. // The root is prepended lazily, when the first non-empty prefix or terminating glob // appears, so imports in braced groups can have roots prepended independently. let is_glob = if let ast::UseTreeKind::Glob = use_tree.kind { true } else { false }; let crate_root = match prefix_iter.peek() { Some(seg) if !seg.ident.is_path_segment_keyword() && seg.ident.span.rust_2015() => { Some(seg.ident.span.ctxt()) } None if is_glob && use_tree.span.rust_2015() => { Some(use_tree.span.ctxt()) } _ => None, }.map(|ctxt| Segment::from_ident(Ident::new( keywords::PathRoot.name(), use_tree.prefix.span.shrink_to_lo().with_ctxt(ctxt) ))); let prefix = crate_root.into_iter().chain(prefix_iter).collect::<Vec<_>>(); debug!("build_reduced_graph_for_use_tree: prefix={:?}", prefix); let empty_for_self = |prefix: &[Segment]| { prefix.is_empty() || prefix.len() == 1 && prefix[0].ident.name == keywords::PathRoot.name() }; match use_tree.kind { ast::UseTreeKind::Simple(rename, ..) => { let mut ident = use_tree.ident().gensym_if_underscore(); let mut module_path = prefix; let mut source = module_path.pop().unwrap(); let mut type_ns_only = false; if nested { // Correctly handle `self` if source.ident.name == keywords::SelfLower.name() { type_ns_only = true; if empty_for_self(&module_path) { resolve_error( self, use_tree.span, ResolutionError:: SelfImportOnlyInImportListWithNonEmptyPrefix ); return; } // Replace `use foo::self;` with `use foo;` source = module_path.pop().unwrap(); if rename.is_none() { ident = source.ident; } } } else { // Disallow `self` if source.ident.name == keywords::SelfLower.name() { resolve_error(self, use_tree.span, ResolutionError::SelfImportsOnlyAllowedWithin); } // Disallow `use $crate;` if source.ident.name == keywords::DollarCrate.name() && module_path.is_empty() { let crate_root = self.resolve_crate_root(source.ident); let crate_name = match crate_root.kind { ModuleKind::Def(.., name) => name, ModuleKind::Block(..) => unreachable!(), }; // HACK(eddyb) unclear how good this is, but keeping `$crate` // in `source` breaks `src/test/compile-fail/import-crate-var.rs`, // while the current crate doesn't have a valid `crate_name`. 
if crate_name != keywords::Invalid.name() { // `crate_name` should not be interpreted as relative. module_path.push(Segment { ident: Ident { name: keywords::PathRoot.name(), span: source.ident.span, }, id: Some(self.session.next_node_id()), }); source.ident.name = crate_name; } if rename.is_none() { ident.name = crate_name; } self.session.struct_span_warn(item.span, "`$crate` may not be imported") .note("`use $crate;` was erroneously allowed and \ will become a hard error in a future release") .emit(); } } if ident.name == keywords::Crate.name() { self.session.span_err(ident.span, "crate root imports need to be explicitly named: \ `use crate as name;`"); } let subclass = SingleImport { source: source.ident, target: ident, source_bindings: PerNS { type_ns: Cell::new(Err(Undetermined)), value_ns: Cell::new(Err(Undetermined)), macro_ns: Cell::new(Err(Undetermined)), }, target_bindings: PerNS { type_ns: Cell::new(None), value_ns: Cell::new(None), macro_ns: Cell::new(None), }, type_ns_only, nested, }; self.add_import_directive( module_path, subclass, use_tree.span, id, item, root_span, item.id, vis, parent_scope, ); } ast::UseTreeKind::Glob => { let subclass = GlobImport { is_prelude: attr::contains_name(&item.attrs, sym::prelude_import), max_vis: Cell::new(ty::Visibility::Invisible), }; self.add_import_directive( prefix, subclass, use_tree.span, id, item, root_span, item.id, vis, parent_scope, ); } ast::UseTreeKind::Nested(ref items) => { // Ensure there is at most one `self` in the list let self_spans = items.iter().filter_map(|&(ref use_tree, _)| { if let ast::UseTreeKind::Simple(..) = use_tree.kind { if use_tree.ident().name == keywords::SelfLower.name() { return Some(use_tree.span); } } None }).collect::<Vec<_>>(); if self_spans.len() > 1 { let mut e = resolve_struct_error(self, self_spans[0], ResolutionError::SelfImportCanOnlyAppearOnceInTheList); for other_span in self_spans.iter().skip(1) { e.span_label(*other_span, "another `self` import appears here"); } e.emit(); } for &(ref tree, id) in items { self.build_reduced_graph_for_use_tree( // This particular use tree tree, id, &prefix, true, // The whole `use` item parent_scope.clone(), item, vis, root_span, ); } // Empty groups `a::b::{}` are turned into synthetic `self` imports // `a::b::c::{self as __dummy}`, so that their prefixes are correctly // resolved and checked for privacy/stability/etc. if items.is_empty() && !empty_for_self(&prefix) { let new_span = prefix[prefix.len() - 1].ident.span; let tree = ast::UseTree { prefix: ast::Path::from_ident( Ident::new(keywords::SelfLower.name(), new_span) ), kind: ast::UseTreeKind::Simple( Some(Ident::new(Name::gensym("__dummy"), new_span)), ast::DUMMY_NODE_ID, ast::DUMMY_NODE_ID, ), span: use_tree.span, }; self.build_reduced_graph_for_use_tree( // This particular use tree &tree, id, &prefix, true, // The whole `use` item parent_scope, item, ty::Visibility::Invisible, root_span, ); } } } } /// Constructs the reduced graph for one item. fn build_reduced_graph_for_item(&mut self, item: &Item, parent_scope: ParentScope<'a>)
{ let parent = parent_scope.module; let expansion = parent_scope.expansion; let ident = item.ident.gensym_if_underscore(); let sp = item.span; let vis = self.resolve_visibility(&item.vis); match item.node { ItemKind::Use(ref use_tree) => { self.build_reduced_graph_for_use_tree( // This particular use tree use_tree, item.id, &[], false, // The whole `use` item parent_scope, item, vis, use_tree.span, ); } ItemKind::ExternCrate(orig_name) => { let module = if orig_name.is_none() && ident.name == keywords::SelfLower.name() { self.session .struct_span_err(item.span, "`extern crate self;` requires renaming") .span_suggestion( item.span, "try", "extern crate self as name;".into(), Applicability::HasPlaceholders, ) .emit(); return; } else if orig_name == Some(keywords::SelfLower.name()) { self.graph_root } else { let crate_id = self.crate_loader.process_extern_crate(item, &self.definitions); self.get_module(DefId { krate: crate_id, index: CRATE_DEF_INDEX }) }; self.populate_module_if_necessary(module); if injected_crate_name().map_or(false, |name| ident.name.as_str() == name) { self.injected_crate = Some(module); } let used = self.process_legacy_macro_imports(item, module, &parent_scope); let binding = (module, ty::Visibility::Public, sp, expansion).to_name_binding(self.arenas); let directive = self.arenas.alloc_import_directive(ImportDirective { root_id: item.id, id: item.id, parent_scope, imported_module: Cell::new(Some(ModuleOrUniformRoot::Module(module))), subclass: ImportDirectiveSubclass::ExternCrate { source: orig_name, target: ident, }, has_attributes: !item.attrs.is_empty(), use_span_with_attributes: item.span_with_attributes(), use_span: item.span, root_span: item.span, span: item.span, module_path: Vec::new(), vis: Cell::new(vis), used: Cell::new(used), }); self.potentially_unused_imports.push(directive); let imported_binding = self.import(binding, directive); if ptr::eq(self.current_module, self.graph_root) { if let Some(entry) = self.extern_prelude.get(&ident.modern()) { if expansion != Mark::root() && orig_name.is_some() && entry.extern_crate_item.is_none() { self.session.span_err(item.span, "macro-expanded `extern crate` items \ cannot shadow names passed with \ `--extern`"); } } let entry = self.extern_prelude.entry(ident.modern()) .or_insert(ExternPreludeEntry { extern_crate_item: None, introduced_by_item: true, }); entry.extern_crate_item = Some(imported_binding); if orig_name.is_some() { entry.introduced_by_item = true; } } self.define(parent, ident, TypeNS, imported_binding); } ItemKind::GlobalAsm(..) => {} ItemKind::Mod(..) if ident == keywords::Invalid.ident() => {} // Crate root ItemKind::Mod(..) => { let def_id = self.definitions.local_def_id(item.id); let module_kind = ModuleKind::Def(DefKind::Mod, def_id, ident.name); let module = self.arenas.alloc_module(ModuleData { no_implicit_prelude: parent.no_implicit_prelude || { attr::contains_name(&item.attrs, sym::no_implicit_prelude) }, ..ModuleData::new(Some(parent), module_kind, def_id, expansion, item.span) }); self.define(parent, ident, TypeNS, (module, vis, sp, expansion)); self.module_map.insert(def_id, module); // Descend into the module. self.current_module = module; } // Handled in `rustc_metadata::{native_libs,link_args}` ItemKind::ForeignMod(..) => {} // These items live in the value namespace. ItemKind::Static(..) => { let res = Res::Def(DefKind::Static, self.definitions.local_def_id(item.id)); self.define(parent, ident, ValueNS, (res, vis, sp, expansion)); } ItemKind::Const(..) => { let res = Res::Def(DefKind::Const, self.definitions.local_def_id(item.id)); self.define(parent, ident, ValueNS, (res, vis, sp, expansion)); } ItemKind::Fn(..) => { let res = Res::Def(DefKind::Fn, self.definitions.local_def_id(item.id)); self.define(parent, ident, ValueNS, (res, vis, sp, expansion)); // Functions introducing procedural macros reserve a slot // in the macro namespace as well (see #52225). if attr::contains_name(&item.attrs, sym::proc_macro) || attr::contains_name(&item.attrs, sym::proc_macro_attribute) { let res = Res::Def(DefKind::Macro(MacroKind::ProcMacroStub), res.def_id()); self.define(parent, ident, MacroNS, (res, vis, sp, expansion)); } if let Some(attr) = attr::find_by_name(&item.attrs, sym::proc_macro_derive) { if let Some(trait_attr) = attr.meta_item_list().and_then(|list| list.get(0).cloned()) { if let Some(ident) = trait_attr.ident() { let res = Res::Def( DefKind::Macro(MacroKind::ProcMacroStub), res.def_id(), ); self.define(parent, ident, MacroNS, (res, vis, ident.span, expansion)); } } } } // These items live in the type namespace. ItemKind::Ty(..) => { let res = Res::Def(DefKind::TyAlias, self.definitions.local_def_id(item.id)); self.define(parent, ident, TypeNS, (res, vis, sp, expansion)); } ItemKind::Existential(_, _) => { let res = Res::Def(DefKind::Existential, self.definitions.local_def_id(item.id)); self.define(parent, ident, TypeNS, (res, vis, sp, expansion)); } ItemKind::Enum(ref enum_definition, _) => { let module_kind = ModuleKind::Def( DefKind::Enum, self.definitions.local_def_id(item.id), ident.name, ); let module = self.new_module(parent, module_kind, parent.normal_ancestor_id, expansion, item.span); self.define(parent, ident, TypeNS, (module, vis, sp, expansion)); for variant in &(*enum_definition).variants { self.build_reduced_graph_for_variant(variant, module, vis, expansion); } } ItemKind::TraitAlias(..) => { let res = Res::Def(DefKind::TraitAlias, self.definitions.local_def_id(item.id)); self.define(parent, ident, TypeNS, (res, vis, sp, expansion)); } // These items live in both the type and value namespaces. ItemKind::Struct(ref struct_def, _) => { // Define a name in the type namespace. let def_id = self.definitions.local_def_id(item.id); let res = Res::Def(DefKind::Struct, def_id); self.define(parent, ident, TypeNS, (res, vis, sp, expansion)); let mut ctor_vis = vis; let has_non_exhaustive = attr::contains_name(&item.attrs, sym::non_exhaustive); // If the structure is marked as non_exhaustive then lower the visibility // to within the crate. if has_non_exhaustive && vis == ty::Visibility::Public { ctor_vis = ty::Visibility::Restricted(DefId::local(CRATE_DEF_INDEX)); } // Record field names for error reporting. let field_names = struct_def.fields().iter().filter_map(|field| { let field_vis = self.resolve_visibility(&field.vis); if ctor_vis.is_at_least(field_vis, &*self) { ctor_vis = field_vis; } field.ident.map(|ident| ident.name) }).collect(); let item_def_id = self.definitions.local_def_id(item.id); self.insert_field_names(item_def_id, field_names); // If this is a tuple or unit struct, define a name // in the value namespace as well. if let Some(ctor_node_id) = struct_def.ctor_id() { let ctor_res = Res::Def( DefKind::Ctor(CtorOf::Struct, CtorKind::from_ast(struct_def)), self.definitions.local_def_id(ctor_node_id), ); self.define(parent, ident, ValueNS, (ctor_res, ctor_vis, sp, expansion)); self.struct_constructors.insert(res.def_id(), (ctor_res, ctor_vis)); } } ItemKind::Union(ref vdata, _) => { let res = Res::Def(DefKind::Union, self.definitions.local_def_id(item.id)); self.define(parent, ident, TypeNS, (res, vis, sp, expansion)); // Record field names for error reporting. let field_names = vdata.fields().iter().filter_map(|field| { self.resolve_visibility(&field.vis); field.ident.map(|ident| ident.name) }).collect(); let item_def_id = self.definitions.local_def_id(item.id); self.insert_field_names(item_def_id, field_names); } ItemKind::Impl(..) => {} ItemKind::Trait(..) => { let def_id = self.definitions.local_def_id(item.id); // Add all the items within to a new module. let module_kind = ModuleKind::Def(DefKind::Trait, def_id, ident.name); let module = self.new_module(parent, module_kind, parent.normal_ancestor_id, expansion, item.span); self.define(parent, ident, TypeNS, (module, vis, sp, expansion)); self.current_module = module; } ItemKind::MacroDef(..) | ItemKind::Mac(_) => unreachable!(), } }
// Constructs the reduced graph for one variant. Variants exist in the // type and value namespaces. fn build_reduced_graph_for_variant(&mut self, variant: &Variant, parent: Module<'a>, vis: ty::Visibility, expansion: Mark) { let ident = variant.node.ident; // Define a name in the type namespace. let def_id = self.definitions.local_def_id(variant.node.id); let res = Res::Def(DefKind::Variant, def_id); self.define(parent, ident, TypeNS, (res, vis, variant.span, expansion)); // If the variant is marked as non_exhaustive then lower the visibility to within the // crate. let mut ctor_vis = vis; let has_non_exhaustive = attr::contains_name(&variant.node.attrs, sym::non_exhaustive); if has_non_exhaustive && vis == ty::Visibility::Public { ctor_vis = ty::Visibility::Restricted(DefId::local(CRATE_DEF_INDEX)); } // Define a constructor name in the value namespace. // Braced variants, unlike structs, generate unusable names in // value namespace, they are reserved for possible future use. // It's ok to use the variant's id as a ctor id since an // error will be reported on any use of such resolution anyway. let ctor_node_id = variant.node.data.ctor_id().unwrap_or(variant.node.id); let ctor_def_id = self.definitions.local_def_id(ctor_node_id); let ctor_kind = CtorKind::from_ast(&variant.node.data); let ctor_res = Res::Def(DefKind::Ctor(CtorOf::Variant, ctor_kind), ctor_def_id); self.define(parent, ident, ValueNS, (ctor_res, ctor_vis, variant.span, expansion)); } /// Constructs the reduced graph for one foreign item. fn build_reduced_graph_for_foreign_item(&mut self, item: &ForeignItem, expansion: Mark) { let (res, ns) = match item.node { ForeignItemKind::Fn(..) => { (Res::Def(DefKind::Fn, self.definitions.local_def_id(item.id)), ValueNS) } ForeignItemKind::Static(..) => { (Res::Def(DefKind::Static, self.definitions.local_def_id(item.id)), ValueNS) } ForeignItemKind::Ty => { (Res::Def(DefKind::ForeignTy, self.definitions.local_def_id(item.id)), TypeNS) } ForeignItemKind::Macro(_) => unreachable!(), }; let parent = self.current_module; let vis = self.resolve_visibility(&item.vis); self.define(parent, item.ident, ns, (res, vis, item.span, expansion)); } fn build_reduced_graph_for_block(&mut self, block: &Block, expansion: Mark) { let parent = self.current_module; if self.block_needs_anonymous_module(block) { let module = self.new_module(parent, ModuleKind::Block(block.id), parent.normal_ancestor_id, expansion, block.span); self.block_map.insert(block.id, module); self.current_module = module; // Descend into the block. } } /// Builds the reduced graph for a single item in an external crate. fn build_reduced_graph_for_external_crate_res( &mut self, parent: Module<'a>, child: Export<ast::NodeId>, ) { let Export { ident, res, vis, span } = child; // FIXME: We shouldn't create the gensym here, it should come from metadata, // but metadata cannot encode gensyms currently, so we create it here. // This is only a guess, two equivalent idents may incorrectly get different gensyms here. 
let ident = ident.gensym_if_underscore(); let expansion = Mark::root(); // FIXME(jseyfried) intercrate hygiene match res { Res::Def(kind @ DefKind::Mod, def_id) | Res::Def(kind @ DefKind::Enum, def_id) => { let module = self.new_module(parent, ModuleKind::Def(kind, def_id, ident.name), def_id, expansion, span); self.define(parent, ident, TypeNS, (module, vis, DUMMY_SP, expansion)); } Res::Def(DefKind::Variant, _) | Res::Def(DefKind::TyAlias, _) | Res::Def(DefKind::ForeignTy, _) | Res::Def(DefKind::Existential, _) | Res::Def(DefKind::TraitAlias, _) | Res::PrimTy(..) | Res::ToolMod => { self.define(parent, ident, TypeNS, (res, vis, DUMMY_SP, expansion)); } Res::Def(DefKind::Fn, _) | Res::Def(DefKind::Static, _) | Res::Def(DefKind::Const, _) | Res::Def(DefKind::Ctor(CtorOf::Variant, ..), _) => { self.define(parent, ident, ValueNS, (res, vis, DUMMY_SP, expansion)); } Res::Def(DefKind::Ctor(CtorOf::Struct, ..), def_id) => { self.define(parent, ident, ValueNS, (res, vis, DUMMY_SP, expansion)); if let Some(struct_def_id) = self.cstore.def_key(def_id).parent .map(|index| DefId { krate: def_id.krate, index: index }) { self.struct_constructors.insert(struct_def_id, (res, vis)); } } Res::Def(DefKind::Trait, def_id) => { let module_kind = ModuleKind::Def(DefKind::Trait, def_id, ident.name); let module = self.new_module(parent, module_kind, parent.normal_ancestor_id, expansion, span); self.define(parent, ident, TypeNS, (module, vis, DUMMY_SP, expansion)); for child in self.cstore.item_children_untracked(def_id, self.session) { let res = child.res.map_id(|_| panic!("unexpected id")); let ns = if let Res::Def(DefKind::AssociatedTy, _) = res { TypeNS } else { ValueNS }; self.define(module, child.ident, ns, (res, ty::Visibility::Public, DUMMY_SP, expansion)); if self.cstore.associated_item_cloned_untracked(child.res.def_id()) .method_has_self_argument { self.has_self.insert(res.def_id()); } } module.populated.set(true); } Res::Def(DefKind::Struct, def_id) | Res::Def(DefKind::Union, def_id) => { self.define(parent, ident, TypeNS, (res, vis, DUMMY_SP, expansion)); // Record field names for error reporting. let field_names = self.cstore.struct_field_names_untracked(def_id); self.insert_field_names(def_id, field_names); } Res::Def(DefKind::Macro(..), _) | Res::NonMacroAttr(..) 
=> { self.define(parent, ident, MacroNS, (res, vis, DUMMY_SP, expansion)); } _ => bug!("unexpected resolution: {:?}", res) } } pub fn get_module(&mut self, def_id: DefId) -> Module<'a> { if def_id.krate == LOCAL_CRATE { return self.module_map[&def_id] } let macros_only = self.cstore.dep_kind_untracked(def_id.krate).macros_only(); if let Some(&module) = self.extern_module_map.get(&(def_id, macros_only)) { return module; } let (name, parent) = if def_id.index == CRATE_DEF_INDEX { (self.cstore.crate_name_untracked(def_id.krate).as_interned_str(), None) } else { let def_key = self.cstore.def_key(def_id); (def_key.disambiguated_data.data.get_opt_name().unwrap(), Some(self.get_module(DefId { index: def_key.parent.unwrap(), ..def_id }))) }; let kind = ModuleKind::Def(DefKind::Mod, def_id, name.as_symbol()); let module = self.arenas.alloc_module(ModuleData::new(parent, kind, def_id, Mark::root(), DUMMY_SP)); self.extern_module_map.insert((def_id, macros_only), module); module } pub fn macro_def_scope(&mut self, expansion: Mark) -> Module<'a> { let def_id = self.macro_defs[&expansion]; if let Some(id) = self.definitions.as_local_node_id(def_id) { self.local_macro_def_scopes[&id] } else if def_id.krate == CrateNum::BuiltinMacros { self.injected_crate.unwrap_or(self.graph_root) } else { let module_def_id = ty::DefIdTree::parent(&*self, def_id).unwrap(); self.get_module(module_def_id) } } pub fn get_macro(&mut self, res: Res) -> Lrc<SyntaxExtension> { let def_id = match res { Res::Def(DefKind::Macro(..), def_id) => def_id, Res::NonMacroAttr(attr_kind) => return Lrc::new(SyntaxExtension::NonMacroAttr { mark_used: attr_kind == NonMacroAttrKind::Tool, }), _ => panic!("expected `DefKind::Macro` or `Res::NonMacroAttr`"), }; if let Some(ext) = self.macro_map.get(&def_id) { return ext.clone(); } let macro_def = match self.cstore.load_macro_untracked(def_id, &self.session) { LoadedMacro::MacroDef(macro_def) => macro_def, LoadedMacro::ProcMacro(ext) => return ext, }; let ext = Lrc::new(macro_rules::compile(&self.session.parse_sess, &self.session.features_untracked(), &macro_def, self.cstore.crate_edition_untracked(def_id.krate))); self.macro_map.insert(def_id, ext.clone()); ext } /// Ensures that the reduced graph rooted at the given external module /// is built, building it if it is not. pub fn populate_module_if_necessary(&mut self, module: Module<'a>) { if module.populated.get() { return } let def_id = module.def_id().unwrap(); for child in self.cstore.item_children_untracked(def_id, self.session) { let child = child.map_id(|_| panic!("unexpected id")); self.build_reduced_graph_for_external_crate_res(module, child); } module.populated.set(true) } fn legacy_import_macro(&mut self, name: Name, binding: &'a NameBinding<'a>, span: Span, allow_shadowing: bool) { if self.macro_use_prelude.insert(name, binding).is_some() && !allow_shadowing { let msg = format!("`{}` is already in scope", name); let note = "macro-expanded `#[macro_use]`s may not shadow existing macros (see RFC 1560)"; self.session.struct_span_err(span, &msg).note(note).emit(); } } /// Returns `true` if we should consider the underlying `extern crate` to be used. 
fn process_legacy_macro_imports(&mut self, item: &Item, module: Module<'a>, parent_scope: &ParentScope<'a>) -> bool { let mut import_all = None; let mut single_imports = Vec::new(); for attr in &item.attrs { if attr.check_name(sym::macro_use) { if self.current_module.parent.is_some() { span_err!(self.session, item.span, E0468, "an `extern crate` loading macros must be at the crate root"); } if let ItemKind::ExternCrate(Some(orig_name)) = item.node { if orig_name == keywords::SelfLower.name() { self.session.span_err(attr.span, "`macro_use` is not supported on `extern crate self`"); } } let ill_formed = |span| span_err!(self.session, span, E0466, "bad macro import"); match attr.meta() { Some(meta) => match meta.node { MetaItemKind::Word => { import_all = Some(meta.span); break; } MetaItemKind::List(nested_metas) => for nested_meta in nested_metas { match nested_meta.ident() { Some(ident) if nested_meta.is_word() => single_imports.push(ident), _ => ill_formed(nested_meta.span()), } } MetaItemKind::NameValue(..) => ill_formed(meta.span), } None => ill_formed(attr.span), } } } let arenas = self.arenas; let macro_use_directive = |span| arenas.alloc_import_directive(ImportDirective { root_id: item.id, id: item.id, parent_scope: parent_scope.clone(), imported_module: Cell::new(Some(ModuleOrUniformRoot::Module(module))), subclass: ImportDirectiveSubclass::MacroUse, use_span_with_attributes: item.span_with_attributes(), has_attributes: !item.attrs.is_empty(), use_span: item.span, root_span: span, span, module_path: Vec::new(), vis: Cell::new(ty::Visibility::Restricted(DefId::local(CRATE_DEF_INDEX))), used: Cell::new(false), }); let allow_shadowing = parent_scope.expansion == Mark::root(); if let Some(span) = import_all { let directive = macro_use_directive(span); self.potentially_unused_imports.push(directive); module.for_each_child(|ident, ns, binding| if ns == MacroNS { let imported_binding = self.import(binding, directive); self.legacy_import_macro(ident.name, imported_binding, span, allow_shadowing); }); } else { for ident in single_imports.iter().cloned() { let result = self.resolve_ident_in_module( ModuleOrUniformRoot::Module(module), ident, MacroNS, None, false, ident.span, ); if let Ok(binding) = result { let directive = macro_use_directive(ident.span); self.potentially_unused_imports.push(directive); let imported_binding = self.import(binding, directive); self.legacy_import_macro(ident.name, imported_binding, ident.span, allow_shadowing); } else { span_err!(self.session, ident.span, E0469, "imported macro not found"); } } } import_all.is_some() || !single_imports.is_empty() } /// Returns `true` if this attribute list contains `macro_use`. 
fn contains_macro_use(&mut self, attrs: &[ast::Attribute]) -> bool { for attr in attrs { if attr.check_name(sym::macro_escape) { let msg = "macro_escape is a deprecated synonym for macro_use"; let mut err = self.session.struct_span_warn(attr.span, msg); if let ast::AttrStyle::Inner = attr.style { err.help("consider an outer attribute, #[macro_use] mod ...").emit(); } else { err.emit(); } } else if !attr.check_name(sym::macro_use) { continue; } if !attr.is_word() { self.session.span_err(attr.span, "arguments to macro_use are not allowed here"); } return true; } false } } pub struct BuildReducedGraphVisitor<'a, 'b: 'a> { pub resolver: &'a mut Resolver<'b>, pub current_legacy_scope: LegacyScope<'b>, pub expansion: Mark, } impl<'a, 'b> BuildReducedGraphVisitor<'a, 'b> { fn visit_invoc(&mut self, id: ast::NodeId) -> &'b InvocationData<'b> { let mark = id.placeholder_to_mark(); self.resolver.current_module.unresolved_invocations.borrow_mut().insert(mark); let invocation = self.resolver.invocations[&mark]; invocation.module.set(self.resolver.current_module); invocation.parent_legacy_scope.set(self.current_legacy_scope); invocation } } macro_rules! method { ($visit:ident: $ty:ty, $invoc:path, $walk:ident) => { fn $visit(&mut self, node: &'a $ty) { if let $invoc(..) = node.node { self.visit_invoc(node.id); } else { visit::$walk(self, node); } } } } impl<'a, 'b> Visitor<'a> for BuildReducedGraphVisitor<'a, 'b> { method!(visit_impl_item: ast::ImplItem, ast::ImplItemKind::Macro, walk_impl_item); method!(visit_expr: ast::Expr, ast::ExprKind::Mac, walk_expr); method!(visit_pat: ast::Pat, ast::PatKind::Mac, walk_pat); method!(visit_ty: ast::Ty, ast::TyKind::Mac, walk_ty); fn visit_item(&mut self, item: &'a Item) { let macro_use = match item.node { ItemKind::MacroDef(..) => { self.resolver.define_macro(item, self.expansion, &mut self.current_legacy_scope); return } ItemKind::Mac(..) => { self.current_legacy_scope = LegacyScope::Invocation(self.visit_invoc(item.id)); return } ItemKind::Mod(..) => self.resolver.contains_macro_use(&item.attrs), _ => false, }; let orig_current_module = self.resolver.current_module; let orig_current_legacy_scope = self.current_legacy_scope; let parent_scope = ParentScope { module: self.resolver.current_module, expansion: self.expansion, legacy: self.current_legacy_scope, derives: Vec::new(), }; self.resolver.build_reduced_graph_for_item(item, parent_scope); visit::walk_item(self, item); self.resolver.current_module = orig_current_module; if !macro_use { self.current_legacy_scope = orig_current_legacy_scope; } } fn visit_stmt(&mut self, stmt: &'a ast::Stmt) { if let ast::StmtKind::Mac(..) 
= stmt.node { self.current_legacy_scope = LegacyScope::Invocation(self.visit_invoc(stmt.id)); } else { visit::walk_stmt(self, stmt); } } fn visit_foreign_item(&mut self, foreign_item: &'a ForeignItem) { if let ForeignItemKind::Macro(_) = foreign_item.node { self.visit_invoc(foreign_item.id); return; } self.resolver.build_reduced_graph_for_foreign_item(foreign_item, self.expansion); visit::walk_foreign_item(self, foreign_item); } fn visit_block(&mut self, block: &'a Block) { let orig_current_module = self.resolver.current_module; let orig_current_legacy_scope = self.current_legacy_scope; self.resolver.build_reduced_graph_for_block(block, self.expansion); visit::walk_block(self, block); self.resolver.current_module = orig_current_module; self.current_legacy_scope = orig_current_legacy_scope; } fn visit_trait_item(&mut self, item: &'a TraitItem) { let parent = self.resolver.current_module; if let TraitItemKind::Macro(_) = item.node { self.visit_invoc(item.id); return } // Add the item to the trait info. let item_def_id = self.resolver.definitions.local_def_id(item.id); let (res, ns) = match item.node { TraitItemKind::Const(..) => (Res::Def(DefKind::AssociatedConst, item_def_id), ValueNS), TraitItemKind::Method(ref sig, _) => { if sig.decl.has_self() { self.resolver.has_self.insert(item_def_id); } (Res::Def(DefKind::Method, item_def_id), ValueNS) } TraitItemKind::Type(..) => (Res::Def(DefKind::AssociatedTy, item_def_id), TypeNS), TraitItemKind::Macro(_) => bug!(), // handled above }; let vis = ty::Visibility::Public; self.resolver.define(parent, item.ident, ns, (res, vis, item.span, self.expansion)); self.resolver.current_module = parent.parent.unwrap(); // nearest normal ancestor visit::walk_trait_item(self, item); self.resolver.current_module = parent; } fn visit_token(&mut self, t: Token) { if let Token::Interpolated(nt) = t { if let token::NtExpr(ref expr) = *nt { if let ast::ExprKind::Mac(..) = expr.node { self.visit_invoc(expr.id); } } } } fn visit_attribute(&mut self, attr: &'a ast::Attribute) { if !attr.is_sugared_doc && is_builtin_attr(attr) { let parent_scope = ParentScope { module: self.resolver.current_module.nearest_item_scope(), expansion: self.expansion, legacy: self.current_legacy_scope, // Let's hope discerning built-in attributes from derive helpers is not necessary derives: Vec::new(), }; parent_scope.module.builtin_attrs.borrow_mut().push(( attr.path.segments[0].ident, parent_scope )); } visit::walk_attribute(self, attr); } }
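The visitor above saves `current_module` and `current_legacy_scope` before descending into an item or block and restores them afterwards, so nested definitions land in the right parent scope. A minimal sketch of that save/descend/restore discipline, using hypothetical toy types rather than the real resolver:

// Toy module-tree walker using the same save/descend/restore shape as
// `BuildReducedGraphVisitor::visit_item` / `visit_block` (illustrative only).
struct Item { name: String, children: Vec<Item> }

struct Visitor { current_path: Vec<String>, defined: Vec<String> }

impl Visitor {
    fn visit(&mut self, item: &Item) {
        // Define the item in the current scope.
        let mut path = self.current_path.clone();
        path.push(item.name.clone());
        self.defined.push(path.join("::"));

        // Save, descend, restore -- nested children see the extended scope,
        // siblings see the original one.
        self.current_path.push(item.name.clone());
        for child in &item.children {
            self.visit(child);
        }
        self.current_path.pop();
    }
}

fn main() {
    let tree = Item {
        name: "root".into(),
        children: vec![Item { name: "inner".into(), children: vec![] }],
    };
    let mut v = Visitor { current_path: vec![], defined: vec![] };
    v.visit(&tree);
    println!("{:?}", v.defined); // ["root", "root::inner"]
}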
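The `ToNameBinding` impls at the top of this file let `Resolver::define` accept several ad-hoc tuple shapes and convert each into an arena-allocated `NameBinding`. A standalone sketch of the same tuple-to-binding conversion pattern, assuming simplified stand-in types (everything here is hypothetical; `Store` is a plain `Vec`, not rustc's arena):

// Simplified stand-ins for rustc's types; names are illustrative only.
#[derive(Debug, Clone, Copy, PartialEq)]
enum Visibility { Public, Crate }

#[derive(Debug)]
struct Binding { is_module: bool, vis: Visibility, span: (u32, u32) }

#[derive(Default)]
struct Store { bindings: Vec<Binding> }

impl Store {
    // rustc hands out `&'a NameBinding<'a>` from a real arena; a Vec index
    // stands in for that here.
    fn alloc(&mut self, b: Binding) -> usize {
        self.bindings.push(b);
        self.bindings.len() - 1
    }
}

// Each tuple shape knows how to turn itself into a stored binding,
// so `define` stays generic over what callers pass in.
trait ToBinding {
    fn to_binding(self, store: &mut Store) -> usize;
}

// (module-flag, visibility, span) -> binding
impl ToBinding for (bool, Visibility, (u32, u32)) {
    fn to_binding(self, store: &mut Store) -> usize {
        store.alloc(Binding { is_module: self.0, vis: self.1, span: self.2 })
    }
}

fn define<T: ToBinding>(store: &mut Store, def: T) -> usize {
    def.to_binding(store)
}

fn main() {
    let mut store = Store::default();
    let idx = define(&mut store, (true, Visibility::Public, (0, 4)));
    println!("{:?}", store.bindings[idx]);
}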
macro_rules.rs
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use {ast, attr}; use syntax_pos::{Span, DUMMY_SP}; use ext::base::{DummyResult, ExtCtxt, MacResult, SyntaxExtension}; use ext::base::{NormalTT, TTMacroExpander}; use ext::expand::{Expansion, ExpansionKind}; use ext::tt::macro_parser::{Success, Error, Failure}; use ext::tt::macro_parser::{MatchedSeq, MatchedNonterminal}; use ext::tt::macro_parser::{parse, parse_failure_msg}; use ext::tt::quoted; use ext::tt::transcribe::transcribe; use feature_gate::{self, emit_feature_err, Features, GateIssue}; use parse::{Directory, ParseSess}; use parse::parser::Parser; use parse::token::{self, NtTT}; use parse::token::Token::*; use symbol::Symbol; use tokenstream::{TokenStream, TokenTree}; use std::cell::RefCell; use std::collections::HashMap; use std::collections::hash_map::Entry; use std::rc::Rc; pub struct ParserAnyMacro<'a> { parser: Parser<'a>, /// Span of the expansion site of the macro this parser is for site_span: Span, /// The ident of the macro we're parsing macro_ident: ast::Ident } impl<'a> ParserAnyMacro<'a> { pub fn make(mut self: Box<ParserAnyMacro<'a>>, kind: ExpansionKind) -> Expansion { let ParserAnyMacro { site_span, macro_ident, ref mut parser } = *self; let expansion = panictry!(parser.parse_expansion(kind, true)); // We allow semicolons at the end of expressions -- e.g. the semicolon in // `macro_rules! m { () => { panic!(); } }` isn't parsed by `.parse_expr()`, // but `m!()` is allowed in expression positions (c.f. issue #34706). if kind == ExpansionKind::Expr && parser.token == token::Semi { parser.bump(); } // Make sure we don't have any tokens left to parse so we don't silently drop anything. let path = ast::Path::from_ident(site_span, macro_ident); parser.ensure_complete_parse(&path, kind.name(), site_span); expansion } } struct MacroRulesMacroExpander { name: ast::Ident, lhses: Vec<quoted::TokenTree>, rhses: Vec<quoted::TokenTree>, valid: bool, } impl TTMacroExpander for MacroRulesMacroExpander { fn expand<'cx>(&self, cx: &'cx mut ExtCtxt, sp: Span, input: TokenStream) -> Box<MacResult+'cx> { if !self.valid { return DummyResult::any(sp); } generic_extension(cx, sp, self.name, input, &self.lhses, &self.rhses) } } fn trace_macros_note(cx: &mut ExtCtxt, sp: Span, message: String) { let sp = sp.macro_backtrace().last().map(|trace| trace.call_site).unwrap_or(sp); let mut values: &mut Vec<String> = cx.expansions.entry(sp).or_insert_with(Vec::new); values.push(message); } /// Given `lhses` and `rhses`, this is the new macro we create fn generic_extension<'cx>(cx: &'cx mut ExtCtxt, sp: Span, name: ast::Ident, arg: TokenStream, lhses: &[quoted::TokenTree], rhses: &[quoted::TokenTree]) -> Box<MacResult+'cx> { if cx.trace_macros() { trace_macros_note(cx, sp, format!("expanding `{}! {{ {} }}`", name, arg)); } // Which arm's failure should we report? 
(the one furthest along) let mut best_fail_spot = DUMMY_SP; let mut best_fail_tok = None; for (i, lhs) in lhses.iter().enumerate() { // try each arm's matchers let lhs_tt = match *lhs { quoted::TokenTree::Delimited(_, ref delim) => &delim.tts[..], _ => cx.span_bug(sp, "malformed macro lhs") }; match TokenTree::parse(cx, lhs_tt, arg.clone()) { Success(named_matches) => { let rhs = match rhses[i] { // ignore delimiters quoted::TokenTree::Delimited(_, ref delimed) => delimed.tts.clone(), _ => cx.span_bug(sp, "malformed macro rhs"), }; // rhs has holes ( `$id` and `$(...)` that need filled) let tts = transcribe(cx, Some(named_matches), rhs); if cx.trace_macros() { trace_macros_note(cx, sp, format!("to `{}`", tts)); } let directory = Directory { path: cx.current_expansion.module.directory.clone(), ownership: cx.current_expansion.directory_ownership, }; let mut p = Parser::new(cx.parse_sess(), tts, Some(directory), true, false); p.root_module_name = cx.current_expansion.module.mod_path.last() .map(|id| id.name.as_str().to_string()); p.process_potential_macro_variable(); // Let the context choose how to interpret the result. // Weird, but useful for X-macros. return Box::new(ParserAnyMacro { parser: p, // Pass along the original expansion site and the name of the macro // so we can print a useful error message if the parse of the expanded // macro leaves unparsed tokens. site_span: sp, macro_ident: name }) } Failure(sp, tok) => if sp.lo >= best_fail_spot.lo { best_fail_spot = sp; best_fail_tok = Some(tok); }, Error(err_sp, ref msg) => { cx.span_fatal(err_sp.substitute_dummy(sp), &msg[..]) } } } let best_fail_msg = parse_failure_msg(best_fail_tok.expect("ran no matchers")); cx.span_fatal(best_fail_spot.substitute_dummy(sp), &best_fail_msg); } // Note that macro-by-example's input is also matched against a token tree: // $( $lhs:tt => $rhs:tt );+ // // Holy self-referential! /// Converts a `macro_rules!` invocation into a syntax extension. pub fn compile(sess: &ParseSess, features: &RefCell<Features>, def: &ast::Item) -> SyntaxExtension { let lhs_nm = ast::Ident::with_empty_ctxt(Symbol::gensym("lhs")); let rhs_nm = ast::Ident::with_empty_ctxt(Symbol::gensym("rhs")); // Parse the macro_rules! invocation let body = match def.node { ast::ItemKind::MacroDef(ref body) => body, _ => unreachable!(), }; // The pattern that macro_rules matches. // The grammar for macro_rules! is: // $( $lhs:tt => $rhs:tt );+ // ...quasiquoting this would be nice. 
// These spans won't matter, anyways let argument_gram = vec![ quoted::TokenTree::Sequence(DUMMY_SP, Rc::new(quoted::SequenceRepetition { tts: vec![ quoted::TokenTree::MetaVarDecl(DUMMY_SP, lhs_nm, ast::Ident::from_str("tt")), quoted::TokenTree::Token(DUMMY_SP, token::FatArrow), quoted::TokenTree::MetaVarDecl(DUMMY_SP, rhs_nm, ast::Ident::from_str("tt")), ], separator: Some(if body.legacy { token::Semi } else { token::Comma }), op: quoted::KleeneOp::OneOrMore, num_captures: 2, })), // to phase into semicolon-termination instead of semicolon-separation quoted::TokenTree::Sequence(DUMMY_SP, Rc::new(quoted::SequenceRepetition { tts: vec![quoted::TokenTree::Token(DUMMY_SP, token::Semi)], separator: None, op: quoted::KleeneOp::ZeroOrMore, num_captures: 0 })), ]; let argument_map = match parse(sess, body.stream(), &argument_gram, None, true) { Success(m) => m, Failure(sp, tok) => { let s = parse_failure_msg(tok); panic!(sess.span_diagnostic.span_fatal(sp.substitute_dummy(def.span), &s)); } Error(sp, s) => { panic!(sess.span_diagnostic.span_fatal(sp.substitute_dummy(def.span), &s)); } }; let mut valid = true; // Extract the arguments: let lhses = match *argument_map[&lhs_nm] { MatchedSeq(ref s, _) => { s.iter().map(|m| { if let MatchedNonterminal(ref nt) = *m { if let NtTT(ref tt) = **nt { let tt = quoted::parse(tt.clone().into(), true, sess).pop().unwrap(); valid &= check_lhs_nt_follows(sess, features, &def.attrs, &tt); return tt; } } sess.span_diagnostic.span_bug(def.span, "wrong-structured lhs") }).collect::<Vec<quoted::TokenTree>>() } _ => sess.span_diagnostic.span_bug(def.span, "wrong-structured lhs") }; let rhses = match *argument_map[&rhs_nm] { MatchedSeq(ref s, _) => { s.iter().map(|m| { if let MatchedNonterminal(ref nt) = *m { if let NtTT(ref tt) = **nt { return quoted::parse(tt.clone().into(), false, sess).pop().unwrap(); } } sess.span_diagnostic.span_bug(def.span, "wrong-structured lhs") }).collect::<Vec<quoted::TokenTree>>() } _ => sess.span_diagnostic.span_bug(def.span, "wrong-structured rhs") }; for rhs in &rhses { valid &= check_rhs(sess, rhs); } // don't abort iteration early, so that errors for multiple lhses can be reported for lhs in &lhses { valid &= check_lhs_no_empty_seq(sess, &[lhs.clone()]) } let exp: Box<_> = Box::new(MacroRulesMacroExpander { name: def.ident, lhses: lhses, rhses: rhses, valid: valid, }); if body.legacy { let allow_internal_unstable = attr::contains_name(&def.attrs, "allow_internal_unstable"); NormalTT(exp, Some((def.id, def.span)), allow_internal_unstable) } else { SyntaxExtension::DeclMacro(exp, Some((def.id, def.span))) } } fn check_lhs_nt_follows(sess: &ParseSess, features: &RefCell<Features>, attrs: &[ast::Attribute], lhs: &quoted::TokenTree) -> bool { // lhs is going to be like TokenTree::Delimited(...), where the // entire lhs is those tts. Or, it can be a "bare sequence", not wrapped in parens. if let quoted::TokenTree::Delimited(_, ref tts) = *lhs { check_matcher(sess, features, attrs, &tts.tts) } else { let msg = "invalid macro matcher; matchers must be contained in balanced delimiters"; sess.span_diagnostic.span_err(lhs.span(), msg); false } // we don't abort on errors on rejection, the driver will do that for us // after parsing/expansion. we can report every error in every macro this way. } /// Check that the lhs contains no repetition which could match an empty token /// tree, because then the matcher would hang indefinitely. 
fn check_lhs_no_empty_seq(sess: &ParseSess, tts: &[quoted::TokenTree]) -> bool { use self::quoted::TokenTree; for tt in tts { match *tt { TokenTree::Token(..) | TokenTree::MetaVar(..) | TokenTree::MetaVarDecl(..) => (), TokenTree::Delimited(_, ref del) => if !check_lhs_no_empty_seq(sess, &del.tts) { return false; }, TokenTree::Sequence(span, ref seq) => { if seq.separator.is_none() && seq.tts.iter().all(|seq_tt| { match *seq_tt { TokenTree::MetaVarDecl(_, _, id) => id.name == "vis", TokenTree::Sequence(_, ref sub_seq) => sub_seq.op == quoted::KleeneOp::ZeroOrMore, _ => false, } }) { sess.span_diagnostic.span_err(span, "repetition matches empty token tree"); return false; } if !check_lhs_no_empty_seq(sess, &seq.tts) { return false; } } } } true } fn check_rhs(sess: &ParseSess, rhs: &quoted::TokenTree) -> bool { match *rhs { quoted::TokenTree::Delimited(..) => return true, _ => sess.span_diagnostic.span_err(rhs.span(), "macro rhs must be delimited") } false } fn check_matcher(sess: &ParseSess, features: &RefCell<Features>, attrs: &[ast::Attribute], matcher: &[quoted::TokenTree]) -> bool { let first_sets = FirstSets::new(matcher); let empty_suffix = TokenSet::empty(); let err = sess.span_diagnostic.err_count(); check_matcher_core(sess, features, attrs, &first_sets, matcher, &empty_suffix); err == sess.span_diagnostic.err_count() } // The FirstSets for a matcher is a mapping from subsequences in the // matcher to the FIRST set for that subsequence. // // This mapping is partially precomputed via a backwards scan over the // token trees of the matcher, which provides a mapping from each // repetition sequence to its FIRST set. // // (Hypothetically sequences should be uniquely identifiable via their // spans, though perhaps that is false e.g. for macro-generated macros // that do not try to inject artificial span information. My plan is // to try to catch such cases ahead of time and not include them in // the precomputed mapping.) struct FirstSets { // this maps each TokenTree::Sequence `$(tt ...) SEP OP` that is uniquely identified by its // span in the original matcher to the First set for the inner sequence `tt ...`. // // If two sequences have the same span in a matcher, then map that // span to None (invalidating the mapping here and forcing the code to // use a slow path). first: HashMap<Span, Option<TokenSet>>, } impl FirstSets { fn new(tts: &[quoted::TokenTree]) -> FirstSets { use self::quoted::TokenTree; let mut sets = FirstSets { first: HashMap::new() }; build_recur(&mut sets, tts); return sets; // walks backward over `tts`, returning the FIRST for `tts` // and updating `sets` at the same time for all sequence // substructure we find within `tts`. fn build_recur(sets: &mut FirstSets, tts: &[TokenTree]) -> TokenSet { let mut first = TokenSet::empty(); for tt in tts.iter().rev() { match *tt { TokenTree::Token(..) | TokenTree::MetaVar(..) | TokenTree::MetaVarDecl(..) => { first.replace_with(tt.clone()); } TokenTree::Delimited(span, ref delimited) => { build_recur(sets, &delimited.tts[..]); first.replace_with(delimited.open_tt(span)); } TokenTree::Sequence(sp, ref seq_rep) => { let subfirst = build_recur(sets, &seq_rep.tts[..]); match sets.first.entry(sp) { Entry::Vacant(vac) => { vac.insert(Some(subfirst.clone())); } Entry::Occupied(mut occ) => { // if there is already an entry, then a span must have collided. // This should not happen with typical macro_rules macros, // but syntax extensions need not maintain distinct spans, // so distinct syntax trees can be assigned the same span. 
// In such a case, the map cannot be trusted; so mark this // entry as unusable. occ.insert(None); } } // If the sequence contents can be empty, then the first // token could be the separator token itself. if let (Some(ref sep), true) = (seq_rep.separator.clone(), subfirst.maybe_empty) { first.add_one_maybe(TokenTree::Token(sp, sep.clone())); } // Reverse scan: Sequence comes before `first`. if subfirst.maybe_empty || seq_rep.op == quoted::KleeneOp::ZeroOrMore { // If sequence is potentially empty, then // union them (preserving first emptiness). first.add_all(&TokenSet { maybe_empty: true, ..subfirst }); } else { // Otherwise, sequence guaranteed // non-empty; replace first. first = subfirst; } } } } first } } // walks forward over `tts` until all potential FIRST tokens are // identified. fn first(&self, tts: &[quoted::TokenTree]) -> TokenSet
{ use self::quoted::TokenTree; let mut first = TokenSet::empty(); for tt in tts.iter() { assert!(first.maybe_empty); match *tt { TokenTree::Token(..) | TokenTree::MetaVar(..) | TokenTree::MetaVarDecl(..) => { first.add_one(tt.clone()); return first; } TokenTree::Delimited(span, ref delimited) => { first.add_one(delimited.open_tt(span)); return first; } TokenTree::Sequence(sp, ref seq_rep) => { match self.first.get(&sp) { Some(&Some(ref subfirst)) => { // If the sequence contents can be empty, then the first // token could be the separator token itself. if let (Some(ref sep), true) = (seq_rep.separator.clone(), subfirst.maybe_empty) { first.add_one_maybe(TokenTree::Token(sp, sep.clone())); } assert!(first.maybe_empty); first.add_all(subfirst); if subfirst.maybe_empty || seq_rep.op == quoted::KleeneOp::ZeroOrMore { // continue scanning for more first // tokens, but also make sure we // restore empty-tracking state first.maybe_empty = true; continue; } else { return first; } } Some(&None) => { panic!("assume all sequences have (unique) spans for now"); } None => { panic!("We missed a sequence during FirstSets construction"); } } } } } // we only exit the loop if `tts` was empty or if every // element of `tts` matches the empty sequence. assert!(first.maybe_empty); first }
} // A set of `quoted::TokenTree`s, which may include `TokenTree::Match`s // (for macro-by-example syntactic variables). It also carries the // `maybe_empty` flag; that is true if and only if the matcher can // match an empty token sequence. // // The First set is computed on submatchers like `$($a:expr b),* $(c)* d`, // which has corresponding FIRST = {$a:expr, c, d}. // Likewise, `$($a:expr b),* $(c)+ d` has FIRST = {$a:expr, c}. // // (Notably, we must allow for *-op to occur zero times.) #[derive(Clone, Debug)] struct TokenSet { tokens: Vec<quoted::TokenTree>, maybe_empty: bool, } impl TokenSet { // Returns a set for the empty sequence. fn empty() -> Self { TokenSet { tokens: Vec::new(), maybe_empty: true } } // Returns the set `{ tok }` for the single-token (and thus // non-empty) sequence [tok]. fn singleton(tok: quoted::TokenTree) -> Self { TokenSet { tokens: vec![tok], maybe_empty: false } } // Changes self to be the set `{ tok }`. // Since `tok` is always present, marks self as non-empty. fn replace_with(&mut self, tok: quoted::TokenTree) { self.tokens.clear(); self.tokens.push(tok); self.maybe_empty = false; } // Changes self to be the empty set `{}`; meant for use when // the particular token does not matter, but we want to // record that it occurs. fn replace_with_irrelevant(&mut self) { self.tokens.clear(); self.maybe_empty = false; } // Adds `tok` to the set for `self`, marking sequence as non-empy. fn add_one(&mut self, tok: quoted::TokenTree) { if !self.tokens.contains(&tok) { self.tokens.push(tok); } self.maybe_empty = false; } // Adds `tok` to the set for `self`. (Leaves `maybe_empty` flag alone.) fn add_one_maybe(&mut self, tok: quoted::TokenTree) { if !self.tokens.contains(&tok) { self.tokens.push(tok); } } // Adds all elements of `other` to this. // // (Since this is a set, we filter out duplicates.) // // If `other` is potentially empty, then preserves the previous // setting of the empty flag of `self`. If `other` is guaranteed // non-empty, then `self` is marked non-empty. fn add_all(&mut self, other: &Self) { for tok in &other.tokens { if !self.tokens.contains(tok) { self.tokens.push(tok.clone()); } } if !other.maybe_empty { self.maybe_empty = false; } } } // Checks that `matcher` is internally consistent and that it // can legally by followed by a token N, for all N in `follow`. // (If `follow` is empty, then it imposes no constraint on // the `matcher`.) // // Returns the set of NT tokens that could possibly come last in // `matcher`. (If `matcher` matches the empty sequence, then // `maybe_empty` will be set to true.) // // Requires that `first_sets` is pre-computed for `matcher`; // see `FirstSets::new`. fn check_matcher_core(sess: &ParseSess, features: &RefCell<Features>, attrs: &[ast::Attribute], first_sets: &FirstSets, matcher: &[quoted::TokenTree], follow: &TokenSet) -> TokenSet { use self::quoted::TokenTree; let mut last = TokenSet::empty(); // 2. For each token and suffix [T, SUFFIX] in M: // ensure that T can be followed by SUFFIX, and if SUFFIX may be empty, // then ensure T can also be followed by any element of FOLLOW. 'each_token: for i in 0..matcher.len() { let token = &matcher[i]; let suffix = &matcher[i+1..]; let build_suffix_first = || { let mut s = first_sets.first(suffix); if s.maybe_empty { s.add_all(follow); } s }; // (we build `suffix_first` on demand below; you can tell // which cases are supposed to fall through by looking for the // initialization of this variable.) 
let suffix_first; // First, update `last` so that it corresponds to the set // of NT tokens that might end the sequence `... token`. match *token { TokenTree::Token(..) | TokenTree::MetaVar(..) | TokenTree::MetaVarDecl(..) => { let can_be_followed_by_any; if let Err(bad_frag) = has_legal_fragment_specifier(sess, features, attrs, token) { let msg = format!("invalid fragment specifier `{}`", bad_frag); sess.span_diagnostic.struct_span_err(token.span(), &msg) .help("valid fragment specifiers are `ident`, `block`, `stmt`, `expr`, \ `pat`, `ty`, `path`, `meta`, `tt`, `item` and `vis`") .emit(); // (This eliminates false positives and duplicates // from error messages.) can_be_followed_by_any = true; } else { can_be_followed_by_any = token_can_be_followed_by_any(token); } if can_be_followed_by_any { // don't need to track tokens that work with any, last.replace_with_irrelevant(); // ... and don't need to check tokens that can be // followed by anything against SUFFIX. continue 'each_token; } else { last.replace_with(token.clone()); suffix_first = build_suffix_first(); } } TokenTree::Delimited(span, ref d) => { let my_suffix = TokenSet::singleton(d.close_tt(span)); check_matcher_core(sess, features, attrs, first_sets, &d.tts, &my_suffix); // don't track non NT tokens last.replace_with_irrelevant(); // also, we don't need to check delimited sequences // against SUFFIX continue 'each_token; } TokenTree::Sequence(sp, ref seq_rep) => { suffix_first = build_suffix_first(); // The trick here: when we check the interior, we want // to include the separator (if any) as a potential // (but not guaranteed) element of FOLLOW. So in that // case, we make a temp copy of suffix and stuff // delimiter in there. // // FIXME: Should I first scan suffix_first to see if // delimiter is already in it before I go through the // work of cloning it? But then again, this way I may // get a "tighter" span? let mut new; let my_suffix = if let Some(ref u) = seq_rep.separator { new = suffix_first.clone(); new.add_one_maybe(TokenTree::Token(sp, u.clone())); &new } else { &suffix_first }; // At this point, `suffix_first` is built, and // `my_suffix` is some TokenSet that we can use // for checking the interior of `seq_rep`. let next = check_matcher_core(sess, features, attrs, first_sets, &seq_rep.tts, my_suffix); if next.maybe_empty { last.add_all(&next); } else { last = next; } // the recursive call to check_matcher_core already ran the 'each_last // check below, so we can just keep going forward here. continue 'each_token; } } // (`suffix_first` guaranteed initialized once reaching here.) // Now `last` holds the complete set of NT tokens that could // end the sequence before SUFFIX. Check that every one works with `suffix`. 'each_last: for token in &last.tokens { if let TokenTree::MetaVarDecl(_, ref name, ref frag_spec) = *token { for next_token in &suffix_first.tokens { match is_in_follow(next_token, &frag_spec.name.as_str()) { Err((msg, help)) => { sess.span_diagnostic.struct_span_err(next_token.span(), &msg) .help(help).emit(); // don't bother reporting every source of // conflict for a particular element of `last`. 
continue 'each_last; } Ok(true) => {} Ok(false) => { let may_be = if last.tokens.len() == 1 && suffix_first.tokens.len() == 1 { "is" } else { "may be" }; sess.span_diagnostic.span_err( next_token.span(), &format!("`${name}:{frag}` {may_be} followed by `{next}`, which \ is not allowed for `{frag}` fragments", name=name, frag=frag_spec, next=quoted_tt_to_string(next_token), may_be=may_be) ); } } } } } } last } fn token_can_be_followed_by_any(tok: &quoted::TokenTree) -> bool { if let quoted::TokenTree::MetaVarDecl(_, _, frag_spec) = *tok { frag_can_be_followed_by_any(&frag_spec.name.as_str()) } else { // (Non NT's can always be followed by anthing in matchers.) true } } /// True if a fragment of type `frag` can be followed by any sort of /// token. We use this (among other things) as a useful approximation /// for when `frag` can be followed by a repetition like `$(...)*` or /// `$(...)+`. In general, these can be a bit tricky to reason about, /// so we adopt a conservative position that says that any fragment /// specifier which consumes at most one token tree can be followed by /// a fragment specifier (indeed, these fragments can be followed by /// ANYTHING without fear of future compatibility hazards). fn frag_can_be_followed_by_any(frag: &str) -> bool { match frag { "item" | // always terminated by `}` or `;` "block" | // exactly one token tree "ident" | // exactly one token tree "meta" | // exactly one token tree "tt" => // exactly one token tree true, _ => false, } } /// True if `frag` can legally be followed by the token `tok`. For /// fragments that can consume an unbounded number of tokens, `tok` /// must be within a well-defined follow set. This is intended to /// guarantee future compatibility: for example, without this rule, if /// we expanded `expr` to include a new binary operator, we might /// break macros that were relying on that binary operator as a /// separator. // when changing this do not forget to update doc/book/macros.md! fn is_in_follow(tok: &quoted::TokenTree, frag: &str) -> Result<bool, (String, &'static str)> { use self::quoted::TokenTree; if let TokenTree::Token(_, token::CloseDelim(_)) = *tok { // closing a token tree can never be matched by any fragment; // iow, we always require that `(` and `)` match, etc. Ok(true) } else { match frag { "item" => { // since items *must* be followed by either a `;` or a `}`, we can // accept anything after them Ok(true) }, "block" => { // anything can follow block, the braces provide an easy boundary to // maintain Ok(true) }, "stmt" | "expr" => match *tok { TokenTree::Token(_, ref tok) => match *tok { FatArrow | Comma | Semi => Ok(true), _ => Ok(false) }, _ => Ok(false), }, "pat" => match *tok { TokenTree::Token(_, ref tok) => match *tok { FatArrow | Comma | Eq | BinOp(token::Or) => Ok(true), Ident(i) if i.name == "if" || i.name == "in" => Ok(true), _ => Ok(false) }, _ => Ok(false), }, "path" | "ty" => match *tok { TokenTree::Token(_, ref tok) => match *tok { OpenDelim(token::DelimToken::Brace) | OpenDelim(token::DelimToken::Bracket) | Comma | FatArrow | Colon | Eq | Gt | Semi | BinOp(token::Or) => Ok(true), Ident(i) if i.name == "as" || i.name == "where" => Ok(true), _ => Ok(false) }, TokenTree::MetaVarDecl(_, _, frag) if frag.name == "block" => Ok(true), _ => Ok(false), }, "ident" => { // being a single token, idents are harmless Ok(true) }, "meta" | "tt" => { // being either a single token or a delimited sequence, tt is // harmless Ok(true) }, "vis" => { // Explicitly disallow `priv`, on the off chance it comes back. 
match *tok { TokenTree::Token(_, ref tok) => match *tok { Comma => Ok(true), Ident(i) if i.name != "priv" => Ok(true), ref tok => Ok(tok.can_begin_type()) }, TokenTree::MetaVarDecl(_, _, frag) if frag.name == "ident" || frag.name == "ty" || frag.name == "path" => Ok(true), _ => Ok(false) } }, "" => Ok(true), // keywords::Invalid _ => Err((format!("invalid fragment specifier `{}`", frag), "valid fragment specifiers are `ident`, `block`, \ `stmt`, `expr`, `pat`, `ty`, `path`, `meta`, `tt`, \ `item` and `vis`")) } } } fn has_legal_fragment_specifier(sess: &ParseSess, features: &RefCell<Features>, attrs: &[ast::Attribute], tok: &quoted::TokenTree) -> Result<(), String> { debug!("has_legal_fragment_specifier({:?})", tok); if let quoted::TokenTree::MetaVarDecl(_, _, ref frag_spec) = *tok { let frag_name = frag_spec.name.as_str(); let frag_span = tok.span(); if !is_legal_fragment_specifier(sess, features, attrs, &frag_name, frag_span) { return Err(frag_name.to_string()); } } Ok(()) } fn is_legal_fragment_specifier(sess: &ParseSess, features: &RefCell<Features>, attrs: &[ast::Attribute], frag_name: &str, frag_span: Span) -> bool { match frag_name { "item" | "block" | "stmt" | "expr" | "pat" | "path" | "ty" | "ident" | "meta" | "tt" | "" => true, "vis" => { if !features.borrow().macro_vis_matcher && !attr::contains_name(attrs, "allow_internal_unstable") { let explain = feature_gate::EXPLAIN_VIS_MATCHER; emit_feature_err(sess, "macro_vis_matcher", frag_span, GateIssue::Language, explain); } true }, _ => false, } } fn quoted_tt_to_string(tt: &quoted::TokenTree) -> String { match *tt { quoted::TokenTree::Token(_, ref tok) => ::print::pprust::token_to_string(tok), quoted::TokenTree::MetaVar(_, name) => format!("${}", name), quoted::TokenTree::MetaVarDecl(_, name, kind) => format!("${}:{}", name, kind), _ => panic!("unexpected quoted::TokenTree::{{Sequence or Delimited}} \ in follow set checker"), } }
{ use self::quoted::TokenTree; let mut first = TokenSet::empty(); for tt in tts.iter() { assert!(first.maybe_empty); match *tt { TokenTree::Token(..) | TokenTree::MetaVar(..) | TokenTree::MetaVarDecl(..) => { first.add_one(tt.clone()); return first; } TokenTree::Delimited(span, ref delimited) => { first.add_one(delimited.open_tt(span)); return first; } TokenTree::Sequence(sp, ref seq_rep) => { match self.first.get(&sp) { Some(&Some(ref subfirst)) => { // If the sequence contents can be empty, then the first // token could be the separator token itself. if let (Some(ref sep), true) = (seq_rep.separator.clone(), subfirst.maybe_empty) { first.add_one_maybe(TokenTree::Token(sp, sep.clone())); } assert!(first.maybe_empty); first.add_all(subfirst); if subfirst.maybe_empty || seq_rep.op == quoted::KleeneOp::ZeroOrMore { // continue scanning for more first // tokens, but also make sure we // restore empty-tracking state first.maybe_empty = true; continue; } else { return first; } } Some(&None) => { panic!("assume all sequences have (unique) spans for now"); } None => { panic!("We missed a sequence during FirstSets construction"); } } } } } // we only exit the loop if `tts` was empty or if every // element of `tts` matches the empty sequence. assert!(first.maybe_empty); first }
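// Illustrative sketch (not part of the original source): the follow-set
// checking above is what rejects matchers such as
//
//     macro_rules! bad {
//         ($e:expr $f:expr) => {};
//     }
//
// since `is_in_follow` only admits `=>`, `,`, or `;` after an `expr`
// fragment. Separating the two fragments with a comma satisfies the check:
//
//     macro_rules! ok {
//         ($e:expr, $f:expr) => {};
//     }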
dropdown.tsx
import React from 'react'; function DropDown(): JSX.Element {
  return (
    <svg
      aria-hidden='true'
      xmlns='http://www.w3.org/2000/svg'
      width='10'
      height='10'
      viewBox='0 0 389 254'
      fill='none'
    >
      <path
        d='M194.5 0L388.5 254H307.5L194.5 99L78.5 254H0.5L194.5 0Z'
        style={{
          stroke: 'var(--primary-color)',
          fill: 'var(--primary-color)',
          strokeWidth: '1px'
        }}
      />
    </svg>
  );
}

DropDown.displayName = 'DropDown';

export default DropDown;
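// Usage sketch (hypothetical parent, not part of the original file): the icon
// takes its colour from the `--primary-color` CSS custom property, so a
// consumer must define that property somewhere up the tree:
//
//   <span style={{ '--primary-color': '#333' } as React.CSSProperties}>
//     <DropDown />
//   </span>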
models.py
from . import db from werkzeug.security import generate_password_hash, check_password_hash from flask_login import UserMixin from . import login_manager from datetime import datetime @login_manager.user_loader def load_user(user_id): return User.query.get(int(user_id)) class User(UserMixin, db.Model): __tablename__ = 'users' id = db.Column(db.Integer, primary_key=True) username = db.Column(db.String(255)) pass_secure = db.Column(db.String(255)) email = db.Column(db.String(255), unique=True, index=True) bio = db.Column(db.String(255)) profile_pic_path = db.Column(db.String()) password_hash = db.Column(db.String(255)) posts = db.relationship('Post', backref='author', lazy='dynamic') role_id = db.Column(db.Integer, db.ForeignKey('roles.id')) @property def password(self): raise AttributeError('You cannot read the password attribute') @password.setter def password(self, password): self.pass_secure = generate_password_hash(password) def verify_password(self, password): return check_password_hash(self.pass_secure, password) def __repr__(self): return f'User {self.username}' class Role(db.Model): __tablename__='roles' id=db.Column(db.Integer, primary_key=True) name=db.Column(db.String(255), unique=True) default = db.Column(db.Boolean, default=False, index=True) permissions = db.Column(db.Integer) users = db.relationship('User', backref='role', lazy='dynamic') class Post(db.Model): __tablename__ = 'posts' id = db.Column(db.Integer, primary_key=True) body = db.Column(db.Text) timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow) author_id = db.Column(db.Integer, db.ForeignKey('users.id')) def save_post(self): '''
        Save a new blog post to the database.
        '''
        db.session.add(self)
        db.session.commit()
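# Usage sketch (hypothetical values; assumes an application context and a
# configured database):
#
#   user = User(username='jane', email='jane@example.com')
#   user.password = 'secret'                 # hashed by the password setter
#   assert user.verify_password('secret')
#
#   post = Post(body='Hello, world!', author=user)
#   post.save_post()                         # add + commit in one call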
settings.rs
use std::{ num::{NonZeroU64, NonZeroUsize}, path::{Path, PathBuf}, time::Duration, vec::Vec, }; use config::{Config, ConfigError, Environment, File, FileFormat}; use serde::Deserialize; use mqtt_util::{CredentialProviderSettings, Credentials}; use crate::persist::FlushOptions; pub const DEFAULTS: &str = include_str!("../config/default.json"); const DEFAULT_UPSTREAM_PORT: &str = "8883"; #[derive(Debug, Clone, PartialEq)] pub struct BridgeSettings { upstream: Option<ConnectionSettings>, remotes: Vec<ConnectionSettings>, messages: MessagesSettings, storage: StorageSettings, } impl BridgeSettings { pub fn new() -> Result<Self, ConfigError> { let mut config = Config::new(); config.merge(File::from_str(DEFAULTS, FileFormat::Json))?; config.merge(Environment::new())?; config.try_into() } pub fn from_file<P>(path: P) -> Result<Self, ConfigError> where P: AsRef<Path>, { let mut config = Config::new(); config.merge(File::from_str(DEFAULTS, FileFormat::Json))?; config.merge(File::from(path.as_ref()))?; config.merge(Environment::new())?; config.try_into() } pub fn from_upstream_details( addr: String, credentials: Credentials, subs: Vec<Direction>, clean_session: bool, keep_alive: Duration, storage_dir_override: &PathBuf, ) -> Result<Self, ConfigError> { let mut this = Self::new()?; let upstream_connection_settings = ConnectionSettings { name: "$upstream".into(), address: addr, subscriptions: subs, credentials, clean_session, keep_alive, }; this.upstream = Some(upstream_connection_settings); let mut storage = this.storage.clone(); if let StorageSettings::RingBuffer(ref mut ring_buffer_settings) = storage { ring_buffer_settings.directory = storage_dir_override.clone(); this.storage = storage.clone(); } Ok(this) } pub fn upstream(&self) -> Option<&ConnectionSettings> { self.upstream.as_ref() } pub fn remotes(&self) -> &Vec<ConnectionSettings> { &self.remotes } pub fn messages(&self) -> &MessagesSettings { &self.messages } pub fn storage(&self) -> &StorageSettings { &self.storage } } impl<'de> serde::Deserialize<'de> for BridgeSettings { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de>, { #[derive(Debug, serde_derive::Deserialize)] struct Inner { #[serde(flatten)] nested_bridge: Option<CredentialProviderSettings>, upstream: UpstreamSettings, remotes: Vec<ConnectionSettings>, messages: MessagesSettings, storage: StorageSettings, } let Inner { nested_bridge, upstream, remotes, messages, storage, } = serde::Deserialize::deserialize(deserializer)?; let upstream_connection_settings = nested_bridge.map(|nested_bridge| ConnectionSettings { name: "$upstream".into(), address: format!( "{}:{}", nested_bridge.gateway_hostname(), DEFAULT_UPSTREAM_PORT ), subscriptions: upstream.subscriptions, credentials: Credentials::Provider(nested_bridge), clean_session: upstream.clean_session, keep_alive: upstream.keep_alive, }); Ok(BridgeSettings { upstream: upstream_connection_settings, remotes, messages, storage, }) } } #[derive(Debug, Clone, PartialEq, Deserialize)] pub struct ConnectionSettings { name: String, address: String, #[serde(flatten)] credentials: Credentials, subscriptions: Vec<Direction>, #[serde(with = "humantime_serde")] keep_alive: Duration, clean_session: bool, } impl ConnectionSettings { pub fn name(&self) -> &str { &self.name } pub fn address(&self) -> &str { &self.address } pub fn credentials(&self) -> &Credentials { &self.credentials } pub fn subscriptions(&self) -> Vec<TopicRule> { self.subscriptions .iter() .filter_map(|sub| match sub { Direction::In(topic) 
| Direction::Both(topic) => Some(topic.clone()), _ => None, }) .collect() } pub fn forwards(&self) -> Vec<TopicRule> { self.subscriptions .iter() .filter_map(|sub| match sub { Direction::Out(topic) | Direction::Both(topic) => Some(topic.clone()), _ => None, }) .collect() } pub fn keep_alive(&self) -> Duration { self.keep_alive } pub fn clean_session(&self) -> bool { self.clean_session } } #[derive(Debug, Default, Clone, PartialEq, Deserialize)] pub struct TopicRule { topic: String, #[serde(rename = "outPrefix")] out_prefix: Option<String>, #[serde(rename = "inPrefix")] in_prefix: Option<String>, } impl TopicRule { pub fn new(topic: String, in_prefix: Option<String>, out_prefix: Option<String>) -> Self { Self { topic, out_prefix, in_prefix, } } pub fn topic(&self) -> &str { &self.topic } pub fn out_prefix(&self) -> Option<&str> { self.out_prefix.as_deref().filter(|s| !s.is_empty()) } pub fn in_prefix(&self) -> Option<&str> { self.in_prefix.as_deref().filter(|s| !s.is_empty()) } pub fn subscribe_to(&self) -> String { match &self.in_prefix { Some(local) => { if local.is_empty() { self.topic.clone() } else { format!("{}/{}", local, self.topic) } } None => self.topic.clone(), } } } #[derive(Debug, Clone, PartialEq, Deserialize)] #[serde(tag = "direction")] pub enum Direction { #[serde(rename = "in")] In(TopicRule), #[serde(rename = "out")] Out(TopicRule), #[serde(rename = "both")] Both(TopicRule), } #[derive(Debug, Clone, PartialEq, Deserialize)] pub struct MessagesSettings {} #[derive(Debug, Clone, PartialEq, Deserialize)] struct UpstreamSettings { #[serde(with = "humantime_serde")] keep_alive: Duration, clean_session: bool, subscriptions: Vec<Direction>, } #[derive(Debug, Clone, PartialEq, Deserialize)] #[serde(tag = "type")] pub enum StorageSettings { #[serde(rename = "memory")] Memory(MemorySettings), #[serde(rename = "ring_buffer")] RingBuffer(RingBufferSettings), } #[derive(Debug, Clone, PartialEq, Deserialize)] pub struct MemorySettings { max_size: NonZeroUsize, } impl MemorySettings { pub fn new(max_size: NonZeroUsize) -> Self { Self { max_size } } pub fn max_size(&self) -> NonZeroUsize { self.max_size } } #[derive(Debug, Clone, PartialEq, Deserialize)] pub struct RingBufferSettings { max_file_size: NonZeroU64, directory: PathBuf, flush_options: FlushOptions, } impl RingBufferSettings { pub fn new(max_file_size: NonZeroU64, directory: PathBuf, flush_options: FlushOptions) -> Self { Self { max_file_size, directory, flush_options, } } pub fn max_file_size(&self) -> NonZeroU64 { self.max_file_size } pub fn directory(&self) -> &PathBuf { &self.directory } pub fn flush_options(&self) -> &FlushOptions { &self.flush_options } } #[cfg(test)] mod tests { use config::ConfigError; use matches::assert_matches; use serial_test::serial; use mqtt_broker_tests_util::env; use super::*; #[test] #[serial(env_settings)] fn new_overrides_settings_from_env() { it_overrides_settings_from_env(BridgeSettings::new); } #[test] #[serial(env_settings)] fn new_no_upstream_settings() { let settings = BridgeSettings::new().unwrap(); assert_eq!(settings.remotes().len(), 0); assert_eq!(settings.upstream(), None); } #[test] #[serial(env_settings)] fn new_reads_storage_settings() { let settings = BridgeSettings::new().unwrap(); let storage_settings = settings.storage(); // Should exist from default.json. 
assert_matches!(storage_settings, StorageSettings::RingBuffer(_)); if let StorageSettings::RingBuffer(rb) = storage_settings { assert_eq!(rb.max_file_size(), NonZeroU64::new(33_554_432).unwrap()); assert_eq!(*rb.directory(), PathBuf::from("/tmp/mqttd/")); assert_eq!(*rb.flush_options(), FlushOptions::AfterEachWrite); } } #[test] #[serial(env_settings)] fn from_file_reads_nested_bridge_settings() { let settings = BridgeSettings::from_file("tests/config.json").unwrap(); let upstream = settings.upstream().unwrap(); assert_eq!(upstream.name(), "$upstream"); assert_eq!(upstream.address(), "edge1:8883"); match upstream.credentials() { Credentials::Provider(provider) => { assert_eq!(provider.iothub_hostname(), "iothub"); assert_eq!(provider.device_id(), "d1"); assert_eq!(provider.module_id(), "mymodule"); assert_eq!(provider.generation_id(), "321"); assert_eq!(provider.workload_uri(), "uri"); } _ => panic!("Expected provider settings"), }; } #[test] #[serial(env_settings)] fn from_file_reads_remotes_settings() { let settings = BridgeSettings::from_file("tests/config.json").unwrap(); let len = settings.remotes().len(); assert_eq!(len, 1); let remote = settings.remotes().first().unwrap(); assert_eq!(remote.name(), "r1"); assert_eq!(remote.address(), "remote:8883"); assert_eq!(remote.keep_alive().as_secs(), 60); assert_eq!(remote.clean_session(), false); match remote.credentials() { Credentials::PlainText(auth_settings) => { assert_eq!(auth_settings.username(), "mymodule"); assert_eq!(auth_settings.password(), "pass"); assert_eq!(auth_settings.client_id(), "client"); } _ => panic!("Expected plaintext settings"), }; } #[test] #[serial(env_settings)] fn from_file_reads_storage_settings_without_explicit_storage() { let settings = BridgeSettings::from_file("tests/config.json").unwrap(); let storage_settings = settings.storage(); // Should exist from default.json. 
assert_matches!(storage_settings, StorageSettings::RingBuffer(_)); if let StorageSettings::RingBuffer(rb) = storage_settings { assert_eq!(rb.max_file_size(), NonZeroU64::new(33_554_432).unwrap()); assert_eq!(*rb.directory(), PathBuf::from("/tmp/mqttd/")); assert_eq!(*rb.flush_options(), FlushOptions::AfterEachWrite); } } #[test] #[serial(env_settings)] fn from_file_reads_storage_settings_with_memory_override() { let settings = BridgeSettings::from_file("tests/config.memory.json").unwrap(); let storage_settings = settings.storage(); assert_matches!(storage_settings, StorageSettings::Memory(_)); if let StorageSettings::Memory(mem) = storage_settings { assert_eq!(mem.max_size(), NonZeroUsize::new(1024).unwrap()); } } #[test] #[serial(env_settings)] fn from_file_reads_storage_settings_with_ring_buffer_override() { let settings = BridgeSettings::from_file("tests/config.ring_buffer.json").unwrap(); let storage_settings = settings.storage(); assert_matches!(storage_settings, StorageSettings::RingBuffer(_)); if let StorageSettings::RingBuffer(rb) = storage_settings { assert_eq!(rb.max_file_size(), NonZeroU64::new(2048).unwrap()); assert_eq!(*rb.directory(), PathBuf::from("/tmp/mqttd/tests/")); assert_eq!(*rb.flush_options(), FlushOptions::Off); } } #[test] #[serial(env_settings)] fn from_default_sets_keepalive_settings() { let settings = BridgeSettings::from_file("tests/config.json").unwrap(); assert_eq!(settings.upstream().unwrap().keep_alive().as_secs(), 60); } #[test] #[serial(env_settings)] fn from_file_overrides_settings_from_env() { it_overrides_settings_from_env(|| BridgeSettings::from_file("tests/config.json")); } #[test] #[serial(env_settings)] fn from_env_no_gateway_hostname() { let _device_id = env::set_var("IOTEDGE_DEVICEID", "device1"); let _module_id = env::set_var("IOTEDGE_MODULEID", "m1"); let _generation_id = env::set_var("IOTEDGE_MODULEGENERATIONID", "123"); let _workload_uri = env::set_var("IOTEDGE_WORKLOADURI", "workload"); let _iothub_hostname = env::set_var("IOTEDGE_IOTHUBHOSTNAME", "iothub"); let settings = BridgeSettings::new().unwrap(); assert_eq!(settings.upstream(), None); } fn it_overrides_settings_from_env<F>(make_settings: F) where F: FnOnce() -> Result<BridgeSettings, ConfigError>, { let _gateway_hostname = env::set_var("IOTEDGE_GATEWAYHOSTNAME", "upstream"); let _device_id = env::set_var("IOTEDGE_DEVICEID", "device1"); let _module_id = env::set_var("IOTEDGE_MODULEID", "m1"); let _generation_id = env::set_var("IOTEDGE_MODULEGENERATIONID", "123"); let _workload_uri = env::set_var("IOTEDGE_WORKLOADURI", "workload"); let _iothub_hostname = env::set_var("IOTEDGE_IOTHUBHOSTNAME", "iothub");
        let settings = make_settings().unwrap();
        let upstream = settings.upstream().unwrap();

        assert_eq!(upstream.name(), "$upstream");
        assert_eq!(upstream.address(), "upstream:8883");

        match upstream.credentials() {
            Credentials::Provider(provider) => {
                assert_eq!(provider.iothub_hostname(), "iothub");
                assert_eq!(provider.device_id(), "device1");
                assert_eq!(provider.module_id(), "m1");
                assert_eq!(provider.generation_id(), "123");
                assert_eq!(provider.workload_uri(), "workload");
            }
            _ => panic!("Expected provider settings"),
        };
    }
}
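// Illustrative sketch (assumed JSON shape, inferred from the serde attributes
// and the tests above): a `storage` section that deserializes into
// `StorageSettings::RingBuffer` would look like
//
//     "storage": {
//         "type": "ring_buffer",
//         "max_file_size": 2048,
//         "directory": "/tmp/mqttd/tests/"
//     }
//
// while `"type": "memory"` with a `max_size` field selects
// `StorageSettings::Memory` instead.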
marketdata.py
import sys from abc import ABC from dataclasses import dataclass, field from enum import Enum from typing import List, Tuple from abides_core import Message, NanosecondTime from ..orders import Side @dataclass class MarketDataSubReqMsg(Message, ABC): """ Base class for creating or cancelling market data subscriptions with an ``ExchangeAgent``. Attributes: symbol: The symbol of the security to request a data subscription for. cancel: If True attempts to create a new subscription, if False attempts to cancel an existing subscription. """ symbol: str cancel: bool = False @dataclass class MarketDataFreqBasedSubReqMsg(MarketDataSubReqMsg, ABC): """ Base class for creating or cancelling market data subscriptions with an ``ExchangeAgent``. Attributes: symbol: The symbol of the security to request a data subscription for. cancel: If True attempts to create a new subscription, if False attempts to cancel an existing subscription. freq: The frequency in nanoseconds^-1 at which to receive market updates. """ # Inherited Fields: # symbol: str # cancel: bool = False freq: int = 1 @dataclass class MarketDataEventBasedSubReqMsg(MarketDataSubReqMsg, ABC): """ Base class for creating or cancelling market data subscriptions with an ``ExchangeAgent``. Attributes: symbol: The symbol of the security to request a data subscription for. cancel: If True attempts to create a new subscription, if False attempts to cancel an existing subscription. """ # Inherited Fields: # symbol: str # cancel: bool = False @dataclass class L1SubReqMsg(MarketDataFreqBasedSubReqMsg):
    """
    This message requests the creation or cancellation of a subscription to L1 order
    book data from an ``ExchangeAgent``.

    Attributes:
        symbol: The symbol of the security to request a data subscription for.
        cancel: If False attempts to create a new subscription, if True attempts to
            cancel an existing subscription.
        freq: The frequency in nanoseconds^-1 at which to receive market updates.
    """

    # Inherited Fields:
    # symbol: str
    # cancel: bool = False
    # freq: int = 1

    pass
@dataclass class L2SubReqMsg(MarketDataFreqBasedSubReqMsg): """ This message requests the creation or cancellation of a subscription to L2 order book data from an ``ExchangeAgent``. Attributes: symbol: The symbol of the security to request a data subscription for. cancel: If True attempts to create a new subscription, if False attempts to cancel an existing subscription. freq: The frequency in nanoseconds^-1 at which to receive market updates. depth: The maximum number of price levels on both sides of the order book to return data for. Defaults to the entire book. """ # Inherited Fields: # symbol: str # cancel: bool = False # freq: int = 1 depth: int = sys.maxsize @dataclass class L3SubReqMsg(MarketDataFreqBasedSubReqMsg): """ This message requests the creation or cancellation of a subscription to L3 order book data from an ``ExchangeAgent``. Attributes: symbol: The symbol of the security to request a data subscription for. cancel: If True attempts to create a new subscription, if False attempts to cancel an existing subscription. freq: The frequency in nanoseconds^-1 at which to receive market updates. depth: The maximum number of price levels on both sides of the order book to return data for. Defaults to the entire book. """ # Inherited Fields: # symbol: str # cancel: bool = False # freq: int = 1 depth: int = sys.maxsize @dataclass class TransactedVolSubReqMsg(MarketDataFreqBasedSubReqMsg): """ This message requests the creation or cancellation of a subscription to transacted volume order book data from an ``ExchangeAgent``. Attributes: symbol: The symbol of the security to request a data subscription for. cancel: If True attempts to create a new subscription, if False attempts to cancel an existing subscription. freq: The frequency in nanoseconds^-1 at which to receive market updates. lookback: The period in time backwards from the present to sum the transacted volume for. """ # Inherited Fields: # symbol: str # cancel: bool = False # freq: int = 1 lookback: str = "1min" @dataclass class BookImbalanceSubReqMsg(MarketDataEventBasedSubReqMsg): """ This message requests the creation or cancellation of a subscription to book imbalance events. Attributes: symbol: The symbol of the security to request a data subscription for. cancel: If True attempts to create a new subscription, if False attempts to cancel an existing subscription. min_imbalance: The minimum book imbalance needed to trigger this subscription. 0.0 is no imbalance. 1.0 is full imbalance (ie. liquidity drop). """ # Inherited Fields: # symbol: str # cancel: bool = False min_imbalance: float = 1.0 @dataclass class MarketDataMsg(Message, ABC): """ Base class for returning market data subscription results from an ``ExchangeAgent``. The ``last_transaction`` and ``exchange_ts`` fields are not directly related to the subscription data but are included for bookkeeping purposes. Attributes: symbol: The symbol of the security this data is for. last_transaction: The time of the last transaction that happened on the exchange. exchange_ts: The time that the message was sent from the exchange. """ symbol: str last_transaction: int exchange_ts: NanosecondTime @dataclass class MarketDataEventMsg(MarketDataMsg, ABC): """ Base class for returning market data subscription results from an ``ExchangeAgent``. The ``last_transaction`` and ``exchange_ts`` fields are not directly related to the subscription data but are included for bookkeeping purposes. Attributes: symbol: The symbol of the security this data is for. 
last_transaction: The time of the last transaction that happened on the exchange. exchange_ts: The time that the message was sent from the exchange. stage: The stage of this event (start or finish). """ class Stage(Enum): START = "START" FINISH = "FINISH" stage: Stage @dataclass class L1DataMsg(MarketDataMsg): """ This message returns L1 order book data as part of an L1 data subscription. Attributes: symbol: The symbol of the security this data is for. last_transaction: The time of the last transaction that happened on the exchange. exchange_ts: The time that the message was sent from the exchange. bid: The best bid price and the available volume at that price. ask: The best ask price and the available volume at that price. """ # Inherited Fields: # symbol: str # last_transaction: int # exchange_ts: NanosecondTime bid: Tuple[int, int] ask: Tuple[int, int] @dataclass class L2DataMsg(MarketDataMsg): """ This message returns L2 order book data as part of an L2 data subscription. Attributes: symbol: The symbol of the security this data is for. last_transaction: The time of the last transaction that happened on the exchange. exchange_ts: The time that the message was sent from the exchange. bids: A list of tuples containing the price and available volume at each bid price level. asks: A list of tuples containing the price and available volume at each ask price level. """ # Inherited Fields: # symbol: str # last_transaction: int # exchange_ts: NanosecondTime bids: List[Tuple[int, int]] asks: List[Tuple[int, int]] # TODO: include requested depth @dataclass class L3DataMsg(MarketDataMsg): """ This message returns L3 order book data as part of an L3 data subscription. Attributes: symbol: The symbol of the security this data is for. last_transaction: The time of the last transaction that happened on the exchange. exchange_ts: The time that the message was sent from the exchange. bids: A list of tuples containing the price and a list of order sizes at each bid price level. asks: A list of tuples containing the price and a list of order sizes at each ask price level. """ # Inherited Fields: # symbol: str # last_transaction: int # exchange_ts: NanosecondTime bids: List[Tuple[int, List[int]]] asks: List[Tuple[int, List[int]]] # TODO: include requested depth @dataclass class TransactedVolDataMsg(MarketDataMsg): """ This message returns order book transacted volume data as part of an transacted volume data subscription. Attributes: symbol: The symbol of the security this data is for. last_transaction: The time of the last transaction that happened on the exchange. exchange_ts: The time that the message was sent from the exchange. bid_volume: The total transacted volume of bid orders for the given lookback period. ask_volume: The total transacted volume of ask orders for the given lookback period. """ # Inherited Fields: # symbol: str # last_transaction: int # exchange_ts: NanosecondTime bid_volume: int ask_volume: int # TODO: include lookback period @dataclass class BookImbalanceDataMsg(MarketDataEventMsg): """ Sent when the book imbalance reaches a certain threshold dictated in the subscription request message. Attributes: symbol: The symbol of the security this data is for. last_transaction: The time of the last transaction that happened on the exchange. exchange_ts: The time that the message was sent from the exchange. stage: The stage of this event (start or finish). imbalance: Proportional size of the imbalance. side: Side of the book that the imbalance is towards. 
""" # Inherited Fields: # symbol: str # last_transaction: int # exchange_ts: pd.Timestamp # stage: MarketDataEventMsg.Stage imbalance: float side: Side
""" This message requests the creation or cancellation of a subscription to L1 order book data from an ``ExchangeAgent``. Attributes: symbol: The symbol of the security to request a data subscription for. cancel: If True attempts to create a new subscription, if False attempts to cancel an existing subscription. freq: The frequency in nanoseconds^-1 at which to receive market updates. """ # Inherited Fields: # symbol: str # cancel: bool = False # freq: int = 1 pass
library-preload.js
jQuery.sap.registerPreloadedModules({ "version":"2.0", "modules":{ "library/d/some.js":function(){/*! * ${copyright} */ console.log("HelloWorld");
} }});
test_nsd4.py
# Copyright (C) 2013 eNovance SAS <[email protected]> # # Author: Artom Lifshitz <[email protected]> # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import socket import ssl import eventlet import fixtures from mock import MagicMock from designate import exceptions from designate import objects from designate.tests.test_backend import BackendTestCase from designate.tests import resources from designate.backend import impl_nsd4 class NSD4ServerStub: recved_command = None response = 'ok' keyfile = os.path.join(resources.path, 'ssl', 'nsd_server.key') certfile = os.path.join(resources.path, 'ssl', 'nsd_server.pem') def handle(self, client_sock, client_addr): stream = client_sock.makefile() self.recved_command = stream.readline() stream.write(self.response) stream.flush() def start(self): self.port = 1025 while True: try: eventlet.spawn_n(eventlet.serve, eventlet.wrap_ssl( eventlet.listen(('127.0.0.1', self.port)), keyfile=self.keyfile, certfile=self.certfile, server_side=True), self.handle) break except socket.error: self.port = self.port + 1 def stop(self):
        eventlet.StopServe()
class NSD4Fixture(fixtures.Fixture): def setUp(self): super(NSD4Fixture, self).setUp() self.server = NSD4ServerStub() self.server.start() self.addCleanup(self.tearDown) def tearDown(self): self.server.stop() # NOTE: We'll only test the specifics to the nsd4 backend here. # Rest is handled via scenarios class NSD4BackendTestCase(BackendTestCase): def setUp(self): super(NSD4BackendTestCase, self).setUp() self.server_fixture = NSD4Fixture() self.useFixture(self.server_fixture) keyfile = os.path.join(resources.path, 'ssl', 'nsd_control.key') certfile = os.path.join(resources.path, 'ssl', 'nsd_control.pem') self.target = objects.PoolTarget.from_dict({ 'id': '4588652b-50e7-46b9-b688-a9bad40a873e', 'type': 'nsd4', 'masters': [{'host': '192.0.2.1', 'port': 53}, {'host': '192.0.2.2', 'port': 35}], 'options': [ {'key': 'keyfile', 'value': keyfile}, {'key': 'certfile', 'value': certfile}, {'key': 'pattern', 'value': 'test-pattern'}, {'key': 'port', 'value': self.server_fixture.server.port} ], }) self.backend = impl_nsd4.NSD4Backend(self.target) def test_create_domain(self): context = self.get_context() domain = self.get_domain_fixture() self.backend.create_domain(context, domain) command = 'NSDCT1 addzone %s test-pattern\n' % domain['name'] self.assertEqual(command, self.server_fixture.server.recved_command) def test_delete_domain(self): context = self.get_context() domain = self.get_domain_fixture() self.backend.delete_domain(context, domain) command = 'NSDCT1 delzone %s\n' % domain['name'] self.assertEqual(command, self.server_fixture.server.recved_command) def test_server_not_ok(self): self.server_fixture.server.response = 'goat' context = self.get_context() domain = self.get_domain_fixture() self.assertRaises(exceptions.Backend, self.backend.create_domain, context, domain) def test_ssl_error(self): self.backend._command = MagicMock(side_effect=ssl.SSLError) context = self.get_context() domain = self.get_domain_fixture() self.assertRaises(exceptions.Backend, self.backend.create_domain, context, domain) def test_socket_error(self): self.backend._command = MagicMock(side_effect=socket.error) context = self.get_context() domain = self.get_domain_fixture() self.assertRaises(exceptions.Backend, self.backend.create_domain, context, domain)
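# Protocol sketch (as exercised by the tests above): the backend opens an SSL
# connection to the nsd-control port and writes a single command line, e.g.
#
#   NSDCT1 addzone example.com. test-pattern
#
# expecting the literal reply "ok"; any other reply surfaces as
# exceptions.Backend.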
tty.rs
// Copyright (c) 2021 Quark Container Authors / 2018 The gVisor Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use alloc::sync::Arc; use core::any::Any; use ::qlib::mutex::*; use core::ops::Deref; use super::super::super::kernel::waiter::*; use super::super::super::kernel::waiter::qlock::*; use super::super::super::guestfdnotifier::*; use super::super::super::qlib::common::*; use super::super::super::qlib::linux_def::*; use super::super::super::qlib::socket_buf::*; use super::super::super::task::*; use super::super::super::threadmgr::processgroup::*; use super::super::super::threadmgr::session::*; use super::super::super::SHARESPACE; use super::super::super::quring::QUring; use super::super::file::*; use super::super::dirent::*; use super::super::attr::*; use super::super::dentry::*; use super::super::inode::*; use super::super::host::hostinodeop::*; use super::hostfileop::*; use super::ioctl::*; pub const NUM_CONTROL_CHARACTERS: usize = 19; pub const DISABLED_CHAR: u8 = 0; #[derive(Clone, Default, Copy)] #[repr(C)] pub struct Winsize { pub Row: u16, pub Col: u16, pub Xpixel: u16, pub Ypixel: u16, } #[derive(Clone, Default, Copy)] #[repr(C)] pub struct WindowSize { pub Rows: u16, pub Cols: u16, pub Pad: [u8; 4], } pub struct InputFlags {} impl InputFlags { pub const IGNBRK: u32 = 0o0000001; pub const BRKINT: u32 = 0o0000002; pub const IGNPAR: u32 = 0o0000004; pub const PARMRK: u32 = 0o0000010; pub const INPCK: u32 = 0o0000020; pub const ISTRIP: u32 = 0o0000040; pub const INLCR: u32 = 0o0000100; pub const IGNCR: u32 = 0o0000200; pub const ICRNL: u32 = 0o0000400; pub const IUCLC: u32 = 0o0001000; pub const IXON: u32 = 0o0002000; pub const IXANY: u32 = 0o0004000; pub const IXOFF: u32 = 0o0010000; pub const IMAXBEL: u32 = 0o0020000; pub const IUTF8: u32 = 0o0040000; } pub struct OutputFlags {} impl OutputFlags { pub const OPOST: u32 = 0o0000001; pub const OLCUC: u32 = 0o0000002; pub const ONLCR: u32 = 0o0000004; pub const OCRNL: u32 = 0o0000010; pub const ONOCR: u32 = 0o0000020; pub const ONLRET: u32 = 0o0000040; pub const OFILL: u32 = 0o0000100; pub const OFDEL: u32 = 0o0000200; pub const NLDLY: u32 = 0o0000400; pub const NL0: u32 = 0o0000000; pub const NL1: u32 = 0o0000400; pub const CRDLY: u32 = 0o0003000; pub const CR0: u32 = 0o0000000; pub const CR1: u32 = 0o0001000; pub const CR2: u32 = 0o0002000; pub const CR3: u32 = 0o0003000; pub const TABDLY: u32 = 0o0014000; pub const TAB0: u32 = 0o0000000; pub const TAB1: u32 = 0o0004000; pub const TAB2: u32 = 0o0010000; pub const TAB3: u32 = 0o0014000; pub const XTABS: u32 = 0o0014000; pub const BSDLY: u32 = 0o0020000; pub const BS0: u32 = 0o0000000; pub const BS1: u32 = 0o0020000; pub const VTDLY: u32 = 0o0040000; pub const VT0: u32 = 0o0000000; pub const VT1: u32 = 0o0040000; pub const FFDLY: u32 = 0o0100000; pub const FF0: u32 = 0o0000000; pub const FF1: u32 = 0o0100000; } pub struct ControlFlags {} impl ControlFlags { pub const CBAUD: u32 = 0o0010017; pub const B0: u32 = 0o0000000; pub const B50: u32 = 0o0000001; pub const B75: u32 = 
0o0000002; pub const B110: u32 = 0o0000003; pub const B134: u32 = 0o0000004; pub const B150: u32 = 0o0000005; pub const B200: u32 = 0o0000006; pub const B300: u32 = 0o0000007; pub const B600: u32 = 0o0000010; pub const B1200: u32 = 0o0000011; pub const B1800: u32 = 0o0000012; pub const B2400: u32 = 0o0000013; pub const B4800: u32 = 0o0000014; pub const B9600: u32 = 0o0000015; pub const B19200: u32 = 0o0000016; pub const B38400: u32 = 0o0000017; pub const EXTA: u32 = Self::B19200; pub const EXTB: u32 = Self::B38400; pub const CSIZE: u32 = 0o0000060; pub const CS5: u32 = 0o0000000; pub const CS6: u32 = 0o0000020; pub const CS7: u32 = 0o0000040; pub const CS8: u32 = 0o0000060; pub const CSTOPB: u32 = 0o0000100; pub const CREAD: u32 = 0o0000200; pub const PARENB: u32 = 0o0000400; pub const PARODD: u32 = 0o0001000; pub const HUPCL: u32 = 0o0002000; pub const CLOCAL: u32 = 0o0004000; pub const CBAUDEX: u32 = 0o0010000; pub const BOTHER: u32 = 0o0010000; pub const B57600: u32 = 0o0010001; pub const B115200: u32 = 0o0010002; pub const B230400: u32 = 0o0010003; pub const B460800: u32 = 0o0010004; pub const B500000: u32 = 0o0010005; pub const B576000: u32 = 0o0010006; pub const B921600: u32 = 0o0010007; pub const B1000000: u32 = 0o0010010; pub const B1152000: u32 = 0o0010011; pub const B1500000: u32 = 0o0010012; pub const B2000000: u32 = 0o0010013; pub const B2500000: u32 = 0o0010014; pub const B3000000: u32 = 0o0010015; pub const B3500000: u32 = 0o0010016; pub const B4000000: u32 = 0o0010017; pub const CIBAUD: u32 = 0o002003600000; pub const CMSPAR: u32 = 0o010000000000; pub const CRTSCTS: u32 = 0o020000000000; // IBSHIFT is the shift from CBAUD to CIBAUD. pub const IBSHIFT: u32 = 0o16; } pub struct LocalFlags {} impl LocalFlags { pub const ISIG: u32 = 0o0000001; pub const ICANON: u32 = 0o0000002; pub const XCASE: u32 = 0o0000004; pub const ECHO: u32 = 0o0000010; pub const ECHOE: u32 = 0o0000020; pub const ECHOK: u32 = 0o0000040; pub const ECHONL: u32 = 0o0000100; pub const NOFLSH: u32 = 0o0000200; pub const TOSTOP: u32 = 0o0000400; pub const ECHOCTL: u32 = 0o0001000; pub const ECHOPRT: u32 = 0o0002000; pub const ECHOKE: u32 = 0o0004000; pub const FLUSHO: u32 = 0o0010000; pub const PENDIN: u32 = 0o0040000; pub const IEXTEN: u32 = 0o0100000; pub const EXTPROC: u32 = 0o0200000; } pub struct ControlFlagIndex {} impl ControlFlagIndex { pub const VINTR: usize = 0; pub const VQUIT: usize = 1; pub const VERASE: usize = 2; pub const VKILL: usize = 3; pub const VEOF: usize = 4; pub const VTIME: usize = 5; pub const VMIN: usize = 6; pub const VSWTC: usize = 7; pub const VSTART: usize = 8; pub const VSTOP: usize = 9; pub const VSUSP: usize = 10; pub const VEOL: usize = 11; pub const VREPRINT: usize = 12; pub const VDISCARD: usize = 13; pub const VWERASE: usize = 14; pub const VLNEXT: usize = 15; pub const VEOL2: usize = 16; } pub const fn ControlCharacter(c: char) -> u8 { return c as u8 - 'A' as u8 + 1 } pub const DEFAULT_CONTROL_CHARACTERS: [u8; NUM_CONTROL_CHARACTERS] = [ ControlCharacter('C'), // VINTR = ^C ControlCharacter('\\'), // VQUIT = ^\ '\x7f' as u8, // VERASE = DEL ControlCharacter('U'), // VKILL = ^U ControlCharacter('D'), // VEOF = ^D 0, // VTIME 1, // VMIN 0, // VSWTC ControlCharacter('Q'), // VSTART = ^Q ControlCharacter('S'), // VSTOP = ^S ControlCharacter('Z'), // VSUSP = ^Z 0, // VEOL ControlCharacter('R'), // VREPRINT = ^R ControlCharacter('O'), // VDISCARD = ^O ControlCharacter('W'), // VWERASE = ^W ControlCharacter('V'), // VLNEXT = ^V 0, // VEOL2 0, 0, ]; pub const MASTER_TERMIOS: 
KernelTermios = KernelTermios { InputFlags: 0, OutputFlags: 0, ControlFlags: ControlFlags::B38400 | ControlFlags::CS8 | ControlFlags::CREAD, LocalFlags: 0, LineDiscipline: 0, ControlCharacters: DEFAULT_CONTROL_CHARACTERS, InputSpeed: 38400, OutputSpeed: 38400, }; pub const DEFAULT_SLAVE_TERMIOS: KernelTermios = KernelTermios { InputFlags: InputFlags::ICRNL | InputFlags::IXON, OutputFlags: OutputFlags::OPOST | OutputFlags::ONLCR, ControlFlags: ControlFlags::B38400 | ControlFlags::CS8 | ControlFlags::CREAD, LocalFlags: LocalFlags::ISIG | LocalFlags::ICANON | LocalFlags::ECHO | LocalFlags::ECHOE | LocalFlags::ECHOK | LocalFlags::ECHOCTL | LocalFlags::ECHOKE | LocalFlags::IEXTEN, LineDiscipline: 0, ControlCharacters: DEFAULT_CONTROL_CHARACTERS, InputSpeed: 38400, OutputSpeed: 38400, }; #[derive(Clone, Default, Copy)] #[repr(C)] pub struct Termios { pub InputFlags: u32, pub OutputFlags: u32, pub ControlFlags: u32, pub LocalFlags: u32, pub LineDiscipline: u8, pub ControlCharacters: [u8; NUM_CONTROL_CHARACTERS], } #[derive(Clone, Default)] pub struct KernelTermios { pub InputFlags: u32, pub OutputFlags: u32, pub ControlFlags: u32, pub LocalFlags: u32, pub LineDiscipline: u8, pub ControlCharacters: [u8; NUM_CONTROL_CHARACTERS], pub InputSpeed: u32, pub OutputSpeed: u32, } impl KernelTermios { pub const VINTR: u8 = 0; pub const VQUIT: u8 = 1; pub const VERASE: u8 = 2; pub const VKILL: u8 = 3; pub const VEOF: u8 = 4; pub const VTIME: u8 = 5; pub const VMIN: u8 = 6; pub const VSWTC: u8 = 7; pub const VSTART: u8 = 8; pub const VSTOP: u8 = 9; pub const VSUSP: u8 = 10; pub const VEOL: u8 = 11; pub const VREPRINT: u8 = 12; pub const VDISCARD: u8 = 13; pub const VWERASE: u8 = 14; pub const VLNEXT: u8 = 15; pub const VEOL2: u8 = 16; pub fn IEnabled(&self, flag: u32) -> bool { return self.InputFlags & flag == flag; } pub fn OEnabled(&self, flag: u32) -> bool { return self.OutputFlags & flag == flag; } pub fn CEnabled(&self, flag: u32) -> bool { return self.ControlFlags & flag == flag; } pub fn LEnabled(&self, flag: u32) -> bool { return self.LocalFlags & flag == flag; } pub fn ToTermios(&self) -> Termios { return Termios { InputFlags: self.InputFlags, OutputFlags: self.OutputFlags, ControlFlags: self.ControlFlags, LocalFlags: self.LocalFlags, LineDiscipline: self.LineDiscipline, ControlCharacters: self.ControlCharacters, } } pub fn FromTermios(&mut self, term: &Termios) { self.InputFlags = term.InputFlags; self.OutputFlags = term.OutputFlags; self.ControlFlags = term.ControlFlags; self.LocalFlags = term.LocalFlags; self.LineDiscipline = term.LineDiscipline; self.ControlCharacters = term.ControlCharacters; } pub fn IsTerminating(&self, cBytes: &[u8]) -> bool { if cBytes.len() != 1 { return false; } let c = cBytes[0]; if self.IsEOF(c) { return true; } if c == DISABLED_CHAR { return false; } else if c == '\n' as u8 || c == self.ControlCharacters[Self::VEOL as usize] { return true; } else if c == self.ControlCharacters[Self::VEOL2 as usize] { return self.LEnabled(LocalFlags::IEXTEN); } return false; } pub fn IsEOF(&self, c: u8) -> bool { return c == self.ControlCharacters[Self::VEOF as usize] && self.ControlCharacters[Self::VEOF as usize] == DISABLED_CHAR } } pub struct TTYFileOpsInternal { pub fileOps: Arc<HostFileOp>, pub termios: KernelTermios, pub session: Option<Session>, pub fgProcessgroup: Option<ProcessGroup>, pub fd: i32, pub buf: Arc<SocketBuff>, pub queue: Queue, pub bufWriteLock: QAsyncLock, } impl TTYFileOpsInternal { fn checkChange(&self, _task: &Task, _sig: Signal) -> Result<()> { return Ok(()) 
/*let thread = match &task.thread { // No task? Linux does not have an analog for this case, but // tty_check_change is more of a blacklist of cases than a // whitelist, and is surprisingly permissive. Allowing the // change seems most appropriate. None => return Ok(()), Some(ref t) => t.clone(), }; let tg = thread.lock().tg.clone(); let pg = tg.ProcessGroup(); // If the session for the task is different than the session for the // controlling TTY, then the change is allowed. Seems like a bad idea, // but that's exactly what linux does. if tg.Session() != self.fgProcessgroup.Session() { return Ok(()) } // If we are the foreground process group, then the change is allowed. if pg == self.fgProcessgroup { return Ok(()) } // We are not the foreground process group. // Is the provided signal blocked or ignored? if (thread.SignalMask() & SignalSet::SignalSetOf(&sig).0 != 0 || tg.SignalHandlers())*/ } } #[derive(Clone)] pub struct TTYFileOps(Arc<QMutex<TTYFileOpsInternal>>); impl Deref for TTYFileOps { type Target = Arc<QMutex<TTYFileOpsInternal>>; fn deref(&self) -> &Arc<QMutex<TTYFileOpsInternal>> { &self.0 } } pub const ENABLE_RINGBUF : bool = true; impl TTYFileOps { pub fn New(fops: Arc<HostFileOp>) -> Self { let queue = fops.InodeOp.lock().queue.clone(); let fd = fops.InodeOp.lock().HostFd; let bufWriteLock = fops.InodeOp.lock().bufWriteLock.clone(); let internal = TTYFileOpsInternal { fileOps: fops, termios: DEFAULT_SLAVE_TERMIOS, session: None, fgProcessgroup: None, fd: fd, buf: Arc::new(SocketBuff::Init(MemoryDef::DEFAULT_BUF_PAGE_COUNT)), queue: queue, bufWriteLock: bufWriteLock, }; if SHARESPACE.config.read().UringIO && ENABLE_RINGBUF { QUring::BufSockInit(internal.fd, internal.queue.clone(), internal.buf.clone(), false).unwrap(); } return Self(Arc::new(QMutex::new(internal))) } pub fn InitForegroundProcessGroup(&self, pg: &ProcessGroup) { let mut t = self.lock(); if t.fgProcessgroup.is_some() { panic!("foreground process group is already set"); } t.fgProcessgroup = Some(pg.clone()); t.session = Some(pg.Session()); } pub fn ForegroundProcessGroup(&self) -> Option<ProcessGroup> { return self.lock().fgProcessgroup.clone(); } pub fn BufWriteLock(&self) -> QAsyncLock { return self.lock().bufWriteLock.clone(); } } impl Waitable for TTYFileOps { fn Readiness(&self, task: &Task,mask: EventMask) -> EventMask { if SHARESPACE.config.read().UringIO && ENABLE_RINGBUF { return self.lock().buf.Events() & mask } let fops = self.lock().fileOps.clone(); return fops.Readiness(task, mask) } fn EventRegister(&self, task: &Task,e: &WaitEntry, mask: EventMask) { let queue = self.lock().queue.clone(); queue.EventRegister(task, e, mask); let fd = self.lock().fd; if !SHARESPACE.config.read().UringIO && ENABLE_RINGBUF { UpdateFD(fd).unwrap(); }; } fn EventUnregister(&self, task: &Task,e: &WaitEntry) { let queue = self.lock().queue.clone(); queue.EventUnregister(task, e); let fd = self.lock().fd; if !SHARESPACE.config.read().UringIO && ENABLE_RINGBUF { UpdateFD(fd).unwrap(); }; } } impl SpliceOperations for TTYFileOps {} impl FileOperations for TTYFileOps { fn as_any(&self) -> &Any { return self; } fn FopsType(&self) -> FileOpsType { return FileOpsType::TTYFileOps } fn Seekable(&self) -> bool { return false; } fn Seek(&self, task: &Task, f: &File, whence: i32, current: i64, offset: i64) -> Result<i64> { let fops = self.lock().fileOps.clone(); let res = fops.Seek(task, f, whence, current, offset); return res; } fn ReadDir(&self, task: &Task, f: &File, offset: i64, serializer: &mut DentrySerializer) -> Result<i64> 
{ let fops = self.lock().fileOps.clone(); let res = fops.ReadDir(task, f, offset, serializer); return res; } fn ReadAt(&self, task: &Task, f: &File, dsts: &mut [IoVec], offset: i64, blocking: bool) -> Result<i64>
    {
        self.lock().checkChange(task, Signal(Signal::SIGTTIN))?;

        if SHARESPACE.config.read().UringIO && ENABLE_RINGBUF {
            let fd = self.lock().fd;
            let queue = self.lock().queue.clone();
            let ringBuf = self.lock().buf.clone();
            let ret = QUring::RingFileRead(task, fd, queue, ringBuf, dsts, false)?;
            return Ok(ret);
        }

        let fops = self.lock().fileOps.clone();
        let res = fops.ReadAt(task, f, dsts, offset, blocking);
        return res;
    }
fn WriteAt(&self, task: &Task, f: &File, srcs: &[IoVec], offset: i64, blocking: bool) -> Result<i64> { { let t = self.lock(); if t.termios.LEnabled(LocalFlags::TOSTOP) { t.checkChange(task, Signal(Signal::SIGTTOU))?; } } if SHARESPACE.config.read().UringIO && ENABLE_RINGBUF { /* let size = IoVec::NumBytes(srcs); let mut buf = DataBuff::New(size); task.CopyDataInFromIovs(&mut buf.buf, &srcs)?; let iovs = buf.Iovs();*/ let fd = self.lock().fd; let queue = self.lock().queue.clone(); let ringBuf = self.lock().buf.clone(); let lock = self.BufWriteLock().Lock(task); return QUring::RingFileWrite(task, fd, queue, ringBuf, srcs, Arc::new(self.clone()), lock) } let fops = self.lock().fileOps.clone(); let res = fops.WriteAt(task, f, srcs, offset, blocking); return res; } fn Append(&self, task: &Task, f: &File, srcs: &[IoVec]) -> Result<(i64, i64)> { { let t = self.lock(); if t.termios.LEnabled(LocalFlags::TOSTOP) { t.checkChange(task, Signal(Signal::SIGTTOU))?; } } let fops = self.lock().fileOps.clone(); let res = fops.Append(task, f, srcs); return res; } fn Fsync(&self, task: &Task, f: &File, start: i64, end: i64, syncType: SyncType) -> Result<()> { let fops = self.lock().fileOps.clone(); let res = fops.Fsync(task, f, start, end, syncType); return res; } fn Flush(&self, task: &Task, f: &File) -> Result<()> { if SHARESPACE.config.read().UringIO && ENABLE_RINGBUF { // try to gain the lock once, release immediately self.BufWriteLock().Lock(task); } let fops = self.lock().fileOps.clone(); let res = fops.Flush(task, f); return res; } fn UnstableAttr(&self, task: &Task, f: &File) -> Result<UnstableAttr> { let fops = self.lock().fileOps.clone(); let res = fops.UnstableAttr(task, f); return res; } fn Ioctl(&self, task: &Task, _f: &File, _fd: i32, request: u64, val: u64) -> Result<()> { let fops = self.lock().fileOps.clone(); let fd = fops.as_any().downcast_ref::<HostFileOp>().expect("Ioctl: not Hostfilop") .InodeOp.as_any().downcast_ref::<HostInodeOp>().expect("Ioctl: not HostInodeOp").HostFd(); let ioctl = request; match ioctl { IoCtlCmd::TCGETS => { let mut term = Termios::default(); ioctlGetTermios(fd, &mut term)?; task.CopyOutObj(&term, val)?; return Ok(()) } IoCtlCmd::TCSETS | IoCtlCmd::TCSETSW | IoCtlCmd::TCSETSF => { self.lock().checkChange(task, Signal(Signal::SIGTTOU))?; let t: Termios = task.CopyInObj(val)?; ioctlSetTermios(fd, ioctl, &t)?; self.lock().termios.FromTermios(&t); return Ok(()) } IoCtlCmd::TIOCGPGRP => { let thread = task.Thread(); let tg = thread.ThreadGroup(); let pidns = tg.PIDNamespace(); let internal = self.lock(); let pgid = pidns.IDOfProcessGroup(internal.fgProcessgroup.as_ref().unwrap()); info!("TIOCGPGRP pgid is {}, val is {:x}", pgid, val); task.CopyOutObj(&pgid, val)?; return Ok(()) } IoCtlCmd::TIOCSPGRP => { let thread = match &task.thread { None => return Err(Error::SysError(SysErr::ENOTTY)), Some(ref t) => t.clone(), }; let mut t = self.lock(); match t.checkChange(task, Signal(Signal::SIGTTOU)) { // drivers/tty/tty_io.c:tiocspgrp() converts -EIO from // tty_check_change() to -ENOTTY. 
Err(Error::SysError(SysErr::EIO)) => return Err(Error::SysError(SysErr::ENOTTY)), Err(e) => return Err(e), Ok(()) => (), } let tg = thread.ThreadGroup(); let session = tg.Session(); if session != t.session { return Err(Error::SysError(SysErr::ENOTTY)); } let pgid: i32 = task.CopyInObj(val)?; if pgid < 0 { return Err(Error::SysError(SysErr::EINVAL)); } let pidns = tg.PIDNamespace(); let pg = match pidns.ProcessGroupWithID(pgid) { None => return Err(Error::SysError(SysErr::ESRCH)), Some(pg) => pg, }; // Check that new process group is in the TTY session. if pg.Session() != t.session.clone().unwrap() { return Err(Error::SysError(SysErr::EPERM)) } t.fgProcessgroup = Some(pg); return Ok(()) } IoCtlCmd::TIOCGWINSZ => { let mut win = Winsize::default(); ioctlGetWinsize(fd, &mut win)?; task.CopyOutObj(&win, val)?; return Ok(()) } IoCtlCmd::TIOCSWINSZ => { let w: Winsize = task.CopyInObj(val)?; return ioctlSetWinsize(fd, &w) } IoCtlCmd::TIOCSETD | IoCtlCmd::TIOCSBRK | IoCtlCmd::TIOCCBRK | IoCtlCmd::TCSBRK | IoCtlCmd::TCSBRKP | IoCtlCmd::TIOCSTI | IoCtlCmd::TIOCCONS | IoCtlCmd::FIONBIO | IoCtlCmd::TIOCEXCL | IoCtlCmd::TIOCNXCL | IoCtlCmd::TIOCGEXCL | IoCtlCmd::TIOCNOTTY | IoCtlCmd::TIOCSCTTY | IoCtlCmd::TIOCGSID | IoCtlCmd::TIOCGETD | IoCtlCmd::TIOCVHANGUP | IoCtlCmd::TIOCGDEV | IoCtlCmd::TIOCMGET | IoCtlCmd::TIOCMSET | IoCtlCmd::TIOCMBIC | IoCtlCmd::TIOCMBIS | IoCtlCmd::TIOCGICOUNT | IoCtlCmd::TCFLSH | IoCtlCmd::TIOCSSERIAL | IoCtlCmd::TIOCGPTPEER => { //not implmentated return Err(Error::SysError(SysErr::ENOTTY)) } _ => { return Err(Error::SysError(SysErr::ENOTTY)) } } } fn IterateDir(&self, task: &Task,d: &Dirent, dirCtx: &mut DirCtx, offset: i32) -> (i32, Result<i64>) { let fops = self.lock().fileOps.clone(); return fops.IterateDir(task, d, dirCtx, offset); } fn Mappable(&self) -> Result<HostInodeOp> { return Err(Error::SysError(SysErr::ENODEV)) } } impl SockOperations for TTYFileOps {}
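// Usage sketch (illustrative): the `*Enabled` predicates above are plain mask
// tests against the termios flag constants, e.g.
//
//     let slave = DEFAULT_SLAVE_TERMIOS;
//     assert!(slave.LEnabled(LocalFlags::ECHO));           // slave echoes
//     assert!(!MASTER_TERMIOS.LEnabled(LocalFlags::ECHO)); // master side is raw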
0002_auto_20180704_0002.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-07-04 00:02
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('library', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='module',
            name='professions',
        ),
        migrations.AddField(
            model_name='module',
            name='profession',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.DO_NOTHING, to='library.Profession'),
            preserve_default=False,
        ),
    ]
effective_network_security_group_list_result.py
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class EffectiveNetworkSecurityGroupListResult(Model): """Response for list effective network security groups API service call. Variables are only populated by the server, and will be ignored when sending a request. :param value: A list of effective network security groups. :type value: list[~azure.mgmt.network.v2017_06_01.models.EffectiveNetworkSecurityGroup] :ivar next_link: The URL to get the next set of results. :vartype next_link: str """ _validation = { 'next_link': {'readonly': True}, }
    _attribute_map = {
        'value': {'key': 'value', 'type': '[EffectiveNetworkSecurityGroup]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, value=None):
        self.value = value
        self.next_link = None
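# Usage sketch (hypothetical): `value` is caller-settable, while `next_link`
# is marked read-only in `_validation` and is only populated by the server:
#
#   result = EffectiveNetworkSecurityGroupListResult(value=[])
#   assert result.next_link is None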
usa.go
package data import ( "fmt" "strings" gjson "github.com/tidwall/gjson" ) // USState represents a US State, duh. type USState struct { FullName string `json:"fullName"` ShortName string `json:"shortName"` RegionID string `json:"regionID"` DivisionID string `json:"divisionID"` FIPSCode string `json:"fipsCode"` } // USStateJSON is a JSON representation of all US States & metadata. const USStateJSON = ` [ {"fullName": "Alabama", "shortName": "AL", "regionID": "6", "divisionID": "3", "fipsCode": "01"}, {"fullName": "Alaska", "shortName": "AK", "regionID": "8", "divisionID": "4", "fipsCode": "02"}, {"fullName": "Arizona", "shortName": "AZ", "regionID": "7", "divisionID": "4", "fipsCode": "04"}, {"fullName": "Arkansas", "shortName": "AR", "regionID": "6", "divisionID": "3", "fipsCode": "05"}, {"fullName": "California", "shortName": "CA", "regionID": "8", "divisionID": "4", "fipsCode": "06"}, {"fullName": "Colorado", "shortName": "CO", "regionID": "7", "divisionID": "4", "fipsCode": "08"}, {"fullName": "Connecticut", "shortName": "CT", "regionID": "1", "divisionID": "1", "fipsCode": "09"}, {"fullName": "Delaware", "shortName": "DE", "regionID": "5", "divisionID": "3", "fipsCode": "10"}, {"fullName": "District of Columbia", "shortName": "DC", "regionID": "5", "divisionID": "3", "fipsCode": "11"}, {"fullName": "Florida", "shortName": "FL", "regionID": "5", "divisionID": "3", "fipsCode": "12"}, {"fullName": "Georgia", "shortName": "GA", "regionID": "5", "divisionID": "3", "fipsCode": "13"}, {"fullName": "Hawaii", "shortName": "HI", "regionID": "8", "divisionID": "4", "fipsCode": "15"}, {"fullName": "Idaho", "shortName": "ID", "regionID": "7", "divisionID": "4", "fipsCode": "16"}, {"fullName": "Illinois", "shortName": "IL", "regionID": "3", "divisionID": "2", "fipsCode": "17"}, {"fullName": "Indiana", "shortName": "IN", "regionID": "3", "divisionID": "2", "fipsCode": "18"}, {"fullName": "Iowa", "shortName": "IA", "regionID": "4", "divisionID": "2", "fipsCode": "19"}, {"fullName": "Kansas", "shortName": "KS", "regionID": "4", "divisionID": "2", "fipsCode": "20"}, {"fullName": "Kentucky", "shortName": "KY", "regionID": "6", "divisionID": "3", "fipsCode": "21"}, {"fullName": "Louisiana", "shortName": "LA", "regionID": "6", "divisionID": "3", "fipsCode": "22"}, {"fullName": "Maine", "shortName": "ME", "regionID": "1", "divisionID": "1", "fipsCode": "23"}, {"fullName": "Maryland", "shortName": "MD", "regionID": "5", "divisionID": "3", "fipsCode": "24"}, {"fullName": "Massachusetts", "shortName": "MA", "regionID": "1", "divisionID": "1", "fipsCode": "25"}, {"fullName": "Michigan", "shortName": "MI", "regionID": "3", "divisionID": "2", "fipsCode": "26"}, {"fullName": "Minnesota", "shortName": "MN", "regionID": "4", "divisionID": "2", "fipsCode": "27"}, {"fullName": "Mississippi", "shortName": "MS", "regionID": "6", "divisionID": "3", "fipsCode": "28"}, {"fullName": "Missouri", "shortName": "MO", "regionID": "4", "divisionID": "2", "fipsCode": "29"}, {"fullName": "Montana", "shortName": "MT", "regionID": "7", "divisionID": "4", "fipsCode": "30"}, {"fullName": "Nebraska", "shortName": "NE", "regionID": "4", "divisionID": "2", "fipsCode": "31"}, {"fullName": "Nevada", "shortName": "NV", "regionID": "7", "divisionID": "4", "fipsCode": "32"}, {"fullName": "New Hampshire", "shortName": "NH", "regionID": "1", "divisionID": "1", "fipsCode": "33"}, {"fullName": "New Jersey", "shortName": "NJ", "regionID": "2", "divisionID": "1", "fipsCode": "34"}, {"fullName": "New Mexico", "shortName": "NM", "regionID": "7", 
"divisionID": "4", "fipsCode": "35"}, {"fullName": "New York", "shortName": "NY", "regionID": "2", "divisionID": "1", "fipsCode": "36"}, {"fullName": "North Carolina", "shortName": "NC", "regionID": "5", "divisionID": "3", "fipsCode": "37"}, {"fullName": "North Dakota", "shortName": "ND", "regionID": "4", "divisionID": "2", "fipsCode": "38"}, {"fullName": "Ohio", "shortName": "OH", "regionID": "3", "divisionID": "2", "fipsCode": "39"}, {"fullName": "Oklahoma", "shortName": "OK", "regionID": "6", "divisionID": "3", "fipsCode": "40"}, {"fullName": "Oregon", "shortName": "OR", "regionID": "8", "divisionID": "4", "fipsCode": "41"}, {"fullName": "Pennsylvania", "shortName": "PA", "regionID": "2", "divisionID": "1", "fipsCode": "42"}, {"fullName": "Rhode Island", "shortName": "RI", "regionID": "1", "divisionID": "1", "fipsCode": "44"}, {"fullName": "South Carolina", "shortName": "SC", "regionID": "5", "divisionID": "3", "fipsCode": "45"}, {"fullName": "South Dakota", "shortName": "SD", "regionID": "4", "divisionID": "2", "fipsCode": "46"}, {"fullName": "Tennessee", "shortName": "TN", "regionID": "6", "divisionID": "3", "fipsCode": "47"}, {"fullName": "Texas", "shortName": "TX", "regionID": "6", "divisionID": "3", "fipsCode": "48"}, {"fullName": "Utah", "shortName": "UT", "regionID": "7", "divisionID": "4", "fipsCode": "49"}, {"fullName": "Vermont", "shortName": "VT", "regionID": "1", "divisionID": "1", "fipsCode": "50"}, {"fullName": "Virginia", "shortName": "VA", "regionID": "5", "divisionID": "3", "fipsCode": "51"}, {"fullName": "Washington", "shortName": "WA", "regionID": "8", "divisionID": "4", "fipsCode": "53"}, {"fullName": "West Virginia", "shortName": "WV", "regionID": "5", "divisionID": "3", "fipsCode": "54"}, {"fullName": "Wisconsin", "shortName": "WI", "regionID": "3", "divisionID": "2", "fipsCode": "55"}, {"fullName": "Wyoming", "shortName": "WY", "regionID": "7", "divisionID": "4", "fipsCode": "56"} ] ` // GetState gets a state, duh. func GetState(search string) (r USState, e error) { gjson.AddModifier("case", func(json, arg string) string { if arg == "upper" { return strings.ToUpper(json) } if arg == "lower" { return strings.ToLower(json) } return json }) states := gjson.Parse(USStateJSON).Array() sl := strings.ToLower(search) e = nil for _, s := range states { if sl == s.Get("fullName|@case:lower").Str || sl == s.Get("shortName|@case:lower").Str { r = USState{ FullName: s.Get("fullName").Str, ShortName: s.Get("shortName").Str, RegionID: s.Get("regionID").Str, DivisionID: s.Get("divisionID").Str, FIPSCode: s.Get("fipsCode").Str, } break } } if (USState{} == r) { e = fmt.Errorf("No State Matches '%s'", search)
} return }
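// Usage sketch (not part of the original file): GetState matches either the
// full name or the two-letter abbreviation, case-insensitively. For example:
//
//	st, err := GetState("wa")
//	if err != nil {
//	    log.Fatal(err)
//	}
//	fmt.Println(st.FullName, st.FIPSCode) // Washington 53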
main.rs
#![deny(unsafe_code)]
use anyhow::{anyhow, Result};
use argh::FromArgs;
use std::{env, process};

mod constants;
mod onesignal;

use onesignal::{Contents, Headings, NotificationPayload};

/// Send a push notification when your long build command finishes
#[derive(Debug, FromArgs)]
struct Args {
    /// the command that we will execute.
    #[argh(positional)]
    cmd: String,
    /// override the UserIDs that we will send the notification to.
    /// normally these are stored in the `IMPULSE_USER_IDS` env variable.
    /// UIDs are separated by `,`.
    #[argh(option)]
    uids: Option<String>,
    /// set the success message.
    /// defaults to: Build exit successfully
    #[argh(option, short = 's')]
    success_message: Option<String>,
    /// set the error message.
    /// defaults to: Build errored
    #[argh(option, short = 'e')]
    error_message: Option<String>,
}

fn main() -> Result<()> {
    let args: Args = argh::from_env();
    // As documented above, `--uids` overrides the `IMPULSE_USER_IDS` env
    // variable, so the flag is checked first.
    let uids = args
        .uids
        .or_else(|| env::var("IMPULSE_USER_IDS").ok())
        .ok_or_else(|| anyhow!(constants::MISSING_USERID_ERR))?;
    let result = exec(&args.cmd)?;
    let msg = if result {
        args.success_message
            .unwrap_or_else(|| String::from("Build exit successfully"))
    } else {
        args.error_message
            .unwrap_or_else(|| String::from("Build errored"))
    };
    onesignal::send_notification(NotificationPayload {
        app_id: constants::ONE_SIGNAL_APP_ID,
        include_player_ids: uids.split(',').map(|v| v.to_owned()).collect(),
        headings: Headings {
            en: "Build Result".to_string(),
        },
        contents: Contents { en: msg },
        chrome_web_image: String::new(),
        web_url: String::new(),
    })?;
    Ok(())
}

pub fn exec(cmd: &str) -> Result<bool> {
    // The first whitespace-separated token is the program; the rest are its
    // arguments. Quoted arguments are not handled.
    let mut parts = cmd.split_whitespace();
    let mut child = process::Command::new(parts.next().unwrap_or_default())
        .args(parts)
        .spawn()?;
    Ok(child.wait()?.success())
}
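// A minimal sketch (not in the original file) exercising `exec`. It assumes
// the Unix `true`/`false` binaries are on PATH, hence the `unix` cfg gate.
#[cfg(all(test, unix))]
mod tests {
    use super::exec;

    #[test]
    fn exec_reports_exit_status() {
        assert!(exec("true").unwrap());
        assert!(!exec("false").unwrap());
    }
}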
metrics.py
import typing as ty import numpy as np import scipy.special import sklearn.metrics as skm from . import util def calculate_metrics( task_type: str, y: np.ndarray, prediction: np.ndarray, classification_mode: str, y_info: ty.Optional[ty.Dict[str, ty.Any]], ) -> ty.Dict[str, float]: if task_type == util.REGRESSION: del classification_mode rmse = skm.mean_squared_error(y, prediction) ** 0.5 # type: ignore[code] if y_info: if y_info['policy'] == 'mean_std': rmse *= y_info['std'] else: assert False return {'rmse': rmse, 'score': -rmse} else: assert task_type in (util.BINCLASS, util.MULTICLASS) labels = None if classification_mode == 'probs': probs = prediction elif classification_mode == 'logits': probs = ( scipy.special.expit(prediction) if task_type == util.BINCLASS else scipy.special.softmax(prediction, axis=1) ) else: assert classification_mode == 'labels' probs = None labels = prediction if labels is None: labels = ( np.round(probs).astype('int64') if task_type == util.BINCLASS else probs.argmax(axis=1) # type: ignore[code] ) result = skm.classification_report(y, labels, output_dict=True) # type: ignore[code] if task_type == util.BINCLASS: result['roc_auc'] = skm.roc_auc_score(y, probs) # type: ignore[code] result['score'] = result['accuracy'] # type: ignore[code] return result # type: ignore[code] def make_summary(metrics: ty.Dict[str, ty.Any]) -> str: precision = 3 summary = {} for k, v in metrics.items(): if k.isdigit(): continue k = { 'score': 'SCORE', 'accuracy': 'acc', 'roc_auc': 'roc_auc', 'macro avg': 'm', 'weighted avg': 'w', }.get(k, k) if isinstance(v, float): v = round(v, precision) summary[k] = v else: v = { {'precision': 'p', 'recall': 'r', 'f1-score': 'f1', 'support': 's'}.get( x, x ): round(v[x], precision) for x in v } for item in v.items(): summary[k + item[0]] = item[1] s = [f'score = {summary.pop("SCORE"):.3f}'] for k, v in summary.items(): if k not in ['mp', 'mr', 'wp', 'wr']: # just to save screen space
            s.append(f'{k} = {v}')
    return ' | '.join(s)
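# Usage sketch (not part of the original module). It assumes the package is
# imported normally (so the relative `util` import resolves) and uses toy
# regression data; `classification_mode` is ignored for regression tasks.
#
#   y = np.array([0.0, 1.0, 2.0])
#   prediction = np.array([0.1, 0.9, 2.2])
#   metrics = calculate_metrics(util.REGRESSION, y, prediction, 'probs', None)
#   print(make_summary(metrics))  # score = -0.141 | rmse = 0.141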
ls.rs
#![crate_name = "uu_ls"] // This file is part of the uutils coreutils package. // // (c) Jeremiah Peschka <[email protected]> // // For the full copyright and license information, please view the LICENSE file // that was distributed with this source code. // extern crate getopts; extern crate pretty_bytes; extern crate termsize; extern crate term_grid; extern crate time; extern crate unicode_width; use pretty_bytes::converter::convert; use term_grid::{Grid, GridOptions, Direction, Filling, Cell}; use time::{Timespec, strftime}; #[macro_use] extern crate lazy_static; #[macro_use] extern crate uucore; #[cfg(unix)] use uucore::libc::{S_ISUID, S_ISGID, S_ISVTX, S_IRUSR, S_IWUSR, S_IXUSR, S_IRGRP, S_IWGRP, S_IXGRP, S_IROTH, S_IWOTH, S_IXOTH, mode_t}; use std::fs; use std::fs::{DirEntry, FileType, Metadata}; use std::path::{Path, PathBuf}; use std::io::Write; use std::collections::HashMap; #[cfg(unix)] use std::os::unix::fs::MetadataExt; #[cfg(unix)] use std::os::unix::fs::FileTypeExt; #[cfg(unix)] use unicode_width::UnicodeWidthStr; #[cfg(windows)] use std::os::windows::fs::MetadataExt; static NAME: &'static str = "ls"; static SUMMARY: &'static str = ""; static LONG_HELP: &'static str = " By default, ls will list the files and contents of any directories on the command line, expect that it will ignore files and directories whose names start with '.' "; static DEFAULT_COLORS: &'static str = "rs=0:di=01;34:ln=01;36:mh=00:pi=40;33:so=01;35:do=01;35:bd=40;33;01:cd=40;33;01:or=40;31;01:mi=00:su=37;41:sg=30;43:ca=30;41:tw=30;42:ow=34;42:st=37;44:ex=01;32:*.tar=01;31:*.tgz=01;31:*.arc=01;31:*.arj=01;31:*.taz=01;31:*.lha=01;31:*.lz4=01;31:*.lzh=01;31:*.lzma=01;31:*.tlz=01;31:*.txz=01;31:*.tzo=01;31:*.t7z=01;31:*.zip=01;31:*.z=01;31:*.Z=01;31:*.dz=01;31:*.gz=01;31:*.lrz=01;31:*.lz=01;31:*.lzo=01;31:*.xz=01;31:*.bz2=01;31:*.bz=01;31:*.tbz=01;31:*.tbz2=01;31:*.tz=01;31:*.deb=01;31:*.rpm=01;31:*.jar=01;31:*.war=01;31:*.ear=01;31:*.sar=01;31:*.rar=01;31:*.alz=01;31:*.ace=01;31:*.zoo=01;31:*.cpio=01;31:*.7z=01;31:*.rz=01;31:*.cab=01;31:*.jpg=01;35:*.jpeg=01;35:*.gif=01;35:*.bmp=01;35:*.pbm=01;35:*.pgm=01;35:*.ppm=01;35:*.tga=01;35:*.xbm=01;35:*.xpm=01;35:*.tif=01;35:*.tiff=01;35:*.png=01;35:*.svg=01;35:*.svgz=01;35:*.mng=01;35:*.pcx=01;35:*.mov=01;35:*.mpg=01;35:*.mpeg=01;35:*.m2v=01;35:*.mkv=01;35:*.webm=01;35:*.ogm=01;35:*.mp4=01;35:*.m4v=01;35:*.mp4v=01;35:*.vob=01;35:*.qt=01;35:*.nuv=01;35:*.wmv=01;35:*.asf=01;35:*.rm=01;35:*.rmvb=01;35:*.flc=01;35:*.avi=01;35:*.fli=01;35:*.flv=01;35:*.gl=01;35:*.dl=01;35:*.xcf=01;35:*.xwd=01;35:*.yuv=01;35:*.cgm=01;35:*.emf=01;35:*.ogv=01;35:*.ogx=01;35:*.aac=00;36:*.au=00;36:*.flac=00;36:*.m4a=00;36:*.mid=00;36:*.midi=00;36:*.mka=00;36:*.mp3=00;36:*.mpc=00;36:*.ogg=00;36:*.ra=00;36:*.wav=00;36:*.oga=00;36:*.opus=00;36:*.spx=00;36:*.xspf=00;36:"; lazy_static! { static ref LS_COLORS: String = std::env::var("LS_COLORS").unwrap_or(DEFAULT_COLORS.to_string()); static ref COLOR_MAP: HashMap<&'static str, &'static str> = { let codes = LS_COLORS.split(":"); let mut map = HashMap::new(); for c in codes { let p: Vec<_> = c.split("=").collect(); if p.len() == 2 { map.insert(p[0], p[1]); } } map }; static ref RESET_CODE: &'static str = COLOR_MAP.get("rs").unwrap_or(&"0"); static ref LEFT_CODE: &'static str = COLOR_MAP.get("lc").unwrap_or(&"\x1b["); static ref RIGHT_CODE: &'static str = COLOR_MAP.get("rc").unwrap_or(&"m"); static ref END_CODE: &'static str = COLOR_MAP.get("ec").unwrap_or(&""); } pub fn uumain(args: Vec<String>) -> i32 { let syntax = format!("[OPTION]... 
DIRECTORY {0} [OPTION]... [FILE]...", NAME); let matches = new_coreopts!(&syntax, SUMMARY, LONG_HELP) .optflag("a", "all", "Do not ignore hidden files (files with names that start with '.').") .optflag("A", "almost-all", "In a directory, do not ignore all file names that start with '.', only ignore \ '.' and '..'.") .optflag("c", "", "If the long listing format (e.g., -l, -o) is being used, print the status \ change time (the ‘ctime’ in the inode) instead of the modification time. When \ explicitly sorting by time (--sort=time or -t) or when not using a long listing \ format, sort according to the status change time.") .optflag("d", "directory", "Only list the names of directories, rather than listing directory contents. \ This will not follow symbolic links unless one of `--dereference-command-line \ (-H)`, `--dereference (-L)`, or `--dereference-command-line-symlink-to-dir` is \ specified.") .optflag("F", "classify", "Append a character to each file name indicating the file type. Also, for \ regular files that are executable, append '*'. The file type indicators are \ '/' for directories, '@' for symbolic links, '|' for FIFOs, '=' for sockets, \ '>' for doors, and nothing for regular files.") .optflag("h", "human-readable", "Print human readable file sizes (e.g. 1K 234M 56G).") .optflag("L", "dereference", "When showing file information for a symbolic link, show information for the \ file the link references rather than the link itself.") .optflag("l", "long", "Display detailed information.") .optflag("r", "reverse", "Reverse whatever the sorting method is--e.g., list files in reverse \ alphabetical order, youngest first, smallest first, or whatever.") .optflag("R", "recursive", "List the contents of all directories recursively.") .optflag("S", "", "Sort by file size, largest first.") .optflag("t", "", "Sort by modification time (the 'mtime' in the inode), newest first.") .optflag("U", "", "Do not sort; list the files in whatever order they are stored in the \ directory. 
This is especially useful when listing very large directories, \ since not doing any sorting can be noticeably faster.") .optflag("", "color", "Color output based on file type.") .parse(args); list(matches); 0 } fn list(options: getopts::Matches) { let locs: Vec<String> = if options.free.is_empty() { vec![String::from(".")] } else { options.free.iter().cloned().collect() }; let mut files = Vec::<PathBuf>::new(); let mut dirs = Vec::<PathBuf>::new(); for loc in locs { let p = PathBuf::from(&loc); let mut dir = false; if p.is_dir() && !options.opt_present("d") { dir = true; if !options.opt_present("L") { if let Ok(md) = p.symlink_metadata() { if md.file_type().is_symlink() { dir = false; } } } } if dir { dirs.push(p); } else { files.push(p); } } sort_entries(&mut files, &options); display_items(&files, None, &options); sort_entries(&mut dirs, &options); for dir in dirs { if options.free.len() > 1 { println!("\n{}:", dir.to_string_lossy()); } enter_directory(&dir, &options); } } #[cfg(unix)] fn sort_entries(entries: &mut Vec<PathBuf>, options: &getopts::Matches) { let mut reverse = options.opt_present("r"); if options.opt_present("t") { if options.opt_present("c") { entries.sort_by_key(|k| get_metadata(k, options).map(|md| md.ctime()).unwrap_or(0)); } else { entries.sort_by_key(|k| { get_metadata(k, options) .and_then(|md| md.modified()) .unwrap_or(std::time::UNIX_EPOCH) }); } } else if options.opt_present("S") { entries.sort_by_key(|k| get_metadata(k, options).map(|md| md.size()).unwrap_or(0)); reverse = !reverse; } else if !options.opt_present("U") { entries.sort(); } if reverse { entries.reverse(); } } #[cfg(windows)] fn sort_entries(entries: &mut Vec<PathBuf>, options: &getopts::Matches) { let mut reverse = options.opt_present("r"); if options.opt_present("t") { entries.sort_by_key(|k| { get_metadata(k, options) .and_then(|md| md.modified()) .unwrap_or(std::time::UNIX_EPOCH) }); } else if options.opt_present("S") { entries.sort_by_key(|k| get_metadata(k, options).map(|md| md.file_size()).unwrap_or(0)); reverse = !reverse; } else if !options.opt_present("U") { entries.sort(); } if reverse { entries.reverse(); } } fn max(lhs: usize, rhs: usize) -> usize { if lhs > rhs { lhs } else { rhs } } fn enter_directory(dir: &PathBuf, options: &getopts::Matches) { let mut entries = safe_unwrap!(fs::read_dir(dir) .and_then(|e| e.collect::<Result<Vec<_>, _>>())); if !options.opt_present("a") && !options.opt_present("A") { entries.retain(|e| !e.file_name().to_string_lossy().starts_with('.')) } let mut entries: Vec<_> = entries.iter().map(DirEntry::path).collect(); if options.opt_present("a") { entries.push(dir.join(".")); entries.push(dir.join("..")); } sort_entries(&mut entries, options); display_items(&entries, Some(dir), options); if options.opt_present("R") { for e in entries.iter().filter(|p| p.is_dir()) { println!("\n{}:", e.to_string_lossy()); enter_directory(&e, options); } } } fn get_metadata(entry: &PathBuf, options: &getopts::Matches) -> std::io::Result<Metadata> { if options.opt_present("L") { entry.metadata().or(entry.symlink_metadata()) } else { entry.symlink_metadata() } } fn display_dir_entry_size(entry: &PathBuf, options: &getopts::Matches) -> (usize, usize) { if let Ok(md) = get_metadata(entry, options) { (display_symlink_count(&md).len(), display_file_size(&md, options).len()) } else { (0, 0) } } fn pad_left(string: String, count: usize) -> String { if count > string.len() { let pad = count - string.len(); let pad = String::from_utf8(vec![' ' as u8; pad]).unwrap(); format!("{}{}", pad, 
string) } else { string } } fn display_items(items: &Vec<PathBuf>, strip: Option<&Path>, options: &getopts::Matches) { if options.opt_present("long") { let (mut max_links, mut max_size) = (1, 1); for item in items { let (links, size) = display_dir_entry_size(item, options); max_links = max(links, max_links); max_size = max(size, max_size); } for item in items { display_item_long(item, strip, max_links, max_size, options); } } else { let names: Vec<_> = items.iter() .filter_map(|i| { let md = get_metadata(i, options); match md { Err(e) => { let filename = get_file_name(i, strip); show_error!("{}: {}", filename, e); None } Ok(md) => Some(display_file_name(&i, strip, &md, options)), } }) .collect(); if let Some(size) = termsize::get() { let mut grid = Grid::new(GridOptions { filling: Filling::Spaces(2), direction: Direction::TopToBottom, }); for name in names { grid.add(name); } if let Some(output) = grid.fit_into_width(size.cols as usize) { print!("{}", output); return; } } // Couldn't display a grid, either because we don't know // the terminal width or because fit_into_width failed for i in items { let md = get_metadata(i, options); if let Ok(md) = md { println!("{}", display_file_name(&i, strip, &md, options).contents); } } } } fn display_item_long(item: &PathBuf, strip: Option<&Path>, max_links: usize, max_size: usize, options: &getopts::Matches) { let md = match get_metadata(item, options) { Err(e) => { let filename = get_file_name(&item, strip); show_error!("{}: {}", filename, e); return; } Ok(md) => md, }; println!("{}{} {} {} {} {} {} {}", display_file_type(md.file_type()), display_permissions(&md), pad_left(display_symlink_count(&md), max_links), display_uname(&md), display_group(&md), pad_left(display_file_size(&md, options), max_size), display_date(&md, options), display_file_name(&item, strip, &md, options).contents); } // Currently getpwuid is `linux` target only. If it's broken out into // a posix-compliant attribute this can be updated... 
#[cfg(unix)] use uucore::entries; #[cfg(unix)] fn display_uname(metadata: &Metadata) -> String { entries::uid2usr(metadata.uid()).unwrap_or(metadata.uid().to_string()) } #[cfg(unix)] fn display_group(metadata: &Metadata) -> String { entries::gid2grp(metadata.gid()).unwrap_or(metadata.gid().to_string()) } #[cfg(not(unix))] #[allow(unused_variables)] fn display_uname(metadata: &Metadata) -> String { "somebody".to_string() } #[cfg(not(unix))] #[allow(unused_variables)] fn display_group(metadata: &Metadata) -> String { "somegroup".to_string() } #[cfg(unix)] fn display_date(metadata: &Metadata, options: &getopts::Matches) -> String { let secs = if options.opt_present("c") { metadata.ctime() } else { metadata.mtime() }; let time = time::at(Timespec::new(secs, 0)); strftime("%F %R", &time).unwrap() } #[cfg(not(unix))] fn display_date(metadata: &Metadata, options: &getopts::Matches) -> String { if let Ok(mtime) = metadata.modified() { let time = time::at(Timespec::new(mtime.duration_since(std::time::UNIX_EPOCH) .unwrap() .as_secs() as i64, 0)); strftime("%F %R", &time).unwrap() } else { "???".to_string() } } fn display_file_size(metadata: &Metadata, options: &getopts::Matches) -> String { if options.opt_present("human-readable") { convert(metadata.len() as f64) } else { metadata.len().to_string() } } fn display_file_type(file_type: FileType) -> String { if file_type.is_dir() { "d".to_string() } else if file_type.is_symlink() { "l".to_string() } else { "-".to_string() } } fn get_file_name(name: &Path, strip: Option<&Path>) -> String { let mut name = match strip { Some(prefix) => name.strip_prefix(prefix).unwrap_or(name), None => name, }; if name.as_os_str().len() == 0 { name = Path::new("."); } name.to_string_lossy().into_owned() } #[cfg(not(unix))] fn display_file_name(path: &Path, strip: Option<&Path>, metadata: &Metadata, options: &getopts::Matches) -> Cell { let mut name = get_file_name(path, strip); if options.opt_present("classify") { let file_type = metadata.file_type(); if file_type.is_dir() { name.push('/'); } else if file_type.is_symlink() { name.push('@'); } } name.into() } fn colo
r_name(nam
e: String, typ: &str) -> String { let mut typ = typ; if !COLOR_MAP.contains_key(typ) { if typ == "or" { typ = "ln"; } else if typ == "mi" { typ = "fi"; } }; if let Some(code) = COLOR_MAP.get(typ) { format!("{}{}{}{}{}{}{}{}", *LEFT_CODE, code, *RIGHT_CODE, name, *END_CODE, *LEFT_CODE, *RESET_CODE, *RIGHT_CODE, ) } else { name } } macro_rules! has { ($mode:expr, $perm:expr) => ( $mode & ($perm as mode_t) != 0 ) } #[cfg(unix)] fn display_file_name(path: &Path, strip: Option<&Path>, metadata: &Metadata, options: &getopts::Matches) -> Cell { let mut name = get_file_name(path, strip); let mut width = UnicodeWidthStr::width(&*name); let color = options.opt_present("color"); let classify = options.opt_present("classify"); let ext; if color || classify { let file_type = metadata.file_type(); let (code, sym) = if file_type.is_dir() { ("di", Some('/')) } else if file_type.is_symlink() { if path.exists() { ("ln", Some('@')) } else { ("or", Some('@')) } } else if file_type.is_socket() { ("so", Some('=')) } else if file_type.is_fifo() { ("pi", Some('|')) } else if file_type.is_block_device() { ("bd", None) } else if file_type.is_char_device() { ("cd", None) } else if file_type.is_file() { let mode = metadata.mode() as mode_t; let sym = if has!(mode, S_IXUSR | S_IXGRP | S_IXOTH) { Some('*') } else { None }; if has!(mode, S_ISUID) { ("su", sym) } else if has!(mode, S_ISGID) { ("sg", sym) } else if has!(mode, S_ISVTX) && has!(mode, S_IWOTH) { ("tw", sym) } else if has!(mode, S_ISVTX) { ("st", sym) } else if has!(mode, S_IWOTH) { ("ow", sym) } else if has!(mode, S_IXUSR | S_IXGRP | S_IXOTH) { ("ex", sym) } else if metadata.nlink() > 1 { ("mh", sym) } else if let Some(e) = path.extension() { ext = format!("*.{}", e.to_string_lossy()); (ext.as_str(), None) } else { ("fi", None) } } else { ("", None) }; if color { name = color_name(name, code); } if classify { if let Some(s) = sym { name.push(s); width += 1; } } } if options.opt_present("long") && metadata.file_type().is_symlink() { if let Ok(target) = path.read_link() { // We don't bother updating width here because it's not used for long listings let code = if target.exists() { "fi" } else { "mi" }; let target_name = color_name(target.to_string_lossy().to_string(), code); name.push_str(" -> "); name.push_str(&target_name); } } Cell { contents: name, width: width, } } #[cfg(not(unix))] #[allow(unused_variables)] fn display_symlink_count(metadata: &Metadata) -> String { // Currently not sure of how to get this on Windows, so I'm punting. // Git Bash looks like it may do the same thing. 
String::from("1") } #[cfg(unix)] fn display_symlink_count(metadata: &Metadata) -> String { metadata.nlink().to_string() } #[cfg(not(unix))] #[allow(unused_variables)] fn display_permissions(metadata: &Metadata) -> String { String::from("---------") } #[cfg(unix)] fn display_permissions(metadata: &Metadata) -> String { let mode = metadata.mode() as mode_t; let mut result = String::with_capacity(9); result.push(if has!(mode, S_IRUSR) { 'r' } else { '-' }); result.push(if has!(mode, S_IWUSR) { 'w' } else { '-' }); result.push(if has!(mode, S_ISUID) { if has!(mode, S_IXUSR) { 's' } else { 'S' } } else if has!(mode, S_IXUSR) { 'x' } else { '-' }); result.push(if has!(mode, S_IRGRP) { 'r' } else { '-' }); result.push(if has!(mode, S_IWGRP) { 'w' } else { '-' }); result.push(if has!(mode, S_ISGID) { if has!(mode, S_IXGRP) { 's' } else { 'S' } } else if has!(mode, S_IXGRP) { 'x' } else { '-' }); result.push(if has!(mode, S_IROTH) { 'r' } else { '-' }); result.push(if has!(mode, S_IWOTH) { 'w' } else { '-' }); result.push(if has!(mode, S_ISVTX) { if has!(mode, S_IXOTH) { 't' } else { 'T' } } else if has!(mode, S_IXOTH) { 'x' } else { '-' }); result }
edit_in_place.rs
//! Structural editing for ast. use std::iter::empty; use parser::{SyntaxKind, T}; use rowan::SyntaxElement; use crate::{ algo::neighbor, ast::{ self, edit::{AstNodeEdit, IndentLevel}, make, GenericParamsOwner, }, ted::{self, Position}, AstNode, AstToken, Direction, SyntaxNode, }; use super::NameOwner; pub trait GenericParamsOwnerEdit: ast::GenericParamsOwner + AstNodeEdit { fn get_or_create_generic_param_list(&self) -> ast::GenericParamList; fn get_or_create_where_clause(&self) -> ast::WhereClause; } impl GenericParamsOwnerEdit for ast::Fn { fn get_or_create_generic_param_list(&self) -> ast::GenericParamList { match self.generic_param_list() { Some(it) => it, None => { let position = if let Some(name) = self.name() { Position::after(name.syntax) } else if let Some(fn_token) = self.fn_token() { Position::after(fn_token) } else if let Some(param_list) = self.param_list() { Position::before(param_list.syntax) } else { Position::last_child_of(self.syntax()) }; create_generic_param_list(position) } } } fn get_or_create_where_clause(&self) -> ast::WhereClause { if self.where_clause().is_none() { let position = if let Some(ty) = self.ret_type() { Position::after(ty.syntax()) } else if let Some(param_list) = self.param_list() { Position::after(param_list.syntax()) } else { Position::last_child_of(self.syntax()) }; create_where_clause(position) } self.where_clause().unwrap() } } impl GenericParamsOwnerEdit for ast::Impl { fn get_or_create_generic_param_list(&self) -> ast::GenericParamList { match self.generic_param_list() { Some(it) => it, None => { let position = if let Some(imp_token) = self.impl_token() { Position::after(imp_token) } else { Position::last_child_of(self.syntax()) }; create_generic_param_list(position) } } } fn get_or_create_where_clause(&self) -> ast::WhereClause { if self.where_clause().is_none() { let position = if let Some(items) = self.assoc_item_list() { Position::before(items.syntax()) } else { Position::last_child_of(self.syntax()) }; create_where_clause(position) } self.where_clause().unwrap() } } impl GenericParamsOwnerEdit for ast::Trait { fn get_or_create_generic_param_list(&self) -> ast::GenericParamList { match self.generic_param_list() { Some(it) => it, None => { let position = if let Some(name) = self.name() { Position::after(name.syntax) } else if let Some(trait_token) = self.trait_token() { Position::after(trait_token) } else { Position::last_child_of(self.syntax()) }; create_generic_param_list(position) } } } fn
get_or_create_where_clause
(&self) -> ast::WhereClause { if self.where_clause().is_none() { let position = if let Some(items) = self.assoc_item_list() { Position::before(items.syntax()) } else { Position::last_child_of(self.syntax()) }; create_where_clause(position) } self.where_clause().unwrap() } } impl GenericParamsOwnerEdit for ast::Struct { fn get_or_create_generic_param_list(&self) -> ast::GenericParamList { match self.generic_param_list() { Some(it) => it, None => { let position = if let Some(name) = self.name() { Position::after(name.syntax) } else if let Some(struct_token) = self.struct_token() { Position::after(struct_token) } else { Position::last_child_of(self.syntax()) }; create_generic_param_list(position) } } } fn get_or_create_where_clause(&self) -> ast::WhereClause { if self.where_clause().is_none() { let tfl = self.field_list().and_then(|fl| match fl { ast::FieldList::RecordFieldList(_) => None, ast::FieldList::TupleFieldList(it) => Some(it), }); let position = if let Some(tfl) = tfl { Position::after(tfl.syntax()) } else if let Some(gpl) = self.generic_param_list() { Position::after(gpl.syntax()) } else if let Some(name) = self.name() { Position::after(name.syntax()) } else { Position::last_child_of(self.syntax()) }; create_where_clause(position) } self.where_clause().unwrap() } } impl GenericParamsOwnerEdit for ast::Enum { fn get_or_create_generic_param_list(&self) -> ast::GenericParamList { match self.generic_param_list() { Some(it) => it, None => { let position = if let Some(name) = self.name() { Position::after(name.syntax) } else if let Some(enum_token) = self.enum_token() { Position::after(enum_token) } else { Position::last_child_of(self.syntax()) }; create_generic_param_list(position) } } } fn get_or_create_where_clause(&self) -> ast::WhereClause { if self.where_clause().is_none() { let position = if let Some(gpl) = self.generic_param_list() { Position::after(gpl.syntax()) } else if let Some(name) = self.name() { Position::after(name.syntax()) } else { Position::last_child_of(self.syntax()) }; create_where_clause(position) } self.where_clause().unwrap() } } fn create_where_clause(position: Position) { let where_clause = make::where_clause(empty()).clone_for_update(); ted::insert(position, where_clause.syntax()); } fn create_generic_param_list(position: Position) -> ast::GenericParamList { let gpl = make::generic_param_list(empty()).clone_for_update(); ted::insert_raw(position, gpl.syntax()); gpl } impl ast::GenericParamList { pub fn add_generic_param(&self, generic_param: ast::GenericParam) { match self.generic_params().last() { Some(last_param) => { let position = Position::after(last_param.syntax()); let elements = vec![ make::token(T![,]).into(), make::tokens::single_space().into(), generic_param.syntax().clone().into(), ]; ted::insert_all(position, elements); } None => { let after_l_angle = Position::after(self.l_angle_token().unwrap()); ted::insert(after_l_angle, generic_param.syntax()) } } } } impl ast::WhereClause { pub fn add_predicate(&self, predicate: ast::WherePred) { if let Some(pred) = self.predicates().last() { if !pred.syntax().siblings_with_tokens(Direction::Next).any(|it| it.kind() == T![,]) { ted::append_child_raw(self.syntax(), make::token(T![,])); } } ted::append_child(self.syntax(), predicate.syntax()) } } impl ast::TypeBoundList { pub fn remove(&self) { if let Some(colon) = self.syntax().siblings_with_tokens(Direction::Prev).find(|it| it.kind() == T![:]) { ted::remove_all(colon..=self.syntax().clone().into()) } else { ted::remove(self.syntax()) } } } impl 
ast::PathSegment { pub fn get_or_create_generic_arg_list(&self) -> ast::GenericArgList { if self.generic_arg_list().is_none() { let arg_list = make::generic_arg_list().clone_for_update(); ted::append_child(self.syntax(), arg_list.syntax()) } self.generic_arg_list().unwrap() } } impl ast::UseTree { pub fn remove(&self) { for &dir in [Direction::Next, Direction::Prev].iter() { if let Some(next_use_tree) = neighbor(self, dir) { let separators = self .syntax() .siblings_with_tokens(dir) .skip(1) .take_while(|it| it.as_node() != Some(next_use_tree.syntax())); ted::remove_all_iter(separators); break; } } ted::remove(self.syntax()) } } impl ast::Use { pub fn remove(&self) { let next_ws = self .syntax() .next_sibling_or_token() .and_then(|it| it.into_token()) .and_then(ast::Whitespace::cast); if let Some(next_ws) = next_ws { let ws_text = next_ws.syntax().text(); if let Some(rest) = ws_text.strip_prefix('\n') { if rest.is_empty() { ted::remove(next_ws.syntax()) } else { ted::replace(next_ws.syntax(), make::tokens::whitespace(rest)) } } } ted::remove(self.syntax()) } } impl ast::Impl { pub fn get_or_create_assoc_item_list(&self) -> ast::AssocItemList { if self.assoc_item_list().is_none() { let assoc_item_list = make::assoc_item_list().clone_for_update(); ted::append_child(self.syntax(), assoc_item_list.syntax()); } self.assoc_item_list().unwrap() } } impl ast::AssocItemList { pub fn add_item(&self, item: ast::AssocItem) { let (indent, position, whitespace) = match self.assoc_items().last() { Some(last_item) => ( IndentLevel::from_node(last_item.syntax()), Position::after(last_item.syntax()), "\n\n", ), None => match self.l_curly_token() { Some(l_curly) => { normalize_ws_between_braces(self.syntax()); (IndentLevel::from_token(&l_curly) + 1, Position::after(&l_curly), "\n") } None => (IndentLevel::single(), Position::last_child_of(self.syntax()), "\n"), }, }; let elements: Vec<SyntaxElement<_>> = vec![ make::tokens::whitespace(&format!("{}{}", whitespace, indent)).into(), item.syntax().clone().into(), ]; ted::insert_all(position, elements); } } impl ast::Fn { pub fn get_or_create_body(&self) -> ast::BlockExpr { if self.body().is_none() { let body = make::ext::empty_block_expr().clone_for_update(); match self.semicolon_token() { Some(semi) => { ted::replace(semi, body.syntax()); ted::insert(Position::before(body.syntax), make::tokens::single_space()); } None => ted::append_child(self.syntax(), body.syntax()), } } self.body().unwrap() } } impl ast::MatchArm { pub fn remove(&self) { if let Some(sibling) = self.syntax().prev_sibling_or_token() { if sibling.kind() == SyntaxKind::WHITESPACE { ted::remove(sibling); } } if let Some(sibling) = self.syntax().next_sibling_or_token() { if sibling.kind() == T![,] { ted::remove(sibling); } } ted::remove(self.syntax()); } } impl ast::MatchArmList { pub fn add_arm(&self, arm: ast::MatchArm) { normalize_ws_between_braces(self.syntax()); let mut elements = Vec::new(); let position = match self.arms().last() { Some(last_arm) => { let comma = last_arm .syntax() .siblings_with_tokens(Direction::Next) .find(|it| it.kind() == T![,]); if needs_comma(&last_arm) && comma.is_none() { elements.push(make::token(SyntaxKind::COMMA).into()); } Position::after(comma.unwrap_or_else(|| last_arm.syntax().clone().into())) } None => match self.l_curly_token() { Some(it) => Position::after(it), None => Position::last_child_of(self.syntax()), }, }; let indent = IndentLevel::from_node(self.syntax()) + 1; elements.push(make::tokens::whitespace(&format!("\n{}", indent)).into()); 
elements.push(arm.syntax().clone().into()); if needs_comma(&arm) { elements.push(make::token(SyntaxKind::COMMA).into()); } ted::insert_all(position, elements); fn needs_comma(arm: &ast::MatchArm) -> bool { arm.expr().map_or(false, |e| !e.is_block_like()) } } } impl ast::RecordExprFieldList { pub fn add_field(&self, field: ast::RecordExprField) { let is_multiline = self.syntax().text().contains_char('\n'); let whitespace = if is_multiline { let indent = IndentLevel::from_node(self.syntax()) + 1; make::tokens::whitespace(&format!("\n{}", indent)) } else { make::tokens::single_space() }; if is_multiline { normalize_ws_between_braces(self.syntax()); } let position = match self.fields().last() { Some(last_field) => { let comma = match last_field .syntax() .siblings_with_tokens(Direction::Next) .filter_map(|it| it.into_token()) .find(|it| it.kind() == T![,]) { Some(it) => it, None => { let comma = ast::make::token(T![,]); ted::insert(Position::after(last_field.syntax()), &comma); comma } }; Position::after(comma) } None => match self.l_curly_token() { Some(it) => Position::after(it), None => Position::last_child_of(self.syntax()), }, }; ted::insert_all(position, vec![whitespace.into(), field.syntax().clone().into()]); if is_multiline { ted::insert(Position::after(field.syntax()), ast::make::token(T![,])); } } } fn normalize_ws_between_braces(node: &SyntaxNode) -> Option<()> { let l = node .children_with_tokens() .filter_map(|it| it.into_token()) .find(|it| it.kind() == T!['{'])?; let r = node .children_with_tokens() .filter_map(|it| it.into_token()) .find(|it| it.kind() == T!['}'])?; let indent = IndentLevel::from_node(node); match l.next_sibling_or_token() { Some(ws) if ws.kind() == SyntaxKind::WHITESPACE => { if ws.next_sibling_or_token()?.into_token()? == r { ted::replace(ws, make::tokens::whitespace(&format!("\n{}", indent))); } } Some(ws) if ws.kind() == T!['}'] => { ted::insert(Position::after(l), make::tokens::whitespace(&format!("\n{}", indent))); } _ => (), } Some(()) } #[cfg(test)] mod tests { use std::fmt; use crate::SourceFile; use super::*; fn ast_mut_from_text<N: AstNode>(text: &str) -> N { let parse = SourceFile::parse(text); parse.tree().syntax().descendants().find_map(N::cast).unwrap().clone_for_update() } #[test] fn test_create_generic_param_list() { fn check_create_gpl<N: GenericParamsOwnerEdit + fmt::Display>(before: &str, after: &str) { let gpl_owner = ast_mut_from_text::<N>(before); gpl_owner.get_or_create_generic_param_list(); assert_eq!(gpl_owner.to_string(), after); } check_create_gpl::<ast::Fn>("fn foo", "fn foo<>"); check_create_gpl::<ast::Fn>("fn foo() {}", "fn foo<>() {}"); check_create_gpl::<ast::Impl>("impl", "impl<>"); check_create_gpl::<ast::Impl>("impl Struct {}", "impl<> Struct {}"); check_create_gpl::<ast::Impl>("impl Trait for Struct {}", "impl<> Trait for Struct {}"); check_create_gpl::<ast::Trait>("trait Trait<>", "trait Trait<>"); check_create_gpl::<ast::Trait>("trait Trait<> {}", "trait Trait<> {}"); check_create_gpl::<ast::Struct>("struct A", "struct A<>"); check_create_gpl::<ast::Struct>("struct A;", "struct A<>;"); check_create_gpl::<ast::Struct>("struct A();", "struct A<>();"); check_create_gpl::<ast::Struct>("struct A {}", "struct A<> {}"); check_create_gpl::<ast::Enum>("enum E", "enum E<>"); check_create_gpl::<ast::Enum>("enum E {", "enum E<> {"); } }
CategoriesDetails.component.ts
import { Component, OnInit, HostBinding, EventEmitter, Input, Output, trigger, transition, animate, style, state } from '@angular/core'; import { Router, ActivatedRoute, Params } from '@angular/router'; import { Observable } from 'rxjs/Observable'; import 'rxjs/add/operator/switchMap'; import { ToastsManager } from 'ng2-toastr'; import { CategoriesData, CategoriesService } from './Categories.service'; @Component({ templateUrl: './CategoriesDetails.component.html', providers: [CategoriesService ], animations: [ trigger('routeAnimation', [ state('*', style({ opacity: 1, transform: 'translateX(0)' }) ), transition(':enter', [ style({ opacity: 0, transform: 'translateX(-100%)' }), animate('0.2s ease-in') ]), transition(':leave', [ animate('0.5s ease-out', style({ opacity: 0, transform: 'translateY(100%)' })) ]) ]) ] }) export class CategoriesDetailsComponent implements OnInit { @HostBinding('@routeAnimation') get routeAnimation() { return true; } @HostBinding('style.display') get display() { return 'block'; } // @HostBinding('style.position') get position() { // return 'absolute'; // } objCategories: CategoriesData; errorMessage: string;
messages: string[] = []; // log buffer used by log(); initialized so splice() works

  // Lookup Arrays

  constructor(private route: ActivatedRoute,
              private router: Router,
              private toastr: ToastsManager,
              private CategoriesService: CategoriesService) {
    // this.id = parseInt(params.get('id'));
    this.objCategories = new CategoriesData();
  }

  ngOnInit() {
    this.route.params
      // (+) converts string 'id' to a number
      // .switchMap((params: Params) => this.CategoriesService.getByID(+params['id']))
      .switchMap((params: Params) => this.CategoriesService.getByID(params['id']))
      .subscribe((item: CategoriesData) => this.objCategories = item);

    this.getLookups();
  }

  deleteCategories(id: number) {
    if (window.confirm('Are you sure you want to delete this Categories record?')) {
      this.CategoriesService.deleteCategories(this.objCategories.CategoryID.toString())
        .subscribe(record => this.router.navigate(['/Categories']),
                   error => this.errorMessage = 'There was an error while deleting the record. Error: ' + <any>error,
                   () => { console.log('Categories record deleted successfully...'); });
    }
  }

  gotoCategoriesEdit() {
    this.router.navigate(['/Categories', this.objCategories.CategoryID]);
  }

  gotoCategories() {
    this.toastr.success('Back to Categories List...');
    this.router.navigate(['/Categories']);
  }

  getLookups() { }

  log(msg: string) {
    this.messages.splice(0, 0, msg);
    console.log(msg);
  }
}
item_config_type.rs
use serde_json::json; use crate::common::{Element, SkillType}; pub enum ItemConfigType { Float { min: f64, max: f64, default: f64, }, Int { min: i32, max: i32, default: i32 }, IntInput { min: i32, max: i32, default: i32 }, Bool { default: bool }, Option { options: &'static str, // comma separated default: usize }, // NullOrValueInput { // min: f64, // max: f64, // default: f64, // }, FloatPercentageInput { default: f64, }, FloatInput { default: f64, }, Element4 { // cryo, pyro, electro, hydro default: Element }, Element8 { default: Element }, Skill4 { default: SkillType } } pub struct ItemConfig { pub title: &'static str, pub name: &'static str, pub config: ItemConfigType, } impl ItemConfigType { pub fn to_json(&self, title: &str, name: &str) -> String { let j = match *self { // ItemConfigType::NullOrValueInput { min, max, default } => { // json!({ // "type": "nullOrValueInput", // "title": title, // "name": name, // "min": min, // "max": max, // "default": default // }) // }, ItemConfigType::Skill4 { default } => { json!({ "type": "skill4", "title": title, "name": name, "default": default }) }, ItemConfigType::IntInput { min, max, default } => { json!({ "type": "intInput", "title": title, "name": name, "min": min, "max": max, "default": default }) }, ItemConfigType::Element4 { default } => { json!({ "type": "element4", "title": title, "name": name, "default": default }) }, ItemConfigType::Element8 { default } => { json!({ "type": "element8", "title": title, "name": name, "default": default }) }, ItemConfigType::FloatPercentageInput { default } => { json!({ "type": "floatPercentageInput", "title": title, "name": name, "default": default }) }, ItemConfigType::FloatInput { default } => { json!({ "type": "floatInput", "title": title, "name": name, "default": default }) } ItemConfigType::Float { min, max, default } => { json!({ "type": "float", "title": title, "name": name, "min": min, "max": max, "default": default }) }, ItemConfigType::Int { min, max, default } => { json!({ "type": "int", "title": title, "name": name, "min": min, "max": max, "default": default }) }, ItemConfigType::Bool { default } => { json!({ "type": "bool", "title": title, "name": name, "default": default }) }, ItemConfigType::Option { options, default } => { let temp: Vec<&str> = options.split(",").collect(); json!({ "type": "option", "title": title, "name": name, "default": default, "options": temp }) } }; j.to_string() } } impl ItemConfig { pub const DEFAULT_RATE_TITLE: &'static str = "被动应用比例"; pub const DEFAULT_STACK_TITLE: &'static str = "被动等效层数"; pub const DEFAULT_RECHARGE_TITLE: &'static str = "充能需求"; pub const DEFAULT_BUFF_TITLE: &'static str = "数值"; pub const RATE01_TYPE: ItemConfigType = ItemConfigType::Float { min: 0.0, max: 1.0, default: 0.0 }; pub const RATE01: ItemConfig = ItemConfig { name: "rate", title: Self::DEFAULT_RATE_TITLE, config: ItemConfigType::Float { min: 0.0, max: 1.0, default: 0.0 } }; pub const STACK02: ItemConfig = ItemConfig { name: "stack", title: Self::DEFAULT_STACK_TITLE, config: ItemConfigType::Float { min: 0.0, max: 2.0, default: 0.0 } }; pub const STACK03: ItemConfig = ItemConfig { name: "stack", title: Self::DEFAULT_STACK_TITLE, config: ItemConfigType::Float { min: 0.0, max: 3.0, default: 0.0 } };
pub const STACK04: ItemConfig = ItemConfig { name: "stack", title: Self::DEFAULT_STACK_TITLE, config: ItemConfigType::Float { min: 0.0, max: 4.0, default: 0.0 } };

    pub const STACK05: ItemConfig = ItemConfig { name: "stack", title: Self::DEFAULT_STACK_TITLE, config: ItemConfigType::Float { min: 0.0, max: 5.0, default: 0.0 } };

    pub const BUFFV1P: ItemConfig = ItemConfig { name: "p", title: Self::DEFAULT_BUFF_TITLE, config: ItemConfigType::FloatPercentageInput { default: 0.0 } };

    pub const BUFFV1: ItemConfig = ItemConfig { name: "value", title: Self::DEFAULT_BUFF_TITLE, config: ItemConfigType::FloatInput { default: 0.0 } };

    pub const REFINE: ItemConfig = ItemConfig { name: "refine", title: "精炼", config: ItemConfigType::IntInput { min: 1, max: 5, default: 1 } };

    pub fn to_json(&self) -> String {
        self.config.to_json(self.title, self.name)
    }
}
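// A minimal sketch (not in the original file) showing how one of the preset
// configs serializes; the exact key order in the output depends on serde_json.
#[cfg(test)]
mod tests {
    use super::ItemConfig;

    #[test]
    fn rate01_serializes_to_float_config() {
        let json = ItemConfig::RATE01.to_json();
        assert!(json.contains("\"type\":\"float\""));
        assert!(json.contains("\"name\":\"rate\""));
    }
}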
normalize.go
package dsl import ( "fmt" . "github.com/glycerine/zygomys/zygo" floats "gonum.org/v1/gonum/floats" stat "gonum.org/v1/gonum/stat" ) func NumNorm(data []float64) []float64 { res := make([]float64, len(data)) xmin := floats.Min(data) xmax := floats.Max(data) diff := xmax - xmin if diff == 0 { for i := 0; i < len(data); i++ { res[i] = 0.0 } } else { for i := 0; i < len(data); i++ { res[i] = (data[i] - xmin) / diff } } return res } func NumStand(data []float64) []float64 { xmean := stat.Mean(data, nil) xdev := stat.StdDev(data, nil) res := make([]float64, len(data)) for i := 0; i < len(data); i++ { res[i] = (data[i] - xmean) / xdev } return res } func NormalizeAll(env *Zlisp, name string, args []Sexp) (Sexp, error) { arr := make([]float64, 0) if len(args) != 1 { return SexpNull, WrongNargs } switch e := args[0].(type) { case *SexpArray: arr = ArrayofFloatsToArray(e) default: return SexpNull, fmt.Errorf("First argument must be array") } switch name { case "normalize.Normalize": res := NumNorm(arr) return ArrayofFloatsToFloatLispArray(env, res), nil case "normalize.Standard": res := NumStand(arr) return ArrayofFloatsToFloatLispArray(env, res), nil } return SexpNull, fmt.Errorf("Requested normalization computation can not be performed: %v", name) } func NormalizeFunctions() map[string]ZlispUserFunction
{
	return map[string]ZlispUserFunction{
		"normalizen": NormalizeAll,
		"normalizes": NormalizeAll,
	}
}

func NormalizePackageSetup(cfg *ZlispConfig, env *Zlisp) {
	myPkg := `(def normalize (package "normalize"
     {
         Normalize := normalizen;
         Standard := normalizes;
     }
  ))`
	_, err := env.EvalString(myPkg)
	PanicOn(err)
}
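// Usage sketch (not part of the original file): NumNorm rescales a slice into
// [0, 1] and NumStand converts it to z-scores. For example:
//
//	data := []float64{2, 4, 6}
//	fmt.Println(NumNorm(data))  // [0 0.5 1]
//	fmt.Println(NumStand(data)) // zero mean, unit sample standard deviation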
app.component.ts
import { Component, OnInit } from '@angular/core'; import { HeroService } from './hero.service'; import { HeroesComponent } from './heroes.component'; import { HeroDetailComponent } from './hero-detail.component'; import { DashboardComponent } from './dashboard.component'; import { RouteConfig, ROUTER_DIRECTIVES, ROUTER_PROVIDERS } from '@angular/router-deprecated'; @Component({ selector: 'my-app', template: ` <h1>{{title}}</h1> <nav> <a [routerLink]="['Heroes']">Heroes</a> <a [routerLink]="['Dashboard']">Dashboard</a> <router-outlet></router-outlet> </nav> `, styleUrls: ['app/app.component.css'], directives: [ROUTER_DIRECTIVES], providers: [ ROUTER_PROVIDERS, HeroService ] }) @RouteConfig([ { path: '/detail/:id', name: 'HeroDetail', component: HeroDetailComponent }, { path: '/dashboard', name: 'Dashboard', component: DashboardComponent, useAsDefault: true }, { path: '/heroes', name: 'Heroes', component: HeroesComponent } ]) export class
AppComponent {
  title = 'Tour of Heroes';
}
webpack.config.shared.js
function
makeVendorEntry(config) {
    const packageJson = require('./package.json');
    const vendorDependencies = Object.keys(packageJson['dependencies']);

    // Keep only the third-party dependencies that are neither bundled in the
    // main entry nor explicitly excluded.
    const vendorModulesMinusExclusions = vendorDependencies.filter(vendorModule =>
        config.mainModules.indexOf(vendorModule) === -1 &&
        config.modulesToExclude.indexOf(vendorModule) === -1);

    return vendorModulesMinusExclusions;
}

exports.makeVendorEntry = makeVendorEntry;
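// Usage sketch (not part of the original file); the module names passed in
// `mainModules` / `modulesToExclude` below are purely illustrative.
//
//   const { makeVendorEntry } = require('./webpack.config.shared');
//   const vendor = makeVendorEntry({ mainModules: ['main'], modulesToExclude: ['electron'] });
//   module.exports = { entry: { vendor, main: './src/main.ts' } };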
test_quoted_or_list.py
from pytest import raises from ..quoted_or_list import quoted_or_list def test_does_not_accept_an_empty_list(): with raises(StopIteration): quoted_or_list([]) def test_returns_single_quoted_item(): assert quoted_or_list(["A"]) == '"A"'
assert quoted_or_list(["A", "B"]) == '"A" or "B"' def test_returns_comma_separated_many_item_list(): assert quoted_or_list(["A", "B", "C"]) == '"A", "B" or "C"' def test_limits_to_five_items(): assert quoted_or_list(["A", "B", "C", "D", "E", "F"]) == '"A", "B", "C", "D" or "E"'
def test_returns_two_item_list():
test_op_level5.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Support level5 operator test cases. """ import math import numpy as np import tvm from tvm import relay from tvm.relay import transform from tvm.relay.testing import ctx_list import topi.testing def run_infer_type(expr): mod = relay.Module.from_expr(expr) mod = transform.InferType()(mod) entry = mod["main"] return entry if isinstance(expr, relay.Function) else entry.body def test_resize_infer_type(): n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w") x = relay.var("x", relay.TensorType((n, c, h, w), "int8")) th, tw = tvm.var("th"), tvm.var("tw") z = relay.image.resize(x, (th, tw)) zz = run_infer_type(z) assert zz.checked_type == relay.TensorType((n, c, th, tw), "int8") x = relay.var("x", relay.TensorType((n, c, h, w), "int8")) z= relay.image.resize(x, (100, 200), "NCHW", "bilinear", True) assert "size=" in z.astext() zz = run_infer_type(z) assert zz.checked_type == relay.TensorType((n, c, 100, 200), "int8") def test_resize(): def verify_resize(dshape, scale, method, layout): if layout == "NHWC": size = (dshape[1] * scale, dshape[2] * scale) else: size = (dshape[2] * scale, dshape[3] * scale) x_data = np.random.uniform(size=dshape).astype("float32") if method == "bilinear": ref_res = topi.testing.bilinear_resize_python(x_data, size, layout) else: ref_res = topi.testing.upsampling_python(x_data, (scale, scale), layout) x = relay.var("x", relay.TensorType(dshape, "float32")) z = relay.image.resize(x, size, layout, method, True) assert "size=" in z.astext() zz = run_infer_type(z) assert zz.checked_type == relay.TensorType(ref_res.shape, "float32") func = relay.Function([x], z) for target, ctx in ctx_list(): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, ctx=ctx, target=target) op_res = intrp.evaluate(func)(x_data) tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-4) for method in ["bilinear", "nearest_neighbor"]: for layout in ["NHWC", "NCHW"]: verify_resize((1, 4, 4, 4), 2, method, layout) def test_multibox_prior(): def get_ref_result(dshape, sizes=(1.0,), ratios=(1.0,), steps=(-1.0, -1.0), offsets=(0.5, 0.5), clip=True): in_height = dshape[2] in_width = dshape[3] num_sizes = len(sizes) num_ratios = len(ratios) size_ratio_concat = sizes + ratios steps_h = steps[0] if steps[0] > 0 else 1.0 / in_height steps_w = steps[1] if steps[1] > 0 else 1.0 / in_width offset_h = offsets[0] offset_w = offsets[1] oshape = (1, in_height * in_width * (num_sizes + num_ratios - 1), 4) dtype = "float32" np_out = np.zeros(oshape).astype(dtype) for i in range(in_height): center_h = (i + offset_h) * steps_h for j in range(in_width): center_w = (j + offset_w) * steps_w for k in range(num_sizes + num_ratios - 1): w = size_ratio_concat[k] * in_height / in_width / 2.0 if k < num_sizes else \ 
size_ratio_concat[0] * in_height / in_width * math.sqrt(size_ratio_concat[k + 1]) / 2.0 h = size_ratio_concat[k] / 2.0 if k < num_sizes else \ size_ratio_concat[0] / math.sqrt(size_ratio_concat[k + 1]) / 2.0 count = i * in_width * (num_sizes + num_ratios - 1) + j * (num_sizes + num_ratios - 1) + k np_out[0][count][0] = center_w - w np_out[0][count][1] = center_h - h np_out[0][count][2] = center_w + w np_out[0][count][3] = center_h + h if clip: np_out = np.clip(np_out, 0, 1) return np_out def verify_multibox_prior(x, dshape, ref_res, sizes=(1.0,), ratios=(1.0,), steps=(-1.0, -1.0), offsets=(0.5, 0.5), clip=True, check_size=False, check_type_only=False): z = relay.vision.multibox_prior(x, sizes, ratios, steps, offsets, clip) zz = run_infer_type(z) if check_size: assert "sizes=" in z.astext() assert zz.checked_type == relay.TensorType( (1, dshape[2] * dshape[3] * (len(sizes) + len(ratios) - 1), 4), "float32") if check_type_only: return data = np.random.uniform(low=-1, high=1, size=dshape).astype("float32") func = relay.Function([x], z) func = run_infer_type(func) for target, ctx in ctx_list(): intrp1 = relay.create_executor("graph", ctx=ctx, target=target) op_res1 = intrp1.evaluate(func)(data) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5) intrp2 = relay.create_executor("debug", ctx=ctx, target=target) op_res2 = intrp2.evaluate(func)(data) tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5) sizes = (0.3, 1.5, 0.7) ratios = (1.3, 2.4) steps = (2.0, 1.5) offsets = (0.2, 0.3) dshape = (1, 3, 56, 56) ref_res = get_ref_result(dshape, sizes, ratios, steps, offsets) x = relay.var("x", relay.TensorType(dshape, "float32")) verify_multibox_prior(x, dshape, ref_res, sizes, ratios, steps, offsets, check_size=True) y = relay.var("y", relay.TensorType((tvm.var("n"), 3, 56, 56), "float32")) verify_multibox_prior(x, dshape, ref_res, sizes, ratios, steps, offsets, check_size=True, check_type_only=True) dshape = (1, 24, 32, 32) ref_res = get_ref_result(dshape, clip=False) x = relay.var("x", relay.TensorType(dshape, "float32")) verify_multibox_prior(x, dshape, ref_res, clip=False) y = relay.var("y", relay.TensorType((tvm.var("n"), 24, 32, 32), "float32")) verify_multibox_prior(x, dshape, ref_res, clip=False, check_type_only=True) def test_get_valid_counts(): def verify_get_valid_counts(dshape, score_threshold, id_index, score_index): dtype = "float32" batch_size, num_anchor, elem_length = dshape np_data = np.random.uniform(low=-2, high=2, size=dshape).astype(dtype) np_out1 = np.zeros(shape=(batch_size,)) np_out2 = np.zeros(shape=dshape).astype(dtype) for i in range(batch_size): np_out1[i] = 0 inter_idx = 0 for j in range(num_anchor): score = np_data[i, j, score_index] if score > score_threshold and (id_index < 0 or np_data[i, j, id_index] >= 0): for k in range(elem_length): np_out2[i, inter_idx, k] = np_data[i, j, k] np_out1[i] += 1 inter_idx += 1 if j >= np_out1[i]: for k in range(elem_length): np_out2[i, j, k] = -1.0 x = relay.var("x", relay.ty.TensorType(dshape, dtype)) z = relay.vision.get_valid_counts(x, score_threshold, id_index, score_index) assert "score_threshold" in z.astext() func = relay.Function([x], z.astuple()) func = run_infer_type(func) for target, ctx in ctx_list(): if target == 'cuda': return intrp = relay.create_executor("debug", ctx=ctx, target=target) out = intrp.evaluate(func)(np_data) tvm.testing.assert_allclose(out[0].asnumpy(), np_out1, rtol=1e-3, atol=1e-04) tvm.testing.assert_allclose(out[1].asnumpy(), np_out2, rtol=1e-3, atol=1e-04) 
verify_get_valid_counts((1, 2500, 6), 0, 0, 1) verify_get_valid_counts((1, 2500, 5), -1, -1, 0) verify_get_valid_counts((3, 1000, 6), 0.55, 1, 0) verify_get_valid_counts((16, 500, 5), 0.95, -1, 0) def test_non_max_suppression(): def verify_nms(x0_data, x1_data, dshape, ref_res, ref_indices_res, iou_threshold=0.5, force_suppress=False, top_k=-1, check_type_only=False): x0 = relay.var("x0", relay.ty.TensorType(dshape, "float32")) x1 = relay.var("x1", relay.ty.TensorType((dshape[0],), "int32")) z = relay.vision.non_max_suppression(x0, x1, max_output_size = -1, \ iou_threshold = iou_threshold, force_suppress = force_suppress, \ top_k = top_k, return_indices=False) z_indices = relay.vision.non_max_suppression(x0, x1, max_output_size = -1, \ iou_threshold = iou_threshold, force_suppress = force_suppress, \ top_k = top_k) assert "iou_threshold" in z.astext() assert "iou_threshold" in z_indices.astext() zz = run_infer_type(z) zz_indices = run_infer_type(z_indices) assert zz.checked_type == relay.ty.TensorType(dshape, "float32") assert zz_indices.checked_type == relay.ty.TensorType((dshape[0], dshape[1]), "int32") if check_type_only: return func = relay.Function([x0, x1], z) func = run_infer_type(func) func_indices = relay.Function([x0, x1], z_indices) func_indices = run_infer_type(func_indices) for target, ctx in ctx_list(): intrp1 = relay.create_executor("graph", ctx=ctx, target=target) op_res1 = intrp1.evaluate(func)(x0_data, x1_data) op_indices_res1 = intrp1.evaluate(func_indices)(x0_data, x1_data) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5) tvm.testing.assert_allclose(op_indices_res1.asnumpy(), ref_indices_res, rtol=1e-5) intrp2 = relay.create_executor("debug", ctx=ctx, target=target) op_res2 = intrp2.evaluate(func)(x0_data, x1_data) op_indices_res2 = intrp2.evaluate(func_indices)(x0_data, x1_data) tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5) tvm.testing.assert_allclose(op_indices_res2.asnumpy(), ref_indices_res, rtol=1e-5) np_data = np.array([[[0, 0.8, 1, 20, 25, 45], [1, 0.7, 30, 60, 50, 80], [0, 0.4, 4, 21, 19, 40], [2, 0.9, 35, 61, 52, 79], [1, 0.5, 100, 60, 70, 110]]]).astype("float32") np_valid_count = np.array([4]).astype("int32") np_result = np.array([[[2, 0.9, 35, 61, 52, 79], [0, 0.8, 1, 20, 25, 45], [-1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1]]]) np_indices_result = np.array([[3, 0, -1, -1, -1]]) num_anchors = 5 dshape = (tvm.var("n"), num_anchors, 6) verify_nms(np_data, np_valid_count, dshape, np_result, np_indices_result, force_suppress=True, top_k=2, check_type_only=True) dshape = (1, num_anchors, 6) verify_nms(np_data, np_valid_count, dshape, np_result, np_indices_result, force_suppress=True, top_k=2, check_type_only=False) np_result = np.array([[[2, 0.9, 35, 61, 52, 79], [0, 0.8, 1, 20, 25, 45], [1, 0.7, 30, 60, 50, 80], [-1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1]]]) np_indices_result = np.array([[3, 0, 1, -1, -1]]) dshape = (tvm.var("n"), num_anchors, 6) verify_nms(np_data, np_valid_count, dshape, np_result, np_indices_result, check_type_only=True) dshape = (1, num_anchors, 6) verify_nms(np_data, np_valid_count, dshape, np_result, np_indices_result, top_k=3) def test_multibox_transform_loc(): def test_default_value(): num_anchors = 3 num_classes = 3 np_cls_prob = np.array( [[[0.2, 0.5, 0.3], [0.25, 0.3, 0.45], [0.7, 0.1, 0.2]]]).astype("float32") np_loc_preds = np.array( [[0.1, -0.2, 0.3, 0.2, 0.2, 0.4, 0.5, -0.3, 0.7, -0.2, -0.4, -0.8]]).astype("float32") np_anchors = np.array( [[[-0.1, -0.1, 
0.1, 0.1], [-0.2, -0.2, 0.2, 0.2], [1.2, 1.2, 1.5, 1.5]]]).astype("float32") expected_np_out = np.array([[[1, 0.69999999, 0, 0, 0.10818365, 0.10008108], [0, 0.44999999, 1, 1, 1, 1], [0, 0.30000001, 0, 0, 0.22903419, 0.20435292]]]) cls_prob = relay.var( "cls_prob", relay.ty.TensorType((1, num_anchors, num_classes), "float32")) loc_pred = relay.var( "loc_pred", relay.ty.TensorType((1, num_anchors * 4), "float32")) anchors = relay.var( "anchors", relay.ty.TensorType((1, num_anchors, 4), "float32")) mtl = relay.vision.multibox_transform_loc( cls_prob=cls_prob, loc_pred=loc_pred, anchor=anchors) ret = run_infer_type(mtl.astuple()) ref_type = relay.ty.TupleType( tvm.convert([ relay.ty.TensorType((1, num_anchors, 6), "float32"), relay.ty.TensorType((1, ), "int") ])) assert ret.checked_type == ref_type nms = relay.vision.non_max_suppression(mtl[0], mtl[1], return_indices=False) func = relay.Function([cls_prob, loc_pred, anchors], nms) func = run_infer_type(func) for target, ctx in ctx_list(): intrp1 = relay.create_executor("graph", ctx=ctx, target=target) op_res1 = intrp1.evaluate(func)(np_cls_prob, np_loc_preds, np_anchors) tvm.testing.assert_allclose(op_res1.asnumpy(), expected_np_out, rtol=1e-5) intrp2 = relay.create_executor("debug", ctx=ctx, target=target) op_res2 = intrp2.evaluate(func)(np_cls_prob, np_loc_preds, np_anchors) tvm.testing.assert_allclose(op_res2.asnumpy(), expected_np_out, rtol=1e-5) def test_threshold(): num_anchors = 5 num_classes = 5 n = tvm.var("n") cls_prob = relay.var( "cls_prob", relay.ty.TensorType((n, num_anchors, num_classes), "float32")) loc_pred = relay.var( "loc_pred", relay.ty.TensorType((n, num_anchors * 4), "float32")) anchors = relay.var( "anchors", relay.ty.TensorType((1, num_anchors, 4), "float32")) threshold = 0.02 variances = (0.2, 0.2, 0.3, 0.3) ret = relay.vision.multibox_transform_loc( cls_prob=cls_prob, loc_pred=loc_pred, anchor=anchors, threshold=threshold, variances=variances) ret = run_infer_type(ret.astuple()) ref_type = relay.ty.TupleType( tvm.convert([ relay.ty.TensorType((n, num_anchors, 6), "float32"), relay.ty.TensorType((n, ), "int") ])) assert ret.checked_type == ref_type test_default_value() test_threshold() def test_roi_align():
def test_roi_pool(): def verify_roi_pool(data_shape, rois_shape, pooled_size, spatial_scale): data = relay.var("data", relay.ty.TensorType(data_shape, "float32")) rois = relay.var("rois", relay.ty.TensorType(rois_shape, "float32")) z = relay.vision.roi_pool(data, rois, pooled_size=(pooled_size, pooled_size), spatial_scale=spatial_scale, layout="NCHW") zz = run_infer_type(z) batch, channel, in_size, _ = data_shape num_roi = rois_shape[0] assert zz.checked_type == relay.ty.TensorType( (num_roi, channel, pooled_size, pooled_size), "float32") func = relay.Function([data, rois], z) func = run_infer_type(func) np_data = np.random.uniform(size=data_shape).astype("float32") np_rois = np.random.uniform(size=rois_shape).astype('float32') * in_size np_rois[:, 0] = np.random.randint(low = 0, high = batch, size = num_roi).astype('float32') ref_res = topi.testing.roi_pool_nchw_python(np_data, np_rois, pooled_size=pooled_size, spatial_scale=spatial_scale) for target, ctx in ctx_list(): intrp1 = relay.create_executor("graph", ctx=ctx, target=target) op_res1 = intrp1.evaluate(func)(np_data, np_rois) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-4) intrp2 = relay.create_executor("debug", ctx=ctx, target=target) op_res2 = intrp2.evaluate(func)(np_data, np_rois) tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-4) verify_roi_pool((1, 4, 16, 16), (32, 5), pooled_size=7, spatial_scale=1.0) verify_roi_pool((4, 4, 16, 16), (32, 5), pooled_size=7, spatial_scale=0.5) def test_proposal(): def verify_proposal(np_cls_prob, np_bbox_pred, np_im_info, np_out, attrs): cls_prob = relay.var("cls_prob", relay.ty.TensorType(np_cls_prob.shape, "float32")) bbox_pred = relay.var("bbox_pred", relay.ty.TensorType(np_bbox_pred.shape, "float32")) im_info = relay.var("im_info", relay.ty.TensorType(np_im_info.shape, "float32")) z = relay.vision.proposal(cls_prob, bbox_pred, im_info, **attrs) zz = run_infer_type(z) assert zz.checked_type == relay.ty.TensorType(np_out.shape, "float32") func = relay.Function([cls_prob, bbox_pred, im_info], z) func = run_infer_type(func) for target in ['llvm', 'cuda']: if not tvm.module.enabled(target): print("Skip test because %s is not enabled." 
% target) continue ctx = tvm.context(target, 0) intrp1 = relay.create_executor("graph", ctx=ctx, target=target) op_res1 = intrp1.evaluate(func)(np_cls_prob, np_bbox_pred, np_im_info) tvm.testing.assert_allclose(op_res1.asnumpy(), np_out, rtol=1e-4) intrp2 = relay.create_executor("debug", ctx=ctx, target=target) op_res2 = intrp2.evaluate(func)(np_cls_prob, np_bbox_pred, np_im_info) tvm.testing.assert_allclose(op_res2.asnumpy(), np_out, rtol=1e-4) attrs = { 'scales': (0.5,), 'ratios': (0.5,), 'feature_stride': 16, 'iou_loss': False, 'rpn_min_size': 16, 'threshold': 0.7, 'rpn_pre_nms_top_n': 200, 'rpn_post_nms_top_n': 4, } np_cls_prob = np.array([[ [[0.3, 0.6, 0.2], [0.4, 0.7, 0.5], [0.1, 0.4, 0.3]], [[0.7, 0.5, 0.3], [0.6, 0.4, 0.8], [0.9, 0.2, 0.5]] ]], dtype='float32') np_bbox_pred = np.array([[ [[0.5, 1.0, 0.6], [0.8, 1.2, 2.0], [0.9, 1.0, 0.8]], [[0.5, 1.0, 0.7], [0.8, 1.2, 1.6], [2.1, 1.5, 0.7]], [[1.0, 0.5, 0.7], [1.5, 0.9, 1.6], [1.4, 1.5, 0.8]], [[1.0, 0.5, 0.6], [1.5, 0.9, 2.0], [1.8, 1.0, 0.9]], ]], dtype='float32') np_im_info = np.array([[48., 48., 1.]], dtype='float32') np_out = np.array([ [0., 0., 2.8451548,28.38012, 18.154846], [0., 0., 15.354933, 41.96971, 41.245064], [0., 18.019852, 1.0538368, 51.98015, 25.946163], [0., 27.320923, -1.266357, 55., 24.666357] ], dtype='float32') verify_proposal(np_cls_prob, np_bbox_pred, np_im_info, np_out, attrs) np_out = np.array([ [ 0., -5.25, -2.5, 21.75, 19.], [ 0., 11.25, -2., 37.25, 18.5], [ 0., 26.849998, -2.3000002, 53.45, 18.6], [ 0., -4.95, 13.799999, 22.25, 35.5] ], dtype='float32') attrs['iou_loss'] = True verify_proposal(np_cls_prob, np_bbox_pred, np_im_info, np_out, attrs) def test_yolo_reorg_infer_shape(): def verify_yolo_reorg(shape, stride, out_shape): x = relay.var("x", relay.TensorType(shape, "float32")) z = relay.vision.yolo_reorg(x, stride=stride) zz = run_infer_type(z) assert "stride=" in z.astext() assert zz.checked_type == relay.ty.TensorType(out_shape, "float32") n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w") idxd = tvm.indexdiv verify_yolo_reorg((n, c, 20, 20), 10, (n, c*10*10, 2, 2)) verify_yolo_reorg((n, c, h, w), 2, (n, c*2*2, idxd(h, 2), idxd(w, 2))) def test_yolo_reorg(): def verify_yolo_reorg(shape, stride): x_data = np.random.uniform(low=-1, high=1, size=shape).astype("float32") ref_res = topi.testing.reorg_python(x_data, stride) x = relay.var("x", relay.TensorType(shape, "float32")) z = relay.vision.yolo_reorg(x, stride=stride) zz = run_infer_type(z) assert "stride=" in z.astext() assert zz.checked_type == relay.ty.TensorType(ref_res.shape, "float32") func = relay.Function([x], z) for target, ctx in ctx_list(): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, ctx=ctx, target=target) op_res = intrp.evaluate(func)(x_data) tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5) verify_yolo_reorg((1, 100, 20, 20), 10) verify_yolo_reorg((1, 4, 6, 6), 2) def test_deformable_conv2d(): def test_infer_type(batch, in_channel, size, out_channel, deformable_groups, groups): data_shape = (batch, in_channel, size, size) data = relay.var("data", shape=data_shape) offset = relay.var("offset") kernel = relay.var("kernel") kernel_size = (3, 3) y = relay.nn.deformable_conv2d(data, offset, kernel, strides=(1, 1), padding=(1, 1), dilation=(1, 1), kernel_size=kernel_size, deformable_groups=deformable_groups, groups=groups, channels=out_channel) weight_shape = (out_channel, in_channel // groups, kernel_size[0], kernel_size[1]) out_shape = (batch, out_channel, size, size) offset_shape 
= (batch, 2 * kernel_size[0] * kernel_size[1] * deformable_groups, out_shape[2], out_shape[3]) yy = run_infer_type(y) assert yy.checked_type == relay.TensorType(out_shape) assert yy.args[1].checked_type == relay.TensorType(offset_shape), yy.args[1].checked_type assert yy.args[2].checked_type == relay.TensorType(weight_shape) test_infer_type(1, 4, 16, 4, 4, 1) test_infer_type(2, 4, 16, 4, 1, 2) def test_run(batch, in_channel, size, out_channel, deformable_groups, groups): kernel_size = (3, 3) data_shape = (batch, in_channel, size, size) offset_shape = (batch, 2 * kernel_size[0] * kernel_size[1] * deformable_groups, size, size) kernel_shape = (out_channel, in_channel // groups, kernel_size[0], kernel_size[1]) dtype = 'float32' data = relay.var("data", shape=data_shape, dtype=dtype) offset = relay.var("offset") kernel = relay.var("kernel") y = relay.nn.deformable_conv2d(data, offset, kernel, strides=(1, 1), padding=(1, 1), dilation=(1, 1), kernel_size=kernel_size, deformable_groups=deformable_groups, groups=groups, channels=out_channel) func = relay.Function([data, offset, kernel], y) data = np.random.uniform(size=data_shape).astype(dtype) offset = np.random.uniform(size=offset_shape).astype(dtype) kernel = np.random.uniform(size=kernel_shape).astype(dtype) ref_res = topi.testing.deformable_conv2d_nchw_python(data, offset, kernel, stride=(1, 1), padding=(1, 1), dilation=(1, 1), deformable_groups=deformable_groups, groups=groups) for target, ctx in ctx_list(): for kind in ["graph", "debug"]: intrp1 = relay.create_executor(kind, ctx=ctx, target=target) op_res1 = intrp1.evaluate(func)(data, offset, kernel) tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) test_run(1, 4, 16, 4, 1, 1) test_run(2, 4, 16, 4, 4, 1) if __name__ == "__main__": test_resize_infer_type() test_resize() test_multibox_prior() test_multibox_transform_loc() test_get_valid_counts() test_roi_align() test_roi_pool() test_proposal() test_yolo_reorg_infer_shape() test_yolo_reorg() test_non_max_suppression() test_deformable_conv2d()
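# Note: most tests above share one pattern: build a small Relay graph, check the
# inferred type with run_infer_type, then compare the "graph" and "debug"
# executor results against the topi.testing NumPy reference on every target
# enabled in ctx_list() (test_proposal loops over 'llvm'/'cuda' explicitly).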
0ccf1c685e7b8ed726bdbc78ca3b67a7.js
load("201224b0d1c296b45befd2285e95dd42.js"); // Breakpoints should be hit on scripts gotten not via Debugger.Frame. var g = newGlobal(); g.eval("function f(x) { return x + 1; }"); g.eval("function g(x) { f(x); }"); // Warm up so f gets OSRed into the jits and g inlines f. g.eval("for (var i = 0; i < 10000; i++) g(i);"); var dbg = new Debugger; var gw = dbg.addDebuggee(g); var fw = gw.getOwnPropertyDescriptor("f").value; var hits = 0;
fw.script.setBreakpoint(0, { hit: function(frame) { hits++; } }); g.eval("g(42);"); assertEq(hits, 1);
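// Why the warm-up matters: even after f has been OSR-compiled and inlined
// into g, setting a breakpoint on f's script is expected to invalidate the
// affected JIT code, so the hit handler still fires exactly once for g(42).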
Dropdown.d.ts
import * as React from 'react'; import TooltipOptions from '../tooltip/tooltipoptions'; import { CSSTransitionProps } from '../csstransition'; import { VirtualScrollerProps } from '../virtualscroller'; type DropdownOptionGroupTemplateType = React.ReactNode | ((option: any, index: number) => React.ReactNode); type DropdownValueTemplateType = React.ReactNode | ((option: any, props: DropdownProps) => React.ReactNode); type DropdownItemTemplateType = React.ReactNode | ((option: any) => React.ReactNode); type DropdownEmptyMessageType = React.ReactNode | ((props: DropdownProps) => React.ReactNode); type DropdownEmptyFilterMessageType = React.ReactNode | ((props: DropdownProps) => React.ReactNode); type DropdownOptionDisabledType = string | ((option: any) => boolean); type DropdownAppendToType = 'self' | HTMLElement | undefined | null; interface DropdownChangeTargetOptions { name: string; id: string; value: any; } interface DropdownChangeParams { originalEvent: React.SyntheticEvent; value: any; stopPropagation(): void; preventDefault(): void; target: DropdownChangeTargetOptions; } interface DropdownFilterParams { originalEvent: React.SyntheticEvent; filter: string; } export interface DropdownProps { id?: string; inputRef?: React.Ref<HTMLSelectElement>; name?: string; value?: any; options?: any[]; optionLabel?: string; optionValue?: string; optionDisabled?: DropdownOptionDisabledType; optionGroupLabel?: string; optionGroupChildren?: string; optionGroupTemplate?: DropdownOptionGroupTemplateType; valueTemplate?: DropdownValueTemplateType; itemTemplate?: DropdownItemTemplateType; style?: object; className?: string; virtualScrollerOptions?: VirtualScrollerProps; scrollHeight?: string; filter?: boolean; filterBy?: string; filterMatchMode?: string; filterPlaceholder?: string; filterLocale?: string; emptyMessage?: DropdownEmptyMessageType; emptyFilterMessage?: DropdownEmptyFilterMessageType; editable?: boolean; placeholder?: string; required?: boolean; disabled?: boolean; appendTo?: DropdownAppendToType; tabIndex?: number; autoFocus?: boolean; filterInputAutoFocus?: boolean; resetFilterOnHide?: boolean; showFilterClear?: boolean; panelClassName?: string; panelStyle?: object; dataKey?: string; inputId?: string; showClear?: boolean; maxLength?: number; tooltip?: string; tooltipOptions?: TooltipOptions; ariaLabel?: string; ariaLabelledBy?: string; transitionOptions?: CSSTransitionProps; dropdownIcon?: string; showOnFocus?: boolean; onChange?(e: DropdownChangeParams): void; onFocus?(event: React.FocusEvent<HTMLInputElement>): void; onBlur?(event: React.FocusEvent<HTMLInputElement>): void; onMouseDown?(event: React.MouseEvent<HTMLElement>): void; onContextMenu?(event: React.MouseEvent<HTMLElement>): void; onShow?(): void; onHide?(): void; onFilter?(e: DropdownFilterParams): void; } export declare class
Dropdown extends React.Component<DropdownProps, any> { }
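// Typical usage (illustrative sketch, not part of the type definitions;
// `cities` and `selectedCity` are made-up names, while the props used here
// -- value, options, optionLabel, placeholder, onChange -- are all declared
// in DropdownProps above):
// <Dropdown value={selectedCity} options={cities} optionLabel="name"
//           placeholder="Select a city"
//           onChange={(e) => setSelectedCity(e.value)} />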
sivr.rs
use crate::local::{LocalApic, LocalApicRegister, LocalApicRegisterIndex, InterruptVector}; bitflags! { pub struct SivrFlags: u32 { const VECTOR = 0b0000_0000_0000_11111111; const APIC_ENABLE = 0b0000_0000_0001_00000000; const FOCUS_PROCESSOR_CHECKING = 0b0000_0000_0010_00000000; const EOI_BROADCAST_SUPRESSION = 0b0000_0001_0000_00000000; const UNUSED = 0b1111_1110_1100_00000000; } } impl SivrFlags { pub fn is_enabled(&self) -> bool { self.contains(SivrFlags::APIC_ENABLE) } pub fn
is_focus_processor_checked
(&self) -> bool { self.contains(SivrFlags::FOCUS_PROCESSOR_CHECKING) } pub fn is_eoi_broadcast_supressed(&self) -> bool { self.contains(SivrFlags::EOI_BROADCAST_SUPRESSION) } pub fn vector(&self) -> InterruptVector { InterruptVector((*self & SivrFlags::VECTOR).bits()) } } pub struct SpuriousInterruptVectorRegister; impl LocalApicRegister for SpuriousInterruptVectorRegister { type Value = SivrFlags; unsafe fn read(&self, apic: &dyn LocalApic) -> Self::Value { Self::Value::from_bits_truncate(apic.read_reg_32(LocalApicRegisterIndex::SpuriousInterrupt)) } unsafe fn write(&self, apic: &dyn LocalApic, value: Self::Value) { apic.write_reg_32(LocalApicRegisterIndex::SpuriousInterrupt, value.bits()); } }
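// Usage sketch (assumes some `apic: &dyn LocalApic` handle is available; it is
// not part of this module): software-enable the local APIC with spurious
// vector 0xFF by writing through this register.
//
//     let reg = SpuriousInterruptVectorRegister;
//     unsafe {
//         let mut v = reg.read(apic);
//         v.insert(SivrFlags::APIC_ENABLE); // set the enable bit
//         v.insert(SivrFlags::from_bits_truncate(0xFF)); // vector bits
//         reg.write(apic, v);
//     }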
ButtonGroup.test.tsx
import React from 'react'; import { shallow } from 'enzyme'; import ButtonGroup from '../../src/components/ButtonGroup'; import Button from '../../src/components/Button'; describe('<ButtonGroup />', () => { it('renders buttons', () => { const wrapper = shallow( <ButtonGroup> <Button>One</Button> <Button>Two</Button> <Button>Three</Button> </ButtonGroup>, ); expect(wrapper.find(Button)).toHaveLength(3); }); it('renders buttons to the end', () => { const wrapper = shallow( <ButtonGroup endAlign> <Button>One</Button> <Button>Two</Button> <Button>Three</Button> </ButtonGroup>, ); expect(wrapper.prop('className')).toMatch('buttonGroup_endAlign'); expect(wrapper.find(Button)).toHaveLength(3); }); it('renders buttons stacked', () => { const wrapper = shallow( <ButtonGroup stacked> <Button>One</Button> <Button>Two</Button> <Button>Three</Button> </ButtonGroup>, ); expect(wrapper.prop('className')).toMatch('buttonGroup_stacked'); expect(wrapper.find(Button)).toHaveLength(3); }); it('renders a single button', () => { const wrapper = shallow( <ButtonGroup> <Button>One</Button> </ButtonGroup>, ); expect(wrapper.find(Button)).toHaveLength(1); }); it('handles buttons that return falsy values', () => { const wrapper = shallow( <ButtonGroup stacked> {false && <Button>One</Button>} {true && <Button>Two</Button>} {null && <Button>Three</Button>} </ButtonGroup>, ); expect(wrapper.find(Button)).toHaveLength(1); }); it('handles components that return a falsy value', () => { function
FakeButton
() { return null; } const wrapper = shallow( <ButtonGroup stacked> <FakeButton /> </ButtonGroup>, ); expect(wrapper).toMatchSnapshot(); }); });
network.rs
//! openstack metadata fetcher use std::collections::HashMap; use openssh_keys::PublicKey; use errors::*; use network; use providers::MetadataProvider; use retry; const URL: &str = "http://169.254.169.254/latest/meta-data"; #[derive(Clone, Debug)] pub struct OpenstackProvider { client: retry::Client, }
impl OpenstackProvider { pub fn try_new() -> Result<OpenstackProvider> {
let client = retry::Client::try_new()?; Ok(OpenstackProvider { client }) } fn endpoint_for(key: &str) -> String { format!("{}/{}", URL, key) } fn fetch_keys(&self) -> Result<Vec<String>> { let keys_list: Option<String> = self .client .get(retry::Raw, OpenstackProvider::endpoint_for("public-keys")) .send()?; let mut keys = Vec::new(); if let Some(keys_list) = keys_list { for l in keys_list.lines() { let tokens: Vec<&str> = l.split('=').collect(); if tokens.len() != 2 { return Err("error parsing keyID".into()); } let key: String = self .client .get( retry::Raw, OpenstackProvider::endpoint_for(&format!( "public-keys/{}/openssh-key", tokens[0] )), ) .send()? .ok_or("missing ssh key")?; keys.push(key); } } Ok(keys) } } impl MetadataProvider for OpenstackProvider { fn attributes(&self) -> Result<HashMap<String, String>> { let mut out = HashMap::with_capacity(4); let add_value = |map: &mut HashMap<_, _>, key: &str, name| -> Result<()> { let value = self .client .get(retry::Raw, OpenstackProvider::endpoint_for(name)) .send()?; if let Some(value) = value { map.insert(key.to_string(), value); } Ok(()) }; add_value(&mut out, "OPENSTACK_HOSTNAME", "hostname")?; add_value(&mut out, "OPENSTACK_INSTANCE_ID", "instance-id")?; add_value(&mut out, "OPENSTACK_IPV4_LOCAL", "local-ipv4")?; add_value(&mut out, "OPENSTACK_IPV4_PUBLIC", "public-ipv4")?; Ok(out) } fn hostname(&self) -> Result<Option<String>> { self.client .get(retry::Raw, OpenstackProvider::endpoint_for("hostname")) .send() } fn ssh_keys(&self) -> Result<Vec<PublicKey>> { let mut out = Vec::new(); for key in &self.fetch_keys()? { let key = PublicKey::parse(&key)?; out.push(key); } Ok(out) } fn networks(&self) -> Result<Vec<network::Interface>> { Ok(vec![]) } fn network_devices(&self) -> Result<Vec<network::Device>> { Ok(vec![]) } fn boot_checkin(&self) -> Result<()> { warn!("boot check-in requested, but not supported on this platform"); Ok(()) } }
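// Note on the listing parsed in fetch_keys() above: the metadata service is
// expected to return the `public-keys` index in EC2-style "index=keyname"
// lines (e.g. "0=mykey"; sample value illustrative), and each key is then
// fetched individually from public-keys/<index>/openssh-key.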
cloudproviders.go
// Copyright 2019 Yunion // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package models import ( "context" "database/sql" "fmt" "strings" "sync" "time" "yunion.io/x/jsonutils" "yunion.io/x/log" "yunion.io/x/pkg/errors" "yunion.io/x/pkg/tristate" "yunion.io/x/pkg/util/compare" "yunion.io/x/pkg/util/timeutils" "yunion.io/x/pkg/utils" "yunion.io/x/sqlchemy" "yunion.io/x/onecloud/pkg/apis" api "yunion.io/x/onecloud/pkg/apis/compute" "yunion.io/x/onecloud/pkg/cloudcommon/db" "yunion.io/x/onecloud/pkg/cloudcommon/db/lockman" "yunion.io/x/onecloud/pkg/cloudcommon/db/proxy" "yunion.io/x/onecloud/pkg/cloudcommon/db/taskman" "yunion.io/x/onecloud/pkg/cloudcommon/validators" "yunion.io/x/onecloud/pkg/cloudprovider" "yunion.io/x/onecloud/pkg/compute/options" "yunion.io/x/onecloud/pkg/httperrors" "yunion.io/x/onecloud/pkg/mcclient" "yunion.io/x/onecloud/pkg/mcclient/auth" "yunion.io/x/onecloud/pkg/mcclient/modules/identity" "yunion.io/x/onecloud/pkg/util/logclient" "yunion.io/x/onecloud/pkg/util/rbacutils" "yunion.io/x/onecloud/pkg/util/stringutils2" ) type SCloudproviderManager struct { db.SEnabledStatusStandaloneResourceBaseManager db.SProjectizedResourceBaseManager SProjectMappingResourceBaseManager SSyncableBaseResourceManager } var CloudproviderManager *SCloudproviderManager func init() { CloudproviderManager = &SCloudproviderManager{ SEnabledStatusStandaloneResourceBaseManager: db.NewEnabledStatusStandaloneResourceBaseManager( SCloudprovider{}, "cloudproviders_tbl", "cloudprovider", "cloudproviders", ), } CloudproviderManager.SetVirtualObject(CloudproviderManager) } type SCloudprovider struct { db.SEnabledStatusStandaloneResourceBase db.SProjectizedResourceBase SSyncableBaseResource // Health status of the remote cloud service. For example, an account in arrears or a frozen project both count as unhealthy. // // | HealthStatus | Description | // |---------------|----------------------| // | normal | the remote end is healthy | // | insufficient | insufficient balance for on-demand resources | // | suspended | the remote end is frozen | // | arrears | the remote end is in arrears | // | unknown | unknown status, query failed | // | no permission | no permission to fetch billing info | // HealthStatus string `width:"16" charset:"ascii" default:"normal" nullable:"false" list:"domain"` // Hostname string `width:"64" charset:"ascii" nullable:"true"` // Column(VARCHAR(64, charset='ascii'), nullable=False) // port = Column(Integer, nullable=False) // Version string `width:"32" charset:"ascii" nullable:"true" list:"domain"` // Column(VARCHAR(32, charset='ascii'), nullable=True) // Sysinfo jsonutils.JSONObject `get:"domain"` // Column(JSONEncodedDict, nullable=True) AccessUrl string `width:"64" charset:"ascii" nullable:"true" list:"domain" update:"domain" create:"domain_optional"` // User credential info of the cloud account, e.g. user name, access key, etc. Account string `width:"128" charset:"ascii" nullable:"false" list:"domain" create:"domain_required"` // Secret info of the cloud account, e.g. password, access key secret, etc. This field is stored encrypted in the database. Google needs to store a key certificate here, so the field has to be fairly long Secret string `length:"0" charset:"ascii" nullable:"false" list:"domain" create:"domain_required"` // ID of the cloudaccount this provider belongs to CloudaccountId string `width:"36" charset:"ascii" nullable:"false" list:"user" create:"required"` // ProjectId string `name:"tenant_id" width:"128"
charset:"ascii" nullable:"true" list:"domain"` // LastSync time.Time `get:"domain" list:"domain"` // = Column(DateTime, nullable=True) // 云账号的平台信息 Provider string `width:"64" charset:"ascii" list:"domain" create:"domain_required"` SProjectMappingResourceBase } type pmCache struct { Id string CloudaccountId string AccountProjectMappingId string ManagerProjectMappingId string } func (self *pmCache) GetProjectMapping() (*SProjectMapping, error) { if len(self.ManagerProjectMappingId) > 0 { pm, err := GetRuleMapping(self.ManagerProjectMappingId) if err != nil { return nil, errors.Wrapf(err, "GetRuleMapping(%s)", self.ManagerProjectMappingId) } if pm.Enabled.IsTrue() { return pm, nil } } if len(self.AccountProjectMappingId) > 0 { return GetRuleMapping(self.AccountProjectMappingId) } return nil, errors.Wrapf(cloudprovider.ErrNotFound, "empty project mapping id") } var pmCaches map[string]*pmCache = map[string]*pmCache{} func refreshPmCaches() error { q := CloudproviderManager.Query().SubQuery() providers := q.Query(q.Field("cloudaccount_id"), q.Field("id"), q.Field("project_mapping_id").Label("manager_project_mapping_id")) sq := CloudaccountManager.Query().SubQuery() mq := providers.LeftJoin(sq, sqlchemy.Equals(q.Field("cloudaccount_id"), sq.Field("id"))).AppendField(sq.Field("project_mapping_id").Label("account_project_mapping_id")) caches := []pmCache{} err := mq.All(&caches) if err != nil { return errors.Wrapf(err, "q.All") } for i := range caches { pmCaches[caches[i].Id] = &caches[i] } return nil } func (self *SCloudprovider) GetProjectMapping() (*SProjectMapping, error) { cache, err := func() (*pmCache, error) { mp, ok := pmCaches[self.Id] if ok { return mp, nil } err := refreshPmCaches() if err != nil { return nil, errors.Wrapf(err, "refreshPmCaches") } return pmCaches[self.Id], nil }() if err != nil { return nil, errors.Wrapf(err, "get project mapping cache") } return cache.GetProjectMapping() } func (self *SCloudprovider) ValidateDeleteCondition(ctx context.Context, info jsonutils.JSONObject) error { // allow delete cloudprovider if it is disabled // account := self.GetCloudaccount() // if account != nil && account.EnableAutoSync { // return httperrors.NewInvalidStatusError("auto syncing is enabled on account") // } if self.GetEnabled() { return httperrors.NewInvalidStatusError("provider is enabled") } if self.SyncStatus != api.CLOUD_PROVIDER_SYNC_STATUS_IDLE { return httperrors.NewInvalidStatusError("provider is not idle") } // usage := self.getUsage() // if !usage.isEmpty() { // return httperrors.NewNotEmptyError("Not an empty cloud provider") // } return self.SEnabledStatusStandaloneResourceBase.ValidateDeleteCondition(ctx, nil) } func (manager *SCloudproviderManager) GetPublicProviderIdsQuery() *sqlchemy.SSubQuery { return manager.GetProviderIdsQuery(tristate.True, tristate.None, nil, nil) } func (manager *SCloudproviderManager) GetPrivateProviderIdsQuery() *sqlchemy.SSubQuery { return manager.GetProviderIdsQuery(tristate.False, tristate.False, nil, nil) } func (manager *SCloudproviderManager) GetOnPremiseProviderIdsQuery() *sqlchemy.SSubQuery { return manager.GetProviderIdsQuery(tristate.None, tristate.True, nil, nil) } func (manager *SCloudproviderManager) GetPrivateOrOnPremiseProviderIdsQuery() *sqlchemy.SSubQuery { return manager.GetProviderIdsQuery(tristate.False, tristate.None, nil, nil) } func (manager *SCloudproviderManager) GetProviderIdsQuery(isPublic tristate.TriState, isOnPremise tristate.TriState, providers []string, brands []string) *sqlchemy.SSubQuery { return 
manager.GetProviderFieldQuery("id", isPublic, isOnPremise, providers, brands) } func (manager *SCloudproviderManager) GetPublicProviderProvidersQuery() *sqlchemy.SSubQuery { return manager.GetProviderProvidersQuery(tristate.True, tristate.None) } func (manager *SCloudproviderManager) GetPrivateProviderProvidersQuery() *sqlchemy.SSubQuery { return manager.GetProviderProvidersQuery(tristate.False, tristate.False) } func (manager *SCloudproviderManager) GetOnPremiseProviderProvidersQuery() *sqlchemy.SSubQuery { return manager.GetProviderProvidersQuery(tristate.None, tristate.True) } func (manager *SCloudproviderManager) GetProviderProvidersQuery(isPublic tristate.TriState, isOnPremise tristate.TriState) *sqlchemy.SSubQuery { return manager.GetProviderFieldQuery("provider", isPublic, isOnPremise, nil, nil) } func (manager *SCloudproviderManager) GetProviderFieldQuery(field string, isPublic tristate.TriState, isOnPremise tristate.TriState, providers []string, brands []string) *sqlchemy.SSubQuery { q := manager.Query(field).Distinct() account := CloudaccountManager.Query().SubQuery() q = q.Join(account, sqlchemy.Equals( account.Field("id"), q.Field("cloudaccount_id")), ) if isPublic.IsTrue() { q = q.Filter(sqlchemy.IsTrue(account.Field("is_public_cloud"))) } else if isPublic.IsFalse() { q = q.Filter(sqlchemy.IsFalse(account.Field("is_public_cloud"))) } if isOnPremise.IsTrue() { q = q.Filter(sqlchemy.IsTrue(account.Field("is_on_premise"))) } else if isOnPremise.IsFalse() { q = q.Filter(sqlchemy.IsFalse(account.Field("is_on_premise"))) } if len(providers) > 0 || len(brands) > 0 { q = q.Filter(sqlchemy.OR( sqlchemy.In(account.Field("provider"), providers), sqlchemy.In(account.Field("brand"), brands), )) } return q.SubQuery() } func CloudProviderFilter(q *sqlchemy.SQuery, managerIdField sqlchemy.IQueryField, providers []string, brands []string, cloudEnv string) *sqlchemy.SQuery { if len(cloudEnv) == 0 && len(providers) == 0 && len(brands) == 0 { return q } isPublic := tristate.None isOnPremise := tristate.None includeOneCloud := false switch cloudEnv { case api.CLOUD_ENV_PUBLIC_CLOUD: isPublic = tristate.True case api.CLOUD_ENV_PRIVATE_CLOUD: isPublic = tristate.False isOnPremise = tristate.False case api.CLOUD_ENV_ON_PREMISE: isOnPremise = tristate.True includeOneCloud = true default: includeOneCloud = true } if includeOneCloud && len(providers) > 0 && !utils.IsInStringArray(api.CLOUD_PROVIDER_ONECLOUD, providers) { includeOneCloud = false } if includeOneCloud && len(brands) > 0 && !utils.IsInStringArray(api.CLOUD_PROVIDER_ONECLOUD, brands) { includeOneCloud = false } subq := CloudproviderManager.GetProviderIdsQuery(isPublic, isOnPremise, providers, brands) if includeOneCloud { return q.Filter(sqlchemy.OR( sqlchemy.In(managerIdField, subq), sqlchemy.IsNullOrEmpty(managerIdField), )) } else { return q.Filter(sqlchemy.In(managerIdField, subq)) } } func (self *SCloudprovider) CleanSchedCache() { hosts := []SHost{} q := HostManager.Query().Equals("manager_id", self.Id) if err := db.FetchModelObjects(HostManager, q, &hosts); err != nil { log.Errorf("failed to get hosts for cloudprovider %s error: %v", self.Name, err) return } for _, host := range hosts { host.ClearSchedDescCache() } } func (self *SCloudprovider) GetGuestCount() (int, error) { sq := HostManager.Query("id").Equals("manager_id", self.Id) return GuestManager.Query().In("host_id", sq).CountWithError() } func (self *SCloudprovider) GetHostCount() (int, error) { return HostManager.Query().Equals("manager_id", 
self.Id).IsFalse("is_emulated").CountWithError() } func (self *SCloudprovider) getVpcCount() (int, error) { return VpcManager.Query().Equals("manager_id", self.Id).IsFalse("is_emulated").CountWithError() } func (self *SCloudprovider) getStorageCount() (int, error) { return StorageManager.Query().Equals("manager_id", self.Id).IsFalse("is_emulated").CountWithError() } func (self *SCloudprovider) getStoragecacheCount() (int, error) { return StoragecacheManager.Query().Equals("manager_id", self.Id).CountWithError() } func (self *SCloudprovider) getEipCount() (int, error) { return ElasticipManager.Query().Equals("manager_id", self.Id).CountWithError() } func (self *SCloudprovider) getSnapshotCount() (int, error) { return SnapshotManager.Query().Equals("manager_id", self.Id).CountWithError() } func (self *SCloudprovider) getLoadbalancerCount() (int, error) { return LoadbalancerManager.Query().Equals("manager_id", self.Id).CountWithError() } func (self *SCloudprovider) getDBInstanceCount() (int, error) { q := DBInstanceManager.Query() q = q.Filter(sqlchemy.Equals(q.Field("manager_id"), self.Id)) return q.CountWithError() } func (self *SCloudprovider) getElasticcacheCount() (int, error) { vpcs := VpcManager.Query("id", "manager_id").SubQuery() q := ElasticcacheManager.Query() q = q.Join(vpcs, sqlchemy.Equals(q.Field("vpc_id"), vpcs.Field("id"))) q = q.Filter(sqlchemy.Equals(vpcs.Field("manager_id"), self.Id)) return q.CountWithError() } func (self *SCloudprovider) getExternalProjectCount() (int, error) { return ExternalProjectManager.Query().Equals("manager_id", self.Id).CountWithError() } func (self *SCloudprovider) getSyncRegionCount() (int, error) { return CloudproviderRegionManager.Query().Equals("cloudprovider_id", self.Id).CountWithError() } func (self *SCloudprovider) ValidateUpdateData(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, input api.CloudproviderUpdateInput) (api.CloudproviderUpdateInput, error) { var err error input.EnabledStatusStandaloneResourceBaseUpdateInput, err = self.SEnabledStatusStandaloneResourceBase.ValidateUpdateData(ctx, userCred, query, input.EnabledStatusStandaloneResourceBaseUpdateInput) if err != nil { return input, errors.Wrap(err, "SEnabledStatusStandaloneResourceBase.ValidateUpdateData") } return input, nil } // +onecloud:swagger-gen-ignore func (self *SCloudproviderManager) ValidateCreateData(ctx context.Context, userCred mcclient.TokenCredential, ownerId mcclient.IIdentityProvider, query jsonutils.JSONObject, input api.CloudproviderCreateInput) (api.CloudproviderCreateInput, error) { return input, httperrors.NewUnsupportOperationError("Directly creating cloudprovider is not supported, create cloudaccount instead") } func (self *SCloudprovider) getAccessUrl() string { if len(self.AccessUrl) > 0 { return self.AccessUrl } account := self.GetCloudaccount() return account.AccessUrl } func (self *SCloudprovider) getPassword() (string, error) { if len(self.Secret) == 0 { account := self.GetCloudaccount() return account.getPassword() } return utils.DescryptAESBase64(self.Id, self.Secret) } func getTenant(ctx context.Context, projectId string, name string) (*db.STenant, error) { if len(projectId) > 0 { tenant, err := db.TenantCacheManager.FetchTenantById(ctx, projectId) if err != nil { return nil, errors.Wrap(err, "TenantCacheManager.FetchTenantById") } return tenant, nil } if len(name) == 0 { return nil, errors.Error("cannot syncProject for empty name") } return db.TenantCacheManager.FetchTenantByName(ctx, name) } func 
createTenant(ctx context.Context, name, domainId, desc string) (string, string, error) { s := auth.GetAdminSession(ctx, options.Options.Region, "") params := jsonutils.NewDict() params.Add(jsonutils.NewString(name), "generate_name") params.Add(jsonutils.NewString(domainId), "domain_id") params.Add(jsonutils.NewString(desc), "description") resp, err := identity.Projects.Create(s, par
"", "", errors.Wrapf(err, "getTenan") } return createTenant(ctx, name, domainId, desc) } share := self.GetSharedInfo() if tenant.DomainId == self.DomainId || (share.PublicScope == rbacutils.ScopeSystem || (share.PublicScope == rbacutils.ScopeDomain && utils.IsInStringArray(tenant.DomainId, share.SharedDomains))) { return tenant.DomainId, tenant.Id, nil } return createTenant(ctx, name, domainId, desc) } func (self *SCloudprovider) syncProject(ctx context.Context, userCred mcclient.TokenCredential) error { account := self.GetCloudaccount() if account == nil { return errors.Error("no valid cloudaccount???") } desc := fmt.Sprintf("auto create from cloud provider %s (%s)", self.Name, self.Id) domainId, projectId, err := account.getOrCreateTenant(ctx, self.Name, "", self.ProjectId, desc) if err != nil { return errors.Wrap(err, "getOrCreateTenant") } return self.saveProject(userCred, domainId, projectId) } func (self *SCloudprovider) saveProject(userCred mcclient.TokenCredential, domainId, projectId string) error { if projectId != self.ProjectId { diff, err := db.Update(self, func() error { self.DomainId = domainId self.ProjectId = projectId return nil }) if err != nil { log.Errorf("update projectId fail: %s", err) return err } db.OpsLog.LogEvent(self, db.ACT_UPDATE, diff, userCred) } return nil } type SSyncRange struct { api.SyncRangeInput } func (sr *SSyncRange) GetRegionIds() ([]string, error) { regionIds := []string{} if len(sr.Host) == 0 && len(sr.Zone) == 0 && len(sr.Region) == 0 { return regionIds, nil } hostQ := HostManager.Query().SubQuery() hosts := hostQ.Query().Filter(sqlchemy.OR( sqlchemy.In(hostQ.Field("id"), sr.Host), sqlchemy.In(hostQ.Field("name"), sr.Host), )).SubQuery() zoneQ := ZoneManager.Query().SubQuery() zones := zoneQ.Query().Filter(sqlchemy.OR( sqlchemy.In(zoneQ.Field("id"), sr.Zone), sqlchemy.In(zoneQ.Field("name"), sr.Zone), sqlchemy.In(zoneQ.Field("id"), hosts.Query(hosts.Field("zone_id")).SubQuery()), )).SubQuery() regionQ := CloudregionManager.Query().SubQuery() q := regionQ.Query(regionQ.Field("id")).Filter(sqlchemy.OR( sqlchemy.In(regionQ.Field("id"), sr.Region), sqlchemy.In(regionQ.Field("name"), sr.Region), sqlchemy.In(regionQ.Field("id"), zones.Query(zones.Field("cloudregion_id")).SubQuery()), )) rows, err := q.Rows() if err != nil { return nil, errors.Wrap(err, "q.Rows") } defer rows.Close() for rows.Next() { var regionId string err = rows.Scan(&regionId) if err != nil { return nil, errors.Wrap(err, "rows.Scan") } regionIds = append(regionIds, regionId) } return regionIds, nil } func (sr *SSyncRange) NeedSyncResource(res string) bool { if len(sr.Resources) == 0 { return true } return utils.IsInStringArray(res, sr.Resources) } func (sr *SSyncRange) NeedSyncInfo() bool { if sr.FullSync { return true } if len(sr.Region) > 0 || len(sr.Zone) > 0 || len(sr.Host) > 0 || len(sr.Resources) > 0 { return true } return false } func (sr *SSyncRange) normalizeRegionIds() error { for i := 0; i < len(sr.Region); i += 1 { obj, err := CloudregionManager.FetchByIdOrName(nil, sr.Region[i]) if err != nil { if err == sql.ErrNoRows { return httperrors.NewResourceNotFoundError("Region %s not found", sr.Region[i]) } else { return err } } sr.Region[i] = obj.GetId() } return nil } func (sr *SSyncRange) normalizeZoneIds() error { for i := 0; i < len(sr.Zone); i += 1 { obj, err := ZoneManager.FetchByIdOrName(nil, sr.Zone[i]) if err != nil { if err == sql.ErrNoRows { return httperrors.NewResourceNotFoundError("Zone %s not found", sr.Zone[i]) } else { return err } } zone := obj.(*SZone) 
region, _ := zone.GetRegion() if region == nil { continue } sr.Zone[i] = zone.GetId() if !utils.IsInStringArray(region.Id, sr.Region) { sr.Region = append(sr.Region, region.Id) } } return nil } func (sr *SSyncRange) normalizeHostIds() error { for i := 0; i < len(sr.Host); i += 1 { obj, err := HostManager.FetchByIdOrName(nil, sr.Host[i]) if err != nil { if err == sql.ErrNoRows { return httperrors.NewResourceNotFoundError("Host %s not found", sr.Host[i]) } else { return err } } host := obj.(*SHost) zone, _ := host.GetZone() if zone == nil { continue } region, _ := zone.GetRegion() if region == nil { continue } sr.Host[i] = host.GetId() if !utils.IsInStringArray(zone.Id, sr.Zone) { sr.Zone = append(sr.Zone, zone.Id) } if !utils.IsInStringArray(region.Id, sr.Region) { sr.Region = append(sr.Region, region.Id) } } return nil } func (sr *SSyncRange) Normalize() error { if sr.Region != nil && len(sr.Region) > 0 { err := sr.normalizeRegionIds() if err != nil { return err } } else { sr.Region = make([]string, 0) } if sr.Zone != nil && len(sr.Zone) > 0 { err := sr.normalizeZoneIds() if err != nil { return err } } else { sr.Zone = make([]string, 0) } if sr.Host != nil && len(sr.Host) > 0 { err := sr.normalizeHostIds() if err != nil { return err } } else { sr.Host = make([]string, 0) } return nil } func (self *SCloudprovider) AllowPerformSync(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, data jsonutils.JSONObject) bool { return db.IsAdminAllowPerform(userCred, self, "sync") } func (self *SCloudprovider) PerformSync(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, input api.SyncRangeInput) (jsonutils.JSONObject, error) { if !self.GetEnabled() { return nil, httperrors.NewInvalidStatusError("Cloudprovider disabled") } account := self.GetCloudaccount() if !account.GetEnabled() { return nil, httperrors.NewInvalidStatusError("Cloudaccount disabled") } if account.EnableAutoSync && self.SyncStatus != api.CLOUD_PROVIDER_SYNC_STATUS_IDLE { return nil, httperrors.NewInvalidStatusError("Cloudprovider is not idle") } syncRange := SSyncRange{input} if syncRange.FullSync || len(syncRange.Region) > 0 || len(syncRange.Zone) > 0 || len(syncRange.Host) > 0 || len(syncRange.Resources) > 0 { syncRange.DeepSync = true } if self.CanSync() || syncRange.Force { return nil, self.StartSyncCloudProviderInfoTask(ctx, userCred, &syncRange, "") } return nil, nil } func (self *SCloudprovider) StartSyncCloudProviderInfoTask(ctx context.Context, userCred mcclient.TokenCredential, syncRange *SSyncRange, parentTaskId string) error { params := jsonutils.NewDict() if syncRange != nil { params.Add(jsonutils.Marshal(syncRange), "sync_range") } task, err := taskman.TaskManager.NewTask(ctx, "CloudProviderSyncInfoTask", self, userCred, params, parentTaskId, "", nil) if err != nil { log.Errorf("startSyncCloudProviderInfoTask newTask error %s", err) return err } if cloudaccount := self.GetCloudaccount(); cloudaccount != nil { cloudaccount.markAutoSync(userCred) cloudaccount.MarkSyncing(userCred) } self.markStartSync(userCred, syncRange) db.OpsLog.LogEvent(self, db.ACT_SYNC_HOST_START, "", userCred) task.ScheduleRun(nil) return nil } func (self *SCloudprovider) AllowPerformChangeProject(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, input apis.PerformChangeProjectOwnerInput) bool { return db.IsAdminAllowPerform(userCred, self, "change-project") } func (self *SCloudprovider) PerformChangeProject(ctx context.Context, userCred 
mcclient.TokenCredential, query jsonutils.JSONObject, input apis.PerformChangeProjectOwnerInput) (jsonutils.JSONObject, error) { project := input.ProjectId tenant, err := db.TenantCacheManager.FetchTenantByIdOrName(ctx, project) if err != nil { return nil, httperrors.NewNotFoundError("project %s not found", project) } if self.ProjectId == tenant.Id { return nil, nil } account := self.GetCloudaccount() if self.DomainId != tenant.DomainId { if !db.IsAdminAllowPerform(userCred, self, "change-project") { return nil, httperrors.NewForbiddenError("not allow to change project across domain") } if account.ShareMode == api.CLOUD_ACCOUNT_SHARE_MODE_ACCOUNT_DOMAIN && account.DomainId != tenant.DomainId { return nil, httperrors.NewInvalidStatusError("cannot change to a different domain from a private cloud account") } // if account's public_scope=domain and share_mode=provider_domain, only allow to share to specific domains if account.PublicScope == string(rbacutils.ScopeDomain) { sharedDomains := account.GetSharedDomains() if !utils.IsInStringArray(tenant.DomainId, sharedDomains) && account.DomainId != tenant.DomainId { return nil, errors.Wrap(httperrors.ErrForbidden, "cannot set to domain outside of the shared domains") } } // otherwise, allow change project across domain } notes := struct { OldProjectId string OldDomainId string NewProjectId string NewProject string NewDomainId string NewDomain string }{ OldProjectId: self.ProjectId, OldDomainId: self.DomainId, NewProjectId: tenant.Id, NewProject: tenant.Name, NewDomainId: tenant.DomainId, NewDomain: tenant.Domain, } err = self.saveProject(userCred, tenant.DomainId, tenant.Id) if err != nil { log.Errorf("Update cloudprovider error: %v", err) return nil, httperrors.NewGeneralError(err) } logclient.AddSimpleActionLog(self, logclient.ACT_CHANGE_OWNER, notes, userCred, true) if account.EnableAutoSync { // no need to sync rightnow, will do it in auto sync return nil, nil } return nil, self.StartSyncCloudProviderInfoTask(ctx, userCred, &SSyncRange{SyncRangeInput: api.SyncRangeInput{ FullSync: true, DeepSync: true, }}, "") } func (self *SCloudprovider) markStartingSync(userCred mcclient.TokenCredential, syncRange *SSyncRange) error { _, err := db.Update(self, func() error { self.SyncStatus = api.CLOUD_PROVIDER_SYNC_STATUS_QUEUING return nil }) if err != nil { log.Errorf("Failed to markStartSync error: %v", err) return errors.Wrap(err, "Update") } cprs := self.GetCloudproviderRegions() for i := range cprs { if cprs[i].Enabled { err := cprs[i].markStartingSync(userCred, syncRange) if err != nil { return errors.Wrap(err, "cprs[i].markStartingSync") } } } return nil } func (self *SCloudprovider) markStartSync(userCred mcclient.TokenCredential, syncRange *SSyncRange) error { _, err := db.Update(self, func() error { self.SyncStatus = api.CLOUD_PROVIDER_SYNC_STATUS_QUEUED return nil }) if err != nil { log.Errorf("Failed to markStartSync error: %v", err) return err } cprs := self.GetCloudproviderRegions() for i := range cprs { if cprs[i].Enabled { err := cprs[i].markStartingSync(userCred, syncRange) if err != nil { return errors.Wrap(err, "cprs[i].markStartingSync") } } } return nil } func (self *SCloudprovider) markSyncing(userCred mcclient.TokenCredential) error { _, err := db.Update(self, func() error { self.SyncStatus = api.CLOUD_PROVIDER_SYNC_STATUS_SYNCING self.LastSync = timeutils.UtcNow() self.LastSyncEndAt = time.Time{} return nil }) if err != nil { log.Errorf("Failed to markSyncing error: %v", err) return err } return nil } func (self *SCloudprovider) 
markEndSyncWithLock(ctx context.Context, userCred mcclient.TokenCredential) error { err := func() error { lockman.LockObject(ctx, self) defer lockman.ReleaseObject(ctx, self) if self.SyncStatus == api.CLOUD_PROVIDER_SYNC_STATUS_IDLE { return nil } if self.getSyncStatus2() != api.CLOUD_PROVIDER_SYNC_STATUS_IDLE { return nil } err := self.markEndSync(userCred) if err != nil { return err } return nil }() if err != nil { return err } account := self.GetCloudaccount() return account.MarkEndSyncWithLock(ctx, userCred) } func (self *SCloudprovider) markEndSync(userCred mcclient.TokenCredential) error { _, err := db.Update(self, func() error { self.SyncStatus = api.CLOUD_PROVIDER_SYNC_STATUS_IDLE self.LastSyncEndAt = timeutils.UtcNow() return nil }) if err != nil { return errors.Wrapf(err, "markEndSync") } return nil } func (self *SCloudprovider) cancelStartingSync(userCred mcclient.TokenCredential) error { if self.SyncStatus == api.CLOUD_PROVIDER_SYNC_STATUS_QUEUING { cprs := self.GetCloudproviderRegions() for i := range cprs { err := cprs[i].cancelStartingSync(userCred) if err != nil { return errors.Wrap(err, "cprs[i].cancelStartingSync") } } _, err := db.Update(self, func() error { self.SyncStatus = api.CLOUD_PROVIDER_SYNC_STATUS_IDLE return nil }) if err != nil { return errors.Wrap(err, "db.Update") } } return nil } func (self *SCloudprovider) GetProviderFactory() (cloudprovider.ICloudProviderFactory, error) { return cloudprovider.GetProviderFactory(self.Provider) } func (self *SCloudprovider) GetProvider() (cloudprovider.ICloudProvider, error) { if !self.GetEnabled() { return nil, errors.Wrap(httperrors.ErrInvalidStatus, "Cloud provider is not enabled") } accessUrl := self.getAccessUrl() passwd, err := self.getPassword() if err != nil { return nil, err } account := self.GetCloudaccount() return cloudprovider.GetProvider(cloudprovider.ProviderConfig{ Id: self.Id, Name: self.Name, Vendor: self.Provider, URL: accessUrl, Account: self.Account, Secret: passwd, ProxyFunc: account.proxyFunc(), Options: account.Options, }) } func (self *SCloudprovider) savePassword(secret string) error { sec, err := utils.EncryptAESBase64(self.Id, secret) if err != nil { return err } _, err = db.Update(self, func() error { self.Secret = sec return nil }) return err } func (self *SCloudprovider) GetCloudaccount() *SCloudaccount { return CloudaccountManager.FetchCloudaccountById(self.CloudaccountId) } func (manager *SCloudproviderManager) FetchCloudproviderById(providerId string) *SCloudprovider { providerObj, err := manager.FetchById(providerId) if err != nil { return nil } return providerObj.(*SCloudprovider) } func IsProviderAccountEnabled(providerId string) bool { if len(providerId) == 0 { return true } return CloudproviderManager.IsProviderAccountEnabled(providerId) } func (manager *SCloudproviderManager) IsProviderAccountEnabled(providerId string) bool { providerObj := manager.FetchCloudproviderById(providerId) if providerObj == nil { return false } if !providerObj.GetEnabled() { return false } account := providerObj.GetCloudaccount() if account == nil { return false } return account.GetEnabled() } func (manager *SCloudproviderManager) FetchCloudproviderByIdOrName(providerId string) *SCloudprovider { providerObj, err := manager.FetchByIdOrName(nil, providerId) if err != nil { if err != sql.ErrNoRows { log.Errorf("%s", err) } return nil } return providerObj.(*SCloudprovider) } func (self *SCloudprovider) getUsage() api.SCloudproviderUsage { usage := api.SCloudproviderUsage{} usage.GuestCount, _ = 
self.GetGuestCount() usage.HostCount, _ = self.GetHostCount() usage.VpcCount, _ = self.getVpcCount() usage.StorageCount, _ = self.getStorageCount() usage.StorageCacheCount, _ = self.getStoragecacheCount() usage.EipCount, _ = self.getEipCount() usage.SnapshotCount, _ = self.getSnapshotCount() usage.LoadbalancerCount, _ = self.getLoadbalancerCount() usage.DBInstanceCount, _ = self.getDBInstanceCount() usage.ElasticcacheCount, _ = self.getElasticcacheCount() usage.ProjectCount, _ = self.getExternalProjectCount() usage.SyncRegionCount, _ = self.getSyncRegionCount() return usage } func (self *SCloudprovider) getProject(ctx context.Context) *db.STenant { proj, _ := db.TenantCacheManager.FetchTenantById(ctx, self.ProjectId) return proj } func (manager *SCloudproviderManager) FetchCustomizeColumns( ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, objs []interface{}, fields stringutils2.SSortedStrings, isList bool, ) []api.CloudproviderDetails { rows := make([]api.CloudproviderDetails, len(objs)) stdRows := manager.SEnabledStatusStandaloneResourceBaseManager.FetchCustomizeColumns(ctx, userCred, query, objs, fields, isList) projRows := manager.SProjectizedResourceBaseManager.FetchCustomizeColumns(ctx, userCred, query, objs, fields, isList) pmRows := manager.SProjectMappingResourceBaseManager.FetchCustomizeColumns(ctx, userCred, query, objs, fields, isList) accountIds := make([]string, len(objs)) for i := range rows { provider := objs[i].(*SCloudprovider) accountIds[i] = provider.CloudaccountId rows[i] = api.CloudproviderDetails{ EnabledStatusStandaloneResourceDetails: stdRows[i], ProjectizedResourceInfo: projRows[i], SCloudproviderUsage: provider.getUsage(), SyncStatus2: provider.getSyncStatus2(), ProjectMappingResourceInfo: pmRows[i], } capabilities, _ := CloudproviderCapabilityManager.getCapabilities(provider.Id) if len(capabilities) > 0 { rows[i].Capabilities = capabilities } } accounts := make(map[string]SCloudaccount) err := db.FetchStandaloneObjectsByIds(CloudaccountManager, accountIds, &accounts) if err != nil { log.Errorf("FetchStandaloneObjectsByIds (%s) fail %s", CloudaccountManager.KeywordPlural(), err) return rows } proxySettingIds := make([]string, len(accounts)) for i := range accounts { proxySettingId := accounts[i].ProxySettingId if !utils.IsInStringArray(proxySettingId, proxySettingIds) { proxySettingIds = append(proxySettingIds, proxySettingId) } } proxySettings := make(map[string]proxy.SProxySetting) err = db.FetchStandaloneObjectsByIds(proxy.ProxySettingManager, proxySettingIds, &proxySettings) if err != nil { log.Errorf("FetchStandaloneObjectsByIds (%s) fail %s", proxy.ProxySettingManager.KeywordPlural(), err) return rows } for i := range rows { if account, ok := accounts[accountIds[i]]; ok { rows[i].Cloudaccount = account.Name rows[i].Brand = account.Brand ps := &rows[i].ProxySetting if proxySetting, ok := proxySettings[account.ProxySettingId]; ok { ps.Id = proxySetting.Id ps.Name = proxySetting.Name ps.HTTPProxy = proxySetting.HTTPProxy ps.HTTPSProxy = proxySetting.HTTPSProxy ps.NoProxy = proxySetting.NoProxy } } } return rows } func (manager *SCloudproviderManager) InitializeData() error { // move vmware info from vcenter to cloudprovider vcenters := make([]SVCenter, 0) q := VCenterManager.Query() err := db.FetchModelObjects(VCenterManager, q, &vcenters) if err != nil { return err } for _, vc := range vcenters { _, err := CloudproviderManager.FetchById(vc.Id) if err != nil { if err == sql.ErrNoRows { err = manager.migrateVCenterInfo(&vc) if 
err != nil { log.Errorf("migrateVcenterInfo fail %s", err) return err } _, err = db.Update(&vc, func() error { return vc.MarkDelete() }) if err != nil { log.Errorf("delete vcenter record fail %s", err) return err } } else { log.Errorf("fetch cloudprovider fail %s", err) return err } } else { log.Debugf("vcenter info has been migrate into cloudprovider") } } // fill empty projectId with system project ID providers := make([]SCloudprovider, 0) q = CloudproviderManager.Query() q = q.Filter(sqlchemy.OR(sqlchemy.IsEmpty(q.Field("tenant_id")), sqlchemy.IsNull(q.Field("tenant_id")))) err = db.FetchModelObjects(CloudproviderManager, q, &providers) if err != nil { log.Errorf("query cloudproviders with empty tenant_id fail %s", err) return err } for i := 0; i < len(providers); i += 1 { _, err := db.Update(&providers[i], func() error { providers[i].DomainId = auth.AdminCredential().GetProjectDomainId() providers[i].ProjectId = auth.AdminCredential().GetProjectId() return nil }) if err != nil { log.Errorf("update cloudprovider project fail %s", err) return err } } return nil } func (manager *SCloudproviderManager) migrateVCenterInfo(vc *SVCenter) error { cp := SCloudprovider{} cp.SetModelManager(manager, &cp) newName, err := db.GenerateName(context.Background(), manager, nil, vc.Name) if err != nil { return err } cp.Id = vc.Id cp.Name = newName cp.Status = vc.Status cp.AccessUrl = fmt.Sprintf("https://%s:%d", vc.Hostname, vc.Port) cp.Account = vc.Account cp.Secret = vc.Password cp.LastSync = vc.LastSync cp.Provider = api.CLOUD_PROVIDER_VMWARE return manager.TableSpec().Insert(context.TODO(), &cp) } // 云订阅列表 func (manager *SCloudproviderManager) ListItemFilter( ctx context.Context, q *sqlchemy.SQuery, userCred mcclient.TokenCredential, query api.CloudproviderListInput, ) (*sqlchemy.SQuery, error) { accountArr := query.CloudaccountId if len(accountArr) > 0 { cpq := CloudaccountManager.Query().SubQuery() subcpq := cpq.Query(cpq.Field("id")).Filter(sqlchemy.OR( sqlchemy.In(cpq.Field("id"), stringutils2.RemoveUtf8Strings(accountArr)), sqlchemy.In(cpq.Field("name"), accountArr), )).SubQuery() q = q.In("cloudaccount_id", subcpq) } var zone *SZone var region *SCloudregion if len(query.ZoneId) > 0 { zoneObj, err := ZoneManager.FetchByIdOrName(userCred, query.ZoneId) if err != nil { if err == sql.ErrNoRows { return nil, errors.Wrapf(httperrors.ErrResourceNotFound, "%s %s", ZoneManager.Keyword(), query.ZoneId) } else { return nil, errors.Wrap(err, "ZoneManager.FetchByIdOrName") } } zone = zoneObj.(*SZone) pr := CloudproviderRegionManager.Query().SubQuery() sq := pr.Query(pr.Field("cloudprovider_id")).Equals("cloudregion_id", zone.CloudregionId).Distinct() q = q.In("id", sq) } else if len(query.CloudregionId) > 0 { regionObj, err := CloudregionManager.FetchByIdOrName(userCred, query.CloudregionId) if err != nil { if err == sql.ErrNoRows { return nil, httperrors.NewResourceNotFoundError2("cloudregion", query.CloudregionId) } return nil, httperrors.NewGeneralError(err) } region = regionObj.(*SCloudregion) pr := CloudproviderRegionManager.Query().SubQuery() sq := pr.Query(pr.Field("cloudprovider_id")).Equals("cloudregion_id", region.Id).Distinct() q = q.In("id", sq) } if query.Usable != nil && *query.Usable { providers := usableCloudProviders().SubQuery() networks := NetworkManager.Query().SubQuery() wires := WireManager.Query().SubQuery() vpcs := VpcManager.Query().SubQuery() providerRegions := CloudproviderRegionManager.Query().SubQuery() sq := providers.Query(sqlchemy.DISTINCT("id", providers.Field("id"))) sq = 
sq.Join(providerRegions, sqlchemy.Equals(providers.Field("id"), providerRegions.Field("cloudprovider_id"))) sq = sq.Join(vpcs, sqlchemy.Equals(providerRegions.Field("cloudregion_id"), vpcs.Field("cloudregion_id"))) sq = sq.Join(wires, sqlchemy.Equals(vpcs.Field("id"), wires.Field("vpc_id"))) sq = sq.Join(networks, sqlchemy.Equals(wires.Field("id"), networks.Field("wire_id"))) sq = sq.Filter(sqlchemy.Equals(vpcs.Field("status"), api.VPC_STATUS_AVAILABLE)) sq = sq.Filter(sqlchemy.Equals(networks.Field("status"), api.NETWORK_STATUS_AVAILABLE)) sq = sq.Filter(sqlchemy.OR( sqlchemy.IsNullOrEmpty(vpcs.Field("manager_id")), sqlchemy.Equals(vpcs.Field("manager_id"), providers.Field("id")), )) if zone != nil { zoneFilter := sqlchemy.OR(sqlchemy.Equals(wires.Field("zone_id"), zone.GetId()), sqlchemy.IsNullOrEmpty(wires.Field("zone_id"))) sq = sq.Filter(zoneFilter) } else if region != nil { sq = sq.Filter(sqlchemy.Equals(vpcs.Field("cloudregion_id"), region.GetId())) } q = q.Filter(sqlchemy.In(q.Field("id"), sq.SubQuery())) } q, err := manager.SEnabledStatusStandaloneResourceBaseManager.ListItemFilter(ctx, q, userCred, query.EnabledStatusStandaloneResourceListInput) if err != nil { return nil, errors.Wrap(err, "SEnabledStatusStandaloneResourceBaseManager.ListItemFilter") } q, err = manager.SProjectizedResourceBaseManager.ListItemFilter(ctx, q, userCred, query.ProjectizedResourceListInput) if err != nil { return nil, errors.Wrapf(err, "SProjectizedResourceBaseManager.ListItemFilter") } q, err = manager.SSyncableBaseResourceManager.ListItemFilter(ctx, q, userCred, query.SyncableBaseResourceListInput) if err != nil { return nil, errors.Wrap(err, "SSyncableBaseResourceManager.ListItemFilter") } managerStr := query.CloudproviderId if len(managerStr) > 0 { providerObj, err := manager.FetchByIdOrName(userCred, managerStr) if err != nil { if err == sql.ErrNoRows { return nil, httperrors.NewResourceNotFoundError2(CloudproviderManager.Keyword(), managerStr) } else { return nil, httperrors.NewGeneralError(err) } } q = q.Equals("id", providerObj.GetId()) } cloudEnvStr := query.CloudEnv if cloudEnvStr == api.CLOUD_ENV_PUBLIC_CLOUD { cloudaccounts := CloudaccountManager.Query().SubQuery() q = q.Join(cloudaccounts, sqlchemy.Equals(cloudaccounts.Field("id"), q.Field("cloudaccount_id"))) q = q.Filter(sqlchemy.IsTrue(cloudaccounts.Field("is_public_cloud"))) q = q.Filter(sqlchemy.IsFalse(cloudaccounts.Field("is_on_premise"))) } if cloudEnvStr == api.CLOUD_ENV_PRIVATE_CLOUD { cloudaccounts := CloudaccountManager.Query().SubQuery() q = q.Join(cloudaccounts, sqlchemy.Equals(cloudaccounts.Field("id"), q.Field("cloudaccount_id"))) q = q.Filter(sqlchemy.IsFalse(cloudaccounts.Field("is_public_cloud"))) q = q.Filter(sqlchemy.IsFalse(cloudaccounts.Field("is_on_premise"))) } if cloudEnvStr == api.CLOUD_ENV_ON_PREMISE { cloudaccounts := CloudaccountManager.Query().SubQuery() q = q.Join(cloudaccounts, sqlchemy.Equals(cloudaccounts.Field("id"), q.Field("cloudaccount_id"))) q = q.Filter(sqlchemy.IsFalse(cloudaccounts.Field("is_public_cloud"))) q = q.Filter(sqlchemy.IsTrue(cloudaccounts.Field("is_on_premise"))) } capabilities := query.Capability if len(capabilities) > 0 { subq := CloudproviderCapabilityManager.Query("cloudprovider_id").In("capability", capabilities).Distinct().SubQuery() q = q.In("id", subq) } if len(query.HealthStatus) > 0 { q = q.In("health_status", query.HealthStatus) } if len(query.Providers) > 0 { subq := CloudaccountManager.Query("id").In("provider", query.Providers).SubQuery() q = q.In("cloudaccount_id", subq) 
} if len(query.Brands) > 0 { subq := CloudaccountManager.Query("id").In("brand", query.Brands).SubQuery() q = q.In("cloudaccount_id", subq) } if len(query.HostSchedtagId) > 0 { schedTagObj, err := SchedtagManager.FetchByIdOrName(userCred, query.HostSchedtagId) if err != nil { if errors.Cause(err) == sql.ErrNoRows { return nil, errors.Wrapf(httperrors.ErrResourceNotFound, "%s %s", SchedtagManager.Keyword(), query.HostSchedtagId) } else { return nil, errors.Wrap(err, "SchedtagManager.FetchByIdOrName") } } subq := HostManager.Query("manager_id") hostschedtags := HostschedtagManager.Query().Equals("schedtag_id", schedTagObj.GetId()).SubQuery() subq = subq.Join(hostschedtags, sqlchemy.Equals(hostschedtags.Field("host_id"), subq.Field("id"))) log.Debugf("%s", subq.String()) q = q.In("id", subq.SubQuery()) } return q, nil } func (manager *SCloudproviderManager) OrderByExtraFields( ctx context.Context, q *sqlchemy.SQuery, userCred mcclient.TokenCredential, query api.CloudproviderListInput, ) (*sqlchemy.SQuery, error) { var err error q, err = manager.SEnabledStatusStandaloneResourceBaseManager.OrderByExtraFields(ctx, q, userCred, query.EnabledStatusStandaloneResourceListInput) if err != nil { return nil, errors.Wrap(err, "SEnabledStatusStandaloneResourceBaseManager.OrderByExtraFields") } return q, nil } func (manager *SCloudproviderManager) QueryDistinctExtraField(q *sqlchemy.SQuery, field string) (*sqlchemy.SQuery, error) { var err error if field == "manager" { q = q.AppendField(q.Field("name").Label("manager")).Distinct() return q, nil } if field == "account" { accounts := CloudaccountManager.Query("name", "id").SubQuery() q.AppendField(accounts.Field("name", field)).Distinct() q = q.Join(accounts, sqlchemy.Equals(q.Field("cloudaccount_id"), accounts.Field("id"))) return q, nil } q, err = manager.SEnabledStatusStandaloneResourceBaseManager.QueryDistinctExtraField(q, field) if err == nil { return q, nil } return q, httperrors.ErrNotFound } func (provider *SCloudprovider) markProviderDisconnected(ctx context.Context, userCred mcclient.TokenCredential, reason string) error { _, err := db.UpdateWithLock(ctx, provider, func() error { provider.HealthStatus = api.CLOUD_PROVIDER_HEALTH_UNKNOWN return nil }) if err != nil { return err } provider.SetStatus(userCred, api.CLOUD_PROVIDER_DISCONNECTED, reason) return provider.ClearSchedDescCache() } func (self *SCloudprovider) updateName(ctx context.Context, userCred mcclient.TokenCredential, name string) error { if self.Name != name { diff, err := db.Update(self, func() error { self.Name = name return nil }) if err != nil { return errors.Wrapf(err, "db.Update") } db.OpsLog.LogEvent(self, db.ACT_UPDATE, diff, userCred) } return nil } func (provider *SCloudprovider) markProviderConnected(ctx context.Context, userCred mcclient.TokenCredential, healthStatus string) error { if healthStatus != provider.HealthStatus { diff, err := db.Update(provider, func() error { provider.HealthStatus = healthStatus return nil }) if err != nil { return err } db.OpsLog.LogEvent(provider, db.ACT_UPDATE, diff, userCred) } provider.SetStatus(userCred, api.CLOUD_PROVIDER_CONNECTED, "") return provider.ClearSchedDescCache() } func (provider *SCloudprovider) prepareCloudproviderRegions(ctx context.Context, userCred mcclient.TokenCredential) ([]SCloudproviderregion, error) { driver, err := provider.GetProvider() if err != nil { return nil, errors.Wrap(err, "provider.GetProvider") } err = CloudproviderCapabilityManager.setCapabilities(ctx, userCred, provider.Id, driver.GetCapabilities()) 
if err != nil { return nil, errors.Wrap(err, "CloudproviderCapabilityManager.setCapabilities") } if driver.GetFactory().IsOnPremise() { cpr := CloudproviderRegionManager.FetchByIdsOrCreate(provider.Id, api.DEFAULT_REGION_ID) cpr.setCapabilities(ctx, userCred, driver.GetCapabilities()) return []SCloudproviderregion{*cpr}, nil } iregions := driver.GetIRegions() externalIdPrefix := driver.GetCloudRegionExternalIdPrefix() _, _, cprs, result := CloudregionManager.SyncRegions(ctx, userCred, provider, externalIdPrefix, iregions) if result.IsError() { log.Errorf("syncRegion fail %s", result.Result()) } return cprs, nil } func (provider *SCloudprovider) GetCloudproviderRegions() []SCloudproviderregion { q := CloudproviderRegionManager.Query() q = q.Equals("cloudprovider_id", provider.Id) // q = q.IsTrue("enabled") // q = q.Equals("sync_status", api.CLOUD_PROVIDER_SYNC_STATUS_IDLE) return CloudproviderRegionManager.fetchRecordsByQuery(q) } func (provider *SCloudprovider) resetAutoSync() { cprs := provider.GetCloudproviderRegions() for i := range cprs { cprs[i].resetAutoSync() } } func (provider *SCloudprovider) syncCloudproviderRegions(ctx context.Context, userCred mcclient.TokenCredential, syncRange SSyncRange, wg *sync.WaitGroup, autoSync bool) { provider.markSyncing(userCred) cprs := provider.GetCloudproviderRegions() regionIds, _ := syncRange.GetRegionIds() syncCnt := 0 for i := range cprs { if cprs[i].Enabled && cprs[i].CanSync() && (!autoSync || cprs[i].needAutoSync()) && (len(regionIds) == 0 || utils.IsInStringArray(cprs[i].CloudregionId, regionIds)) { syncCnt += 1 if wg != nil { wg.Add(1) } cprs[i].submitSyncTask(ctx, userCred, syncRange) if wg != nil { wg.Done() } } } if syncCnt == 0 { err := provider.markEndSyncWithLock(ctx, userCred) if err != nil { log.Errorf("markEndSyncWithLock for %s error: %v", provider.Name, err) } } } func (provider *SCloudprovider) SyncCallSyncCloudproviderRegions(ctx context.Context, userCred mcclient.TokenCredential, syncRange SSyncRange) { var wg sync.WaitGroup provider.syncCloudproviderRegions(ctx, userCred, syncRange, &wg, false) wg.Wait() } func (self *SCloudprovider) IsAvailable() bool { if !self.GetEnabled() { return false } if !utils.IsInStringArray(self.Status, api.CLOUD_PROVIDER_VALID_STATUS) { return false } if !utils.IsInStringArray(self.HealthStatus, api.CLOUD_PROVIDER_VALID_HEALTH_STATUS) { return false } return true } func (self *SCloudprovider) Delete(ctx context.Context, userCred mcclient.TokenCredential) error { // override log.Infof("cloud provider delete do nothing") return nil } func (self *SCloudprovider) RealDelete(ctx context.Context, userCred mcclient.TokenCredential) error { var err error for _, manager := range []IPurgeableManager{ BucketManager, HostManager, SnapshotManager, SnapshotPolicyManager, StorageManager, StoragecacheManager, SecurityGroupCacheManager, LoadbalancerManager, LoadbalancerBackendGroupManager, CachedLoadbalancerAclManager, CachedLoadbalancerCertificateManager, LoadbalancerCertificateManager, NatGatewayManager, DBInstanceManager, DBInstanceBackupManager, ElasticcacheManager, AccessGroupCacheManager, FileSystemManager, WafRuleGroupCacheManager, WafIPSetCacheManager, WafRegexSetCacheManager, WafInstanceManager, AppManager, VpcManager, ElasticipManager, MongoDBManager, ElasticSearchManager, KafkaManager, CDNDomainManager, NetworkInterfaceManager, KubeClusterManager, InterVpcNetworkManager, CloudproviderRegionManager, CloudregionManager, CloudproviderQuotaManager, } { err = manager.purgeAll(ctx, userCred, self.Id) if err 
!= nil { return errors.Wrapf(err, "purge %s", manager.Keyword()) } log.Debugf("%s purgeAll success!", manager.Keyword()) } CloudproviderCapabilityManager.removeCapabilities(ctx, userCred, self.Id) err = DnsZoneCacheManager.removeCaches(ctx, userCred, self.Id) if err != nil { return errors.Wrapf(err, "remove dns caches") } return self.SEnabledStatusStandaloneResourceBase.Delete(ctx, userCred) } func (self *SCloudprovider) CustomizeDelete(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, data jsonutils.JSONObject) error { return self.StartCloudproviderDeleteTask(ctx, userCred, "") } func (self *SCloudprovider) StartCloudproviderDeleteTask(ctx context.Context, userCred mcclient.TokenCredential, parentTaskId string) error { params := jsonutils.NewDict() task, err := taskman.TaskManager.NewTask(ctx, "CloudProviderDeleteTask", self, userCred, params, parentTaskId, "", nil) if err != nil { return errors.Wrapf(err, "NewTask") } self.SetStatus(userCred, api.CLOUD_PROVIDER_START_DELETE, "StartCloudproviderDeleteTask") task.ScheduleRun(nil) return nil } func (self *SCloudprovider) GetRegionDriver() (IRegionDriver, error) { driver := GetRegionDriver(self.Provider) if driver == nil { return nil, fmt.Errorf("failed to find region driver for %s", self.Provider) } return driver, nil } func (self *SCloudprovider) ClearSchedDescCache() error { hosts := make([]SHost, 0) q := HostManager.Query().Equals("manager_id", self.Id) err := db.FetchModelObjects(HostManager, q, &hosts) if err != nil { return err } for i := range hosts { err := hosts[i].ClearSchedDescCache() if err != nil { log.Errorf("host ClearSchedDescCache error: %v", err) return err } } return nil } func (self *SCloudprovider) PerformEnable(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, input apis.PerformEnableInput) (jsonutils.JSONObject, error) { if strings.Contains(self.Status, "delet") { return nil, httperrors.NewInvalidStatusError("Cannot enable deleting account") } _, err := self.SEnabledStatusStandaloneResourceBase.PerformEnable(ctx, userCred, query, input) if err != nil { return nil, err } account := self.GetCloudaccount() if account != nil { if !account.GetEnabled() { return account.enableAccountOnly(ctx, userCred, nil, input) } } return nil, nil } func (self *SCloudprovider) PerformDisable(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, input apis.PerformDisableInput) (jsonutils.JSONObject, error) { _, err := self.SEnabledStatusStandaloneResourceBase.PerformDisable(ctx, userCred, query, input) if err != nil { return nil, err } account := self.GetCloudaccount() if account != nil { allDisable := true providers := account.GetCloudproviders() for i := range providers { if providers[i].GetEnabled() { allDisable = false break } } if allDisable && account.GetEnabled() { return account.PerformDisable(ctx, userCred, nil, input) } } return nil, nil } func (manager *SCloudproviderManager) filterByDomainId(q *sqlchemy.SQuery, domainId string) *sqlchemy.SQuery { subq := db.SharedResourceManager.Query("resource_id") subq = subq.Equals("resource_type", CloudaccountManager.Keyword()) subq = subq.Equals("target_project_id", domainId) subq = subq.Equals("target_type", db.SharedTargetDomain) cloudaccounts := CloudaccountManager.Query().SubQuery() q = q.Join(cloudaccounts, sqlchemy.Equals( q.Field("cloudaccount_id"), cloudaccounts.Field("id"), )) q = q.Filter(sqlchemy.OR( sqlchemy.AND( sqlchemy.Equals(q.Field("domain_id"), domainId),
sqlchemy.Equals(cloudaccounts.Field("share_mode"), api.CLOUD_ACCOUNT_SHARE_MODE_PROVIDER_DOMAIN), ), sqlchemy.AND( sqlchemy.Equals(cloudaccounts.Field("share_mode"), api.CLOUD_ACCOUNT_SHARE_MODE_SYSTEM), sqlchemy.OR( sqlchemy.AND( sqlchemy.Equals(cloudaccounts.Field("public_scope"), rbacutils.ScopeNone), sqlchemy.Equals(cloudaccounts.Field("domain_id"), domainId), ), sqlchemy.AND( sqlchemy.Equals(cloudaccounts.Field("public_scope"), rbacutils.ScopeDomain), sqlchemy.OR( sqlchemy.Equals(cloudaccounts.Field("domain_id"), domainId), sqlchemy.In(cloudaccounts.Field("id"), subq.SubQuery()), ), ), sqlchemy.Equals(cloudaccounts.Field("public_scope"), rbacutils.ScopeSystem), ), ), sqlchemy.AND( sqlchemy.Equals(cloudaccounts.Field("domain_id"), domainId), sqlchemy.Equals(cloudaccounts.Field("share_mode"), api.CLOUD_ACCOUNT_SHARE_MODE_ACCOUNT_DOMAIN), ), )) return q } func (manager *SCloudproviderManager) FilterByOwner(q *sqlchemy.SQuery, owner mcclient.IIdentityProvider, scope rbacutils.TRbacScope) *sqlchemy.SQuery { if owner != nil { switch scope { case rbacutils.ScopeProject, rbacutils.ScopeDomain: if len(owner.GetProjectDomainId()) > 0 { q = manager.filterByDomainId(q, owner.GetProjectDomainId()) } } } return q } func (self *SCloudprovider) getSyncStatus2() string { q := CloudproviderRegionManager.Query() q = q.Equals("cloudprovider_id", self.Id) q = q.NotEquals("sync_status", api.CLOUD_PROVIDER_SYNC_STATUS_IDLE) cnt, err := q.CountWithError() if err != nil { return api.CLOUD_PROVIDER_SYNC_STATUS_ERROR } if cnt > 0 { return api.CLOUD_PROVIDER_SYNC_STATUS_SYNCING } else { return api.CLOUD_PROVIDER_SYNC_STATUS_IDLE } } func (manager *SCloudproviderManager) fetchRecordsByQuery(q *sqlchemy.SQuery) []SCloudprovider { recs := make([]SCloudprovider, 0) err := db.FetchModelObjects(manager, q, &recs) if err != nil { return nil } return recs } func (manager *SCloudproviderManager) initAllRecords() { recs := manager.fetchRecordsByQuery(manager.Query()) for i := range recs { db.Update(&recs[i], func() error { recs[i].SyncStatus = api.CLOUD_PROVIDER_SYNC_STATUS_IDLE return nil }) } } func (provider *SCloudprovider) AllowGetDetailsClirc(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject) bool { return db.IsAdminAllowGetSpec(userCred, provider, "client-rc") } func (provider *SCloudprovider) GetDetailsClirc(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject) (jsonutils.JSONObject, error) { accessUrl := provider.getAccessUrl() passwd, err := provider.getPassword() if err != nil { return nil, err } account := provider.GetCloudaccount() var options *jsonutils.JSONDict if account != nil { options = account.Options } rc, err := cloudprovider.GetClientRC(provider.Name, accessUrl, provider.Account, passwd, provider.Provider, options) if err != nil { return nil, err } return jsonutils.Marshal(rc), nil } func (manager *SCloudproviderManager) ResourceScope() rbacutils.TRbacScope { return rbacutils.ScopeDomain } func (provider *SCloudprovider) AllowGetDetailsStorageClasses( ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, ) bool { return db.IsAdminAllowGetSpec(userCred, provider, "storage-classes") } func (provider *SCloudprovider) GetDetailsStorageClasses( ctx context.Context, userCred mcclient.TokenCredential, input api.CloudproviderGetStorageClassInput, ) (api.CloudproviderGetStorageClassOutput, error) { output := api.CloudproviderGetStorageClassOutput{} driver, err := provider.GetProvider() if err != nil { return output, 
httperrors.NewInternalServerError("failed to get provider driver: %s", err) } if len(input.CloudregionId) > 0 { _, input.CloudregionResourceInput, err = ValidateCloudregionResourceInput(userCred, input.CloudregionResourceInput) if err != nil { return output, errors.Wrap(err, "ValidateCloudregionResourceInput") } } sc := driver.GetStorageClasses(input.CloudregionId) if sc == nil { return output, httperrors.NewInternalServerError("storage classes not supported") } output.StorageClasses = sc return output, nil } func (provider *SCloudprovider) AllowGetDetailsCannedAcls( ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, ) bool { return db.IsAdminAllowGetSpec(userCred, provider, "canned-acls") } func (provider *SCloudprovider) GetDetailsCannedAcls( ctx context.Context, userCred mcclient.TokenCredential, input api.CloudproviderGetCannedAclInput, ) (api.CloudproviderGetCannedAclOutput, error) { output := api.CloudproviderGetCannedAclOutput{} driver, err := provider.GetProvider() if err != nil { return output, httperrors.NewInternalServerError("failed to get provider driver: %s", err) } if len(input.CloudregionId) > 0 { _, input.CloudregionResourceInput, err = ValidateCloudregionResourceInput(userCred, input.CloudregionResourceInput) if err != nil { return output, errors.Wrap(err, "ValidateCloudregionResourceInput") } } output.BucketCannedAcls = driver.GetBucketCannedAcls(input.CloudregionId) output.ObjectCannedAcls = driver.GetObjectCannedAcls(input.CloudregionId) return output, nil } func (provider *SCloudprovider) getAccountShareInfo() apis.SAccountShareInfo { account := provider.GetCloudaccount() return account.getAccountShareInfo() } func (provider *SCloudprovider) IsSharable(reqUsrId mcclient.IIdentityProvider) bool { account := provider.GetCloudaccount() if account != nil { if account.ShareMode == api.CLOUD_ACCOUNT_SHARE_MODE_SYSTEM { return account.IsSharable(reqUsrId) } } return false } func (provider *SCloudprovider) AllowGetDetailsChangeOwnerCandidateDomains(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject) bool { return provider.DomainId == userCred.GetProjectDomainId() || db.IsAdminAllowGetSpec(userCred, provider, "change-owner-candidate-domains") } func (provider *SCloudprovider) GetDetailsChangeOwnerCandidateDomains(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject) (apis.ChangeOwnerCandidateDomainsOutput, error) { return db.IOwnerResourceBaseModelGetChangeOwnerCandidateDomains(provider) } func (provider *SCloudprovider) GetChangeOwnerCandidateDomainIds() []string { account := provider.GetCloudaccount() if account.ShareMode == api.CLOUD_ACCOUNT_SHARE_MODE_ACCOUNT_DOMAIN { return []string{account.DomainId} } // if account's public_scope=domain and share_mode=provider_domain, only allow to share to specific domains if account.PublicScope == string(rbacutils.ScopeDomain) { sharedDomains := account.GetSharedDomains() return append(sharedDomains, account.DomainId) } return []string{} } func (self *SCloudprovider) SyncProject(ctx context.Context, userCred mcclient.TokenCredential, id string) (string, error) { account := self.GetCloudaccount() if account == nil { return "", fmt.Errorf("failed to get cloudprovider %s account", self.Name) } return account.SyncProject(ctx, userCred, id) } func (self *SCloudprovider) GetSchedtags() []SSchedtag { return GetSchedtags(CloudproviderschedtagManager, self.Id) } func (self *SCloudprovider) GetDynamicConditionInput() *jsonutils.JSONDict { return
jsonutils.Marshal(self).(*jsonutils.JSONDict) } func (self *SCloudprovider) AllowPerformSetSchedtag(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, data jsonutils.JSONObject) bool { return AllowPerformSetResourceSchedtag(self, ctx, userCred, query, data) } func (self *SCloudprovider) PerformSetSchedtag(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, data jsonutils.JSONObject) (jsonutils.JSONObject, error) { return PerformSetResourceSchedtag(self, ctx, userCred, query, data) } func (self *SCloudprovider) GetSchedtagJointManager() ISchedtagJointManager { return CloudproviderschedtagManager } func (self *SCloudprovider) GetInterVpcNetworks() ([]SInterVpcNetwork, error) { networks := []SInterVpcNetwork{} q := InterVpcNetworkManager.Query().Equals("manager_id", self.Id) err := db.FetchModelObjects(InterVpcNetworkManager, q, &networks) if err != nil { return nil, errors.Wrapf(err, "db.FetchModelObjects") } return networks, nil } func (self *SCloudprovider) SyncInterVpcNetwork(ctx context.Context, userCred mcclient.TokenCredential, interVpcNetworks []cloudprovider.ICloudInterVpcNetwork) ([]SInterVpcNetwork, []cloudprovider.ICloudInterVpcNetwork, compare.SyncResult) { lockman.LockRawObject(ctx, self.Keyword(), fmt.Sprintf("%s-interVpcNetwork", self.Id)) defer lockman.ReleaseRawObject(ctx, self.Keyword(), fmt.Sprintf("%s-interVpcNetwork", self.Id)) result := compare.SyncResult{} localNetworks := []SInterVpcNetwork{} remoteNetworks := []cloudprovider.ICloudInterVpcNetwork{} dbNetworks, err := self.GetInterVpcNetworks() if err != nil { result.Error(errors.Wrapf(err, "GetInterVpcNetworks")) return nil, nil, result } removed := make([]SInterVpcNetwork, 0) commondb := make([]SInterVpcNetwork, 0) commonext := make([]cloudprovider.ICloudInterVpcNetwork, 0) added := make([]cloudprovider.ICloudInterVpcNetwork, 0) err = compare.CompareSets(dbNetworks, interVpcNetworks, &removed, &commondb, &commonext, &added) if err != nil { result.Error(err) return nil, nil, result } for i := 0; i < len(removed); i += 1 { err = removed[i].syncRemove(ctx, userCred) if err != nil { result.DeleteError(err) continue } result.Delete() } for i := 0; i < len(commondb); i += 1 { err = commondb[i].SyncWithCloudInterVpcNetwork(ctx, userCred, commonext[i]) if err != nil { result.UpdateError(errors.Wrapf(err, "SyncWithCloudInterVpcNetwork")) continue } localNetworks = append(localNetworks, commondb[i]) remoteNetworks = append(remoteNetworks, commonext[i]) result.Update() } for i := 0; i < len(added); i += 1 { network, err := InterVpcNetworkManager.newFromCloudInterVpcNetwork(ctx, userCred, added[i], self) if err != nil { result.AddError(err) continue } localNetworks = append(localNetworks, *network) remoteNetworks = append(remoteNetworks, added[i]) result.Add() } return localNetworks, remoteNetworks, result } func (self *SCloudprovider) SyncCallSyncCloudproviderInterVpcNetwork(ctx context.Context, userCred mcclient.TokenCredential) { driver, err := self.GetProvider() if err != nil { log.Errorf("failed to get ICloudProvider from SCloudprovider:%s %s", self.GetName(), self.Id) return } if cloudprovider.IsSupportInterVpcNetwork(driver) { networks, err := driver.GetICloudInterVpcNetworks() if err != nil { log.Errorf("failed to get inter vpc network for Manager %s error: %v", self.Id, err) return } else { localNetwork, remoteNetwork, result := self.SyncInterVpcNetwork(ctx, userCred, networks) if result.IsError() { return } for i := range localNetwork { 
lockman.LockObject(ctx, &localNetwork[i]) defer lockman.ReleaseObject(ctx, &localNetwork[i]) if localNetwork[i].Deleted { return } localNetwork[i].SyncInterVpcNetworkRouteSets(ctx, userCred, remoteNetwork[i]) } log.Infof("Sync inter vpc network for cloudaccount %s result: %s", self.GetName(), result.Result()) return } } } func (manager *SCloudproviderManager) ListItemExportKeys(ctx context.Context, q *sqlchemy.SQuery, userCred mcclient.TokenCredential, keys stringutils2.SSortedStrings) (*sqlchemy.SQuery, error) { q, err := manager.SEnabledStatusStandaloneResourceBaseManager.ListItemExportKeys(ctx, q, userCred, keys) if err != nil { return nil, errors.Wrap(err, "SEnabledStatusStandaloneResourceBaseManager.ListItemExportKeys") } q, err = manager.SProjectizedResourceBaseManager.ListItemExportKeys(ctx, q, userCred, keys) if err != nil { return nil, errors.Wrapf(err, "SProjectizedResourceBaseManager.ListItemExportKeys") } q, err = manager.SProjectMappingResourceBaseManager.ListItemExportKeys(ctx, q, userCred, keys) if err != nil { return nil, errors.Wrapf(err, "SProjectMappingResourceBaseManager.ListItemExportKeys") } return q, nil } func (self *SCloudprovider) AllowPerformProjectMapping(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject) bool { return db.IsAdminAllowPerform(userCred, self, "project-mapping") } // Bind a sync policy (project mapping) func (self *SCloudprovider) PerformProjectMapping(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject, input api.CloudaccountProjectMappingInput) (jsonutils.JSONObject, error) { if len(input.ProjectMappingId) > 0 { _, err := validators.ValidateModel(userCred, ProjectMappingManager, &input.ProjectMappingId) if err != nil { return nil, err } if len(self.ProjectMappingId) > 0 && self.ProjectMappingId != input.ProjectMappingId { return nil, httperrors.NewInputParameterError("cloudprovider %s has already bound project mapping %s", self.Name, self.ProjectMappingId) } } // no changes if self.ProjectMappingId == input.ProjectMappingId { return nil, nil } _, err := db.Update(self, func() error { self.ProjectMappingId = input.ProjectMappingId return nil }) if err != nil { return nil, err } return nil, refreshPmCaches() }
ams) if err != nil { return "", "", errors.Wrap(err, "Projects.Create") } projectId, err := resp.GetString("id") if err != nil { return "", "", errors.Wrap(err, "resp.GetString") } return domainId, projectId, nil } func (self *SCloudaccount) getOrCreateTenant(ctx context.Context, name, domainId, projectId, desc string) (string, string, error) { if len(domainId) == 0 { domainId = self.DomainId } tenant, err := getTenant(ctx, projectId, name) if err != nil { if errors.Cause(err) != sql.ErrNoRows { return
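A side note on the share-mode filtering above: filterByDomainId packs the three cloud-account share modes into a single SQL OR, which is hard to audit at a glance. The following Python sketch restates the predicate that filter appears to encode; the dictionary keys and the shared_domains list are illustrative stand-ins for the account columns and the SharedResource subquery, not a real API.

# Hedged restatement of the visibility predicate built by filterByDomainId.
def provider_visible(provider_domain, account, viewer_domain):
    # share_mode == provider_domain: visible when the provider itself
    # belongs to the viewer's domain.
    if (account['share_mode'] == 'provider_domain'
            and provider_domain == viewer_domain):
        return True
    # share_mode == system: visibility depends on the account's public_scope.
    if account['share_mode'] == 'system':
        scope = account['public_scope']
        if scope == 'none':
            return account['domain_id'] == viewer_domain
        if scope == 'domain':
            # the account's own domain, or domains it was explicitly shared to
            return (account['domain_id'] == viewer_domain
                    or viewer_domain in account['shared_domains'])
        return scope == 'system'  # shared with every domain
    # share_mode == account_domain: only the account's own domain.
    if account['share_mode'] == 'account_domain':
        return account['domain_id'] == viewer_domain
    return False

account = {'share_mode': 'system', 'public_scope': 'domain',
           'domain_id': 'd1', 'shared_domains': ['d2']}
assert provider_visible('d3', account, 'd2')  # visible via an explicit share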
calc.rs
enum OpType { Add, // + Sub, // - Mul, // * Div, // / Modulus, // % Pow, // ^ } enum Element { Operator(OpType), Value(f32), Variable(String), } use stack::List; use calc::OpType::*; use calc::Element::*; use std::str; fn get_op_priority(op_ch: &char) -> i32 { match *op_ch { '(' => 0, ')' => 1, '+' | '-' => 2, '*' | '/' | '%' => 3, '^' => 4, _ => -1, } } fn get_polish_notation(expression: &String) -> Result<String, String> { let mut result = String::new(); let mut stack = String::new(); let mut operand = false; let mut prev_space = false; let mut idx = -1; 'process: for c in expression.chars() { idx += 1; if prev_space && c != ' ' { result.push(' '); prev_space = false; } match c { '0'..='9' | '.' => { result.push(c); operand = true; } '(' => { stack.push(c); } ')' => { loop { if stack.is_empty() { return Err("Unmatched closing parenthesis!".to_string()); } let stack_char: char; match stack.pop() { Some(x) => stack_char = x, None => return Err("End of stack reached!".to_string()), } if stack_char != '(' { result.push(stack_char); } else { break; } } } ' ' => { prev_space = true; } _ => { if !operand && c == '-' { operand = true; result.push(c); continue 'process; } let prior = get_op_priority(&c); if prior != -1 { operand = false; loop { if !stack.is_empty() { // get last operator priority let stack_operator = &(stack.as_bytes()[stack.len() - 1] as char); if prior <= get_op_priority(stack_operator) { stack.pop(); result.push(*stack_operator); } else { break; } } else { // return Err("Stack is empty".to_string()); break; } } stack.push(c); prev_space = true; } else { return Err(format!("Unexpected char: <{}> at pos {}", c, idx)); } } } }
Some(x) => result.push(x), None => return Err("unexpected end of operator stack".to_string()), } } return Ok(result); } // fn read_f32(option: Option<f32>) -> f32 { // match option { // Some(x) => x, // None => panic!("Ohh, we got end of stack!"), // } // } fn calc_expression(expr_stack: List<Element>) -> Option<f32> { let mut val_stack: List<f32> = List::new(); for x in expr_stack.iter() { match x { &Operator(ref op) => { let val2: f32;// = read_f32(val_stack.pop()); let val1: f32;// = read_f32(val_stack.pop()); match val_stack.pop() { Some(x) => val2 = x, None => return None, } match val_stack.pop() { Some(x) => val1 = x, None => return None, } val_stack.push(match *op { Add => val1 + val2, Sub => val1 - val2, Mul => val1 * val2, Div => val1 / val2, Modulus => val1 % val2, Pow => val1.powf(val2), }); } &Value(ref val) => { val_stack.push(*val); } &Variable(_) => unimplemented!(), } } return match val_stack.pop() { Some(x) => { match val_stack.pop() { // not all values were used, which means the expression looked like "10 /(2 4)" Some(_) => None, None => Some(x), } } None => None, }; } fn get_f32_from_string(val: &String) -> f32 { match val.parse::<f32>() { Ok(x) => x, Err(_) => 0.0, } } fn get_element_by_string(token: &String) -> Element { match token.as_bytes()[0] as char { '+' => Operator(Add), '-' => { if token.len() > 1 { Value(get_f32_from_string(token)) } else { Operator(Sub) } } '*' => Operator(Mul), '/' => Operator(Div), '%' => Operator(Modulus), '^' => Operator(Pow), _ => Value(get_f32_from_string(token)), } } // fn print_stack(stack: &List<Element>) { // for x in stack.iter() { // print!("{} ", // match *x { // Operator(Add) => "+".to_string(), // Operator(Sub) => "-".to_string(), // Operator(Mul) => "*".to_string(), // Operator(Div) => "/".to_string(), // Operator(Modulus) => "%".to_string(), // Operator(Pow) => "^".to_string(), // Value(val) => val.to_string(), // Variable(_) => "ITS VAR".to_string(), // }); // } // print!("\n"); // } fn string_to_list(str_expr: &String) -> List<Element> { let mut tmp: List<Element> = List::new(); let mut token: String = String::new(); let mut operand = false; let mut prev_operator = false; 'tokenize: for ch in str_expr.chars() { match ch { '0'..='9' | '.'
=> { token.push(ch); operand = true; prev_operator = false; } '+' | '-' | '*' | '/' | '%' | '^' => { if !operand && ch == '-' && !prev_operator { token.push(ch); operand = true; continue 'tokenize; } if !token.is_empty() { tmp.push(get_element_by_string(&token)); token.clear(); } token.push(ch); tmp.push(get_element_by_string(&token)); token.clear(); operand = false; prev_operator = true; } ' ' => { prev_operator = false; if !token.is_empty() { operand = false; tmp.push(get_element_by_string(&token)); token.clear(); } } _ => println!("unexpected situation"), } } // print_stack(&tmp); let mut stack: List<Element> = List::new(); while let Some(el) = tmp.pop() { stack.push(el); } return stack; } pub fn calc(infix_str: &String) -> Result<f32, String> { let stack2; match get_polish_notation(infix_str) { Ok(x) => { stack2 = string_to_list(&x); match calc_expression(stack2) { Some(x) => Ok(x), None => Err("Incorrect expression!".to_string()), } } Err(x) => Err(x), } } #[cfg(test)] mod calc_test { use super::calc; #[test] fn calc_basics() { let tests_result_4 = vec!["2+2", "2 +2", "2 + 2", " 2 + 2 ", "(2+2)", "( 2+2)", " ( 2 +2)", "(2+ 2)", "(2+ 2 )", "(2+ 2 ) ", "(((2+2)))", "(((2)) + 2)", "4-0", "8/2", "8*0.5", "9.5 - 5.5", "9 % 5", "-4 + 8", "8 + -4", "-2 + 6"]; for test in tests_result_4 { match calc(&test.to_string()) { Ok(x) => assert_eq!(x, 4.0), Err(x) => { println!("FAILED TEST: {}, error: {} \n", test, x); assert!(false); } } } } #[test] fn calc_long_expression() { let tests_result_2648 = vec!["55 * (3552 / 74) + 2^3", "55 * (3552 / (37 * 2)) + 2^3", "(54 + 1) * (3552 / (37 * 2)) + 2^3", "55 * (3552 / (147 % 74 + 1)) + 2^3", "55 * (3552 / 74) + 2^(2+1)", "55 * (3552 / 74) + 2^(4-1)", "55 * (3552 / 74) + 2^(0.5 * 6)", "55 * (3552 / 74) + 2^(6 * 0.5)", "2^3 + 55 / (1/(3552 * (1/74)))", "55 * (3552 / (74-(-74+74)) ) + 2^3", "55 * (3552 / 74) + 2^3 - (-69 - 9) + (-69 - 9)"]; for test in tests_result_2648 { match calc(&test.to_string()) { Ok(x) => assert_eq!(x, 2648.0), Err(x) => { println!("FAILED TEST: {}, error: {} \n", test, x); assert!(false); } } } } }
while !stack.is_empty() { match stack.pop() {
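The calc.rs record above evaluates arithmetic in two passes: get_polish_notation runs a shunting-yard conversion from infix to postfix using the priorities in get_op_priority, and calc_expression then folds the postfix tokens over a value stack. A minimal Python sketch of those same two passes (tokens arrive pre-split here, so the tokenizer is skipped; all names are illustrative):

# Shunting-yard to postfix, then stack evaluation -- the same control flow
# as get_polish_notation + calc_expression, minus tokenization.
PRIORITY = {'(': 0, ')': 1, '+': 2, '-': 2, '*': 3, '/': 3, '%': 3, '^': 4}

def to_postfix(tokens):
    out, ops = [], []
    for tok in tokens:
        if tok == '(':
            ops.append(tok)
        elif tok == ')':
            while ops and ops[-1] != '(':
                out.append(ops.pop())
            ops.pop()  # drop the matching '('
        elif tok in PRIORITY:
            # pop while the stack top has priority >= the incoming operator;
            # like the Rust code, this makes '^' left-associative
            while ops and PRIORITY[ops[-1]] >= PRIORITY[tok]:
                out.append(ops.pop())
            ops.append(tok)
        else:
            out.append(tok)  # operand
    return out + ops[::-1]  # flush the remaining operators

def eval_postfix(tokens):
    stack = []
    apply = {'+': lambda a, b: a + b, '-': lambda a, b: a - b,
             '*': lambda a, b: a * b, '/': lambda a, b: a / b,
             '%': lambda a, b: a % b, '^': lambda a, b: a ** b}
    for tok in tokens:
        if tok in apply:
            b, a = stack.pop(), stack.pop()  # right operand pops first
            stack.append(apply[tok](a, b))
        else:
            stack.append(float(tok))
    return stack.pop()

assert eval_postfix(to_postfix(['2', '+', '2'])) == 4.0
assert eval_postfix(to_postfix(['(', '2', '+', '2', ')', '*', '2'])) == 8.0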
update_sdktools.py
#!/usr/bin/env python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Script that reads omahaproxy and gsutil to determine a version of the sdk_tools bundle to use. Please note the differences between this script and update_nacl_manifest.py: update_sdktools.py is run by an SDK-team developer to assist in updating to a
any changes. Instead it modifies the manifest file in commondatastorage.""" import collections import difflib import json import optparse import re import sys import urllib2 from manifest_util import DownloadAndComputeHash, DictToJSON from update_nacl_manifest import RealDelegate SDK_TOOLS_DESCRIPTION_FORMAT = 'Native Client SDK Tools, revision %d' BUCKET_PATH = 'nativeclient-mirror/nacl/nacl_sdk/' GS_BUCKET_PATH = 'gs://' + BUCKET_PATH HTTPS_BUCKET_PATH = 'https://commondatastorage.googleapis.com/' + BUCKET_PATH def GetSdkToolsUrl(revision): return HTTPS_BUCKET_PATH + 'trunk.%d/sdk_tools.tgz' % revision def GetTrunkRevisions(delegate): urls = delegate.GsUtil_ls(GS_BUCKET_PATH) revisions = [] for url in urls: m = re.match(GS_BUCKET_PATH + 'trunk\.(\d+)', url) if m: revisions.append((int(m.group(1)), url)) return sorted(revisions) def FindMostRecentSdkTools(delegate): for revision, url in reversed(GetTrunkRevisions(delegate)): sdktools_url = url + 'sdk_tools.tgz' if delegate.GsUtil_ls(sdktools_url): return revision, sdktools_url return None def JsonLoadFromString(json_string): if sys.version_info > (2, 7): return json.loads(json_string, object_pairs_hook=collections.OrderedDict) else: return json.loads(json_string) def GetBundleByName(bundles, name): for bundle in bundles: if bundle['name'] == name: return bundle return None def UpdateSdkToolsBundle(sdk_tools_bundle, revision, url, sha1, size): sdk_tools_bundle['description'] = SDK_TOOLS_DESCRIPTION_FORMAT % revision sdk_tools_bundle['revision'] = revision # Update archive for each OS for archive in sdk_tools_bundle['archives']: archive['url'] = url archive['checksum']['sha1'] = sha1 archive['size'] = size def UpdateManifest(manifest, revision): sdk_tools_bundle = GetBundleByName(manifest['bundles'], 'sdk_tools') url = GetSdkToolsUrl(revision) sha1, size = DownloadAndComputeHash(urllib2.urlopen(url)) UpdateSdkToolsBundle(sdk_tools_bundle, revision, url, sha1, size) def UpdateManifestFileToRevision(filename, revision): with open(filename) as stream: manifest_string = stream.read() manifest = JsonLoadFromString(manifest_string) UpdateManifest(manifest, revision) new_manifest_string = DictToJSON(manifest) diff_string = ''.join(difflib.unified_diff(manifest_string.splitlines(1), new_manifest_string.splitlines(1))) print 'diff %s' % filename print diff_string print with open(filename, 'w') as stream: stream.write(new_manifest_string) def main(args): parser = optparse.OptionParser() parser.add_option('-r', '--revision', help='set revision manually, rather than using the latest version') options, args = parser.parse_args(args[1:]) if len(args) != 0: parser.error('Unexpected args: %s' % ', '.join(args)) # TODO(binji): http://crbug.com/169047. Rename RealDelegate to something else. delegate = RealDelegate() if not options.revision: revision, _ = FindMostRecentSdkTools(delegate) else: revision = int(options.revision) UpdateManifestFileToRevision('json/naclsdk_manifest0.json', revision) UpdateManifestFileToRevision('json/naclsdk_manifest2.json', revision) if __name__ == '__main__': sys.exit(main(sys.argv))
new sdk_tools bundle. A file on the developer's hard drive is modified, and must be checked in for the new sdk_tools bundle to be used. update_nacl_manifest.py is customarily run by a cron job, and does not check in
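The core of the script above is revision discovery: list the trunk.<rev> directories, parse out the revision numbers, and take the newest one that still carries an sdk_tools.tgz. A condensed sketch of that step (re.search stands in for the script's anchored re.match, and the hard-coded URL list stands in for delegate.GsUtil_ls output):

import re

urls = [
    'gs://nativeclient-mirror/nacl/nacl_sdk/trunk.1231/',
    'gs://nativeclient-mirror/nacl/nacl_sdk/trunk.1234/',
    'gs://nativeclient-mirror/nacl/nacl_sdk/trunk.1229/',
]
# (revision, url) pairs sorted ascending, mirroring GetTrunkRevisions
revisions = sorted(
    (int(m.group(1)), url)
    for url in urls
    for m in [re.search(r'trunk\.(\d+)', url)] if m
)
revision, url = revisions[-1]  # newest revision, as FindMostRecentSdkTools does
print(revision, url + 'sdk_tools.tgz')
# 1234 gs://nativeclient-mirror/nacl/nacl_sdk/trunk.1234/sdk_tools.tgz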
sprite_visibility.rs
//! Transparency, visibility sorting and camera centroid culling for 2D Sprites. use std::cmp::Ordering; use amethyst_core::{ ecs::*, math::{Point3, Vector3}, transform::Transform, Hidden, HiddenPropagate, }; #[cfg(feature = "profiler")] use thread_profiler::profile_scope; use crate::{ camera::{ActiveCamera, Camera}, sprite::SpriteRender, transparent::Transparent, }; /// Resource for controlling what entities should be rendered, and whether to draw them ordered or /// not, which is useful for transparent surfaces. #[derive(Default, Debug)] pub struct SpriteVisibility { /// Visible entities that can be drawn in any order pub visible_unordered: Vec<Entity>, /// Visible entities that need to be drawn in the given order pub visible_ordered: Vec<Entity>, } #[derive(Debug, Clone)] struct Internals { entity: Entity, centroid: Point3<f32>, camera_distance: f32, from_camera: Vector3<f32>, } /// Determines which entities should be drawn, and sorts transparent entities back to front based /// on their position on the Z axis. /// /// The sprite render pass should draw all sprites without semi-transparent pixels, then draw the /// sprites with semi-transparent pixels from far to near. /// /// Note that this should run after `Transform` has been updated for the current frame, and /// before rendering occurs. #[derive(Debug)] pub struct SpriteVisibilitySortingSystem; impl System<'_> for SpriteVisibilitySortingSystem { fn
(&mut self) -> Box<dyn ParallelRunnable> { let mut transparent_centroids: Vec<Internals> = Vec::default(); Box::new( SystemBuilder::<()>::new("SpriteVisibilitySortingSystem") .read_resource::<ActiveCamera>() .write_resource::<SpriteVisibility>() .with_query(<(&Camera, &Transform)>::query()) .with_query(<(Entity, &Camera, &Transform)>::query()) .with_query( <(Entity, &Transform, &SpriteRender, &Transparent)>::query() .filter(!component::<Hidden>() & !component::<HiddenPropagate>()), ) .with_query(<(Entity, &Transform, &SpriteRender)>::query().filter( !component::<Transparent>() & !component::<Hidden>() & !component::<HiddenPropagate>(), )) .build( move |commands, world, (active_camera, visibility), ( camera_query1, camera_query2, transparent_query, non_transparent_query, )| { #[cfg(feature = "profiler")] profile_scope!("sprite_visibility_system"); transparent_centroids.clear(); visibility.visible_ordered.clear(); visibility.visible_unordered.clear(); let origin = Point3::origin(); let (camera, camera_transform) = match active_camera.entity.map_or_else( || camera_query1.iter(world).next(), |e| { camera_query2 .iter(world) .find(|(camera_entity, _, _)| **camera_entity == e) .map(|(_entity, camera, camera_transform)| { (camera, camera_transform) }) }, ) { Some(r) => r, None => return, }; let camera_backward = camera_transform.global_matrix().column(2).xyz(); let camera_centroid = camera_transform.global_matrix().transform_point(&origin); transparent_centroids.extend( transparent_query .iter(world) .map(|(e, t, _, _)| { (*e, t.global_matrix().transform_point(&origin)) }) // filter entities behind the camera .filter(|(_, c)| (c - camera_centroid).dot(&camera_backward) < 0.0) .map(|(entity, centroid)| { Internals { entity, centroid, camera_distance: (centroid.z - camera_centroid.z).abs(), from_camera: centroid - camera_centroid, } }), ); transparent_centroids.sort_by(|a, b| { b.camera_distance .partial_cmp(&a.camera_distance) .unwrap_or(Ordering::Equal) }); visibility .visible_ordered .extend(transparent_centroids.iter().map(|c| c.entity)); visibility.visible_unordered.extend( non_transparent_query .iter(world) .map(|(e, t, _)| (e, t.global_matrix().transform_point(&origin))) // filter entities behind the camera .filter(|(_, c)| (c - camera_centroid).dot(&camera_backward) < 0.0) .map(|(entity, _)| entity), ); }, ), ) } }
build
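The system above culls sprites whose centroid lies behind the camera (the dot product of the camera-to-centroid vector with the camera's backward axis must be negative) and then sorts the transparent survivors far-to-near by Z distance. A language-agnostic Python sketch of that pass; the camera pose and entity positions are invented for the demo:

# Back-to-front transparent ordering with behind-camera culling.
# The camera sits at z=10 looking down -Z, so its 'backward' axis is +Z.
camera = (0.0, 0.0, 10.0)
backward = (0.0, 0.0, 1.0)
entities = {'a': (0, 0, 0), 'b': (0, 0, 5), 'c': (0, 0, 20)}  # 'c' is behind

def dot(u, v):
    return sum(x * y for x, y in zip(u, v))

def sub(u, v):
    return tuple(x - y for x, y in zip(u, v))

# keep only centroids in front of the camera
visible = {e: p for e, p in entities.items()
           if dot(sub(p, camera), backward) < 0.0}
# farthest first, mirroring the reversed partial_cmp sort in the system
ordered = sorted(visible, key=lambda e: abs(visible[e][2] - camera[2]),
                 reverse=True)
print(ordered)  # ['a', 'b']: 'a' (distance 10) draws before 'b' (distance 5)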
request.py
from typing import Callable import django from django.http import HttpRequest try: from django.utils.datastructures import CaseInsensitiveMapping except ImportError: from .datastructures import CaseInsensitiveMapping # HttpHeaders copied from the Django 3.0 codebase class HttpHeaders(CaseInsensitiveMapping): HTTP_PREFIX = "HTTP_" # PEP 333 gives two headers which aren't prepended with HTTP_. UNPREFIXED_HEADERS = {"CONTENT_TYPE", "CONTENT_LENGTH"} def __init__(self, environ): headers = {} for header, value in environ.items(): name = self.parse_header_name(header) if name: headers[name] = value super().__init__(headers) def __getitem__(self, key): """Allow header lookup using underscores in place of hyphens.""" return super().__getitem__(key.replace("_", "-")) @classmethod def
(cls, header): if header.startswith(cls.HTTP_PREFIX): start = len(cls.HTTP_PREFIX) header = header[start:] elif header not in cls.UNPREFIXED_HEADERS: return None return header.replace("_", "-").title() def get_headers_old(request): return HttpHeaders(request.META) def get_headers_v3(request): return request.headers get_headers: Callable[[HttpRequest], HttpHeaders] if django.VERSION[0] < 3: get_headers = get_headers_old else: get_headers = get_headers_v3
parse_header_name
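For reference, a quick usage sketch of the HttpHeaders shim above, assuming the record's three pieces are assembled (parse_header_name filled in as the middle): HTTP_-prefixed WSGI keys are normalized to Header-Case, the two unprefixed PEP 333 headers survive, everything else is dropped, and lookups tolerate underscores.

environ = {
    'HTTP_ACCEPT_ENCODING': 'gzip',
    'CONTENT_TYPE': 'text/html',
    'REMOTE_ADDR': '127.0.0.1',  # not a header, so parse_header_name drops it
}
headers = HttpHeaders(environ)
print(dict(headers))  # {'Accept-Encoding': 'gzip', 'Content-Type': 'text/html'}
print(headers['accept_encoding'])  # underscore lookup also works -> 'gzip'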
provisioning_approval_request_change.pb.go
// Code generated by protoc-gen-goten-go // File: edgelq/devices/proto/v1alpha/provisioning_approval_request_change.proto // DO NOT EDIT!!! package provisioning_approval_request import ( "fmt" "reflect" "sync" "google.golang.org/protobuf/encoding/protojson" "google.golang.org/protobuf/proto" preflect "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/runtime/protoimpl" ) // proto imports import ( provisioning_policy "github.com/cloudwan/edgelq-sdk/devices/resources/v1alpha/provisioning_policy" field_mask "google.golang.org/genproto/protobuf/field_mask" ) // Reference imports to suppress errors if they are not otherwise used. var ( _ = fmt.Errorf _ = reflect.Method{} _ = sync.Once{} _ = protojson.MarshalOptions{} _ = proto.MarshalOptions{} _ = preflect.Value{} _ = protoimpl.DescBuilder{} ) // make sure we're using proto imports var ( _ = &provisioning_policy.ProvisioningPolicy{} _ = &field_mask.FieldMask{} ) const ( // Verify that this generated code is sufficiently up-to-date. _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) // Verify that runtime/protoimpl is sufficiently up-to-date. _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) // ProvisioningApprovalRequestChange is used by Watch notifications Responses to // describe change of single ProvisioningApprovalRequest One of Added, Modified, // Removed type ProvisioningApprovalRequestChange struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // ProvisioningApprovalRequest change // // Types that are valid to be assigned to ChangeType: // *ProvisioningApprovalRequestChange_Added_ // *ProvisioningApprovalRequestChange_Modified_ // *ProvisioningApprovalRequestChange_Current_ // *ProvisioningApprovalRequestChange_Removed_ ChangeType isProvisioningApprovalRequestChange_ChangeType `protobuf_oneof:"change_type"` } func (m *ProvisioningApprovalRequestChange) Reset() { *m = ProvisioningApprovalRequestChange{} if protoimpl.UnsafeEnabled { mi := &edgelq_devices_proto_v1alpha_provisioning_approval_request_change_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(m)) ms.StoreMessageInfo(mi) } } func (m *ProvisioningApprovalRequestChange) String() string { return protoimpl.X.MessageStringOf(m) } func (*ProvisioningApprovalRequestChange) ProtoMessage() {} func (m *ProvisioningApprovalRequestChange) ProtoReflect() preflect.Message { mi := &edgelq_devices_proto_v1alpha_provisioning_approval_request_change_proto_msgTypes[0] if protoimpl.UnsafeEnabled && m != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(m)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(m) } func (*ProvisioningApprovalRequestChange) GotenMessage() {} // Deprecated, Use ProvisioningApprovalRequestChange.ProtoReflect.Descriptor instead. 
func (*ProvisioningApprovalRequestChange) Descriptor() ([]byte, []int) { return edgelq_devices_proto_v1alpha_provisioning_approval_request_change_proto_rawDescGZIP(), []int{0} } func (m *ProvisioningApprovalRequestChange) Unmarshal(b []byte) error { return proto.Unmarshal(b, m) } func (m *ProvisioningApprovalRequestChange) Marshal() ([]byte, error) { return proto.Marshal(m) } func (m *ProvisioningApprovalRequestChange) MarshalJSON() ([]byte, error) { return protojson.MarshalOptions{}.Marshal(m) } func (m *ProvisioningApprovalRequestChange) UnmarshalJSON(data []byte) error { return protojson.Unmarshal(data, m) } type isProvisioningApprovalRequestChange_ChangeType interface { isProvisioningApprovalRequestChange_ChangeType() } type ProvisioningApprovalRequestChange_Added_ struct { // Added is returned when watched document is added, either created or // enters Query view Added *ProvisioningApprovalRequestChange_Added `protobuf:"bytes,1,opt,name=added,proto3,oneof" firestore:"added"` } type ProvisioningApprovalRequestChange_Modified_ struct { // Modified is returned when watched document is modified Modified *ProvisioningApprovalRequestChange_Modified `protobuf:"bytes,2,opt,name=modified,proto3,oneof" firestore:"modified"` } type ProvisioningApprovalRequestChange_Current_ struct { // Current is returned in stateless watch when document enters query view or // is modified within. Current *ProvisioningApprovalRequestChange_Current `protobuf:"bytes,4,opt,name=current,proto3,oneof" firestore:"current"` } type ProvisioningApprovalRequestChange_Removed_ struct { // Removed is returned when ProvisioningApprovalRequest is deleted or leaves // Query view Removed *ProvisioningApprovalRequestChange_Removed `protobuf:"bytes,3,opt,name=removed,proto3,oneof" firestore:"removed"` } func (*ProvisioningApprovalRequestChange_Added_) isProvisioningApprovalRequestChange_ChangeType() {} func (*ProvisioningApprovalRequestChange_Modified_) isProvisioningApprovalRequestChange_ChangeType() { } func (*ProvisioningApprovalRequestChange_Current_) isProvisioningApprovalRequestChange_ChangeType() {} func (*ProvisioningApprovalRequestChange_Removed_) isProvisioningApprovalRequestChange_ChangeType() {} func (m *ProvisioningApprovalRequestChange) GetChangeType() isProvisioningApprovalRequestChange_ChangeType { if m != nil { return m.ChangeType } return nil } func (m *ProvisioningApprovalRequestChange) GetAdded() *ProvisioningApprovalRequestChange_Added { if x, ok := m.GetChangeType().(*ProvisioningApprovalRequestChange_Added_); ok { return x.Added } return nil } func (m *ProvisioningApprovalRequestChange) GetModified() *ProvisioningApprovalRequestChange_Modified { if x, ok := m.GetChangeType().(*ProvisioningApprovalRequestChange_Modified_); ok { return x.Modified } return nil } func (m *ProvisioningApprovalRequestChange) GetCurrent() *ProvisioningApprovalRequestChange_Current { if x, ok := m.GetChangeType().(*ProvisioningApprovalRequestChange_Current_); ok { return x.Current } return nil } func (m *ProvisioningApprovalRequestChange) GetRemoved() *ProvisioningApprovalRequestChange_Removed { if x, ok := m.GetChangeType().(*ProvisioningApprovalRequestChange_Removed_); ok { return x.Removed } return nil } func (m *ProvisioningApprovalRequestChange) SetChangeType(ofv isProvisioningApprovalRequestChange_ChangeType) { if m == nil { panic(fmt.Errorf("can't set %s on nil %s", "isProvisioningApprovalRequestChange_ChangeType", "ProvisioningApprovalRequestChange")) } m.ChangeType = ofv } func (m *ProvisioningApprovalRequestChange) 
SetAdded(fv *ProvisioningApprovalRequestChange_Added) { m.SetChangeType(&ProvisioningApprovalRequestChange_Added_{Added: fv}) } func (m *ProvisioningApprovalRequestChange) SetModified(fv *ProvisioningApprovalRequestChange_Modified) { m.SetChangeType(&ProvisioningApprovalRequestChange_Modified_{Modified: fv}) } func (m *ProvisioningApprovalRequestChange) SetCurrent(fv *ProvisioningApprovalRequestChange_Current) { m.SetChangeType(&ProvisioningApprovalRequestChange_Current_{Current: fv}) } func (m *ProvisioningApprovalRequestChange) SetRemoved(fv *ProvisioningApprovalRequestChange_Removed) { m.SetChangeType(&ProvisioningApprovalRequestChange_Removed_{Removed: fv}) } // ProvisioningApprovalRequest has been added to query view type ProvisioningApprovalRequestChange_Added struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields ProvisioningApprovalRequest *ProvisioningApprovalRequest `protobuf:"bytes,1,opt,name=provisioning_approval_request,json=provisioningApprovalRequest,proto3" json:"provisioning_approval_request,omitempty" firestore:"provisioningApprovalRequest"` // Integer describing index of added ProvisioningApprovalRequest in // resulting query view. ViewIndex int32 `protobuf:"varint,2,opt,name=view_index,json=viewIndex,proto3" json:"view_index,omitempty" firestore:"viewIndex"` } func (m *ProvisioningApprovalRequestChange_Added) Reset() { *m = ProvisioningApprovalRequestChange_Added{} if protoimpl.UnsafeEnabled { mi := &edgelq_devices_proto_v1alpha_provisioning_approval_request_change_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(m)) ms.StoreMessageInfo(mi) } } func (m *ProvisioningApprovalRequestChange_Added) String() string { return protoimpl.X.MessageStringOf(m) } func (*ProvisioningApprovalRequestChange_Added) ProtoMessage() {} func (m *ProvisioningApprovalRequestChange_Added) ProtoReflect() preflect.Message { mi := &edgelq_devices_proto_v1alpha_provisioning_approval_request_change_proto_msgTypes[1] if protoimpl.UnsafeEnabled && m != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(m)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(m) } func (*ProvisioningApprovalRequestChange_Added) GotenMessage() {} // Deprecated, Use ProvisioningApprovalRequestChange_Added.ProtoReflect.Descriptor instead. 
func (*ProvisioningApprovalRequestChange_Added) Descriptor() ([]byte, []int) { return edgelq_devices_proto_v1alpha_provisioning_approval_request_change_proto_rawDescGZIP(), []int{0, 0} } func (m *ProvisioningApprovalRequestChange_Added) Unmarshal(b []byte) error { return proto.Unmarshal(b, m) } func (m *ProvisioningApprovalRequestChange_Added) Marshal() ([]byte, error) { return proto.Marshal(m) } func (m *ProvisioningApprovalRequestChange_Added) MarshalJSON() ([]byte, error) { return protojson.MarshalOptions{}.Marshal(m) } func (m *ProvisioningApprovalRequestChange_Added) UnmarshalJSON(data []byte) error { return protojson.Unmarshal(data, m) } func (m *ProvisioningApprovalRequestChange_Added) GetProvisioningApprovalRequest() *ProvisioningApprovalRequest { if m != nil { return m.ProvisioningApprovalRequest } return nil } func (m *ProvisioningApprovalRequestChange_Added) GetViewIndex() int32 { if m != nil { return m.ViewIndex } return int32(0) } func (m *ProvisioningApprovalRequestChange_Added) SetProvisioningApprovalRequest(fv *ProvisioningApprovalRequest) { if m == nil { panic(fmt.Errorf("can't set %s on nil %s", "ProvisioningApprovalRequest", "ProvisioningApprovalRequestChange_Added")) } m.ProvisioningApprovalRequest = fv } func (m *ProvisioningApprovalRequestChange_Added) SetViewIndex(fv int32) { if m == nil { panic(fmt.Errorf("can't set %s on nil %s", "ViewIndex", "ProvisioningApprovalRequestChange_Added")) } m.ViewIndex = fv } // ProvisioningApprovalRequest changed some of it's fields - contains either // full document or masked change type ProvisioningApprovalRequestChange_Modified struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // Name of modified ProvisioningApprovalRequest Name *Name `protobuf:"bytes,1,opt,customtype=Name,name=name,proto3" json:"name,omitempty" firestore:"name"` // New version of ProvisioningApprovalRequest or masked difference, // depending on mask_changes instrumentation of issued // [WatchProvisioningApprovalRequestRequest] or // [WatchProvisioningApprovalRequestsRequest] ProvisioningApprovalRequest *ProvisioningApprovalRequest `protobuf:"bytes,2,opt,name=provisioning_approval_request,json=provisioningApprovalRequest,proto3" json:"provisioning_approval_request,omitempty" firestore:"provisioningApprovalRequest"` // Used when mask_changes is set, contains field paths of modified // properties. FieldMask *ProvisioningApprovalRequest_FieldMask `protobuf:"bytes,3,opt,customtype=ProvisioningApprovalRequest_FieldMask,name=field_mask,json=fieldMask,proto3" json:"field_mask,omitempty" firestore:"fieldMask"` // Previous view index specifies previous position of modified // ProvisioningApprovalRequest. When modification doesn't affect sorted // order, value will remain identical to [view_index]. PreviousViewIndex int32 `protobuf:"varint,4,opt,name=previous_view_index,json=previousViewIndex,proto3" json:"previous_view_index,omitempty" firestore:"previousViewIndex"` // Integer specifying ProvisioningApprovalRequest new index in resulting // query view. 
ViewIndex int32 `protobuf:"varint,5,opt,name=view_index,json=viewIndex,proto3" json:"view_index,omitempty" firestore:"viewIndex"` } func (m *ProvisioningApprovalRequestChange_Modified) Reset() { *m = ProvisioningApprovalRequestChange_Modified{} if protoimpl.UnsafeEnabled { mi := &edgelq_devices_proto_v1alpha_provisioning_approval_request_change_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(m)) ms.StoreMessageInfo(mi) } } func (m *ProvisioningApprovalRequestChange_Modified) String() string { return protoimpl.X.MessageStringOf(m) } func (*ProvisioningApprovalRequestChange_Modified) ProtoMessage() {} func (m *ProvisioningApprovalRequestChange_Modified) ProtoReflect() preflect.Message { mi := &edgelq_devices_proto_v1alpha_provisioning_approval_request_change_proto_msgTypes[2] if protoimpl.UnsafeEnabled && m != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(m)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(m) } func (*ProvisioningApprovalRequestChange_Modified) GotenMessage() {} // Deprecated, Use ProvisioningApprovalRequestChange_Modified.ProtoReflect.Descriptor instead. func (*ProvisioningApprovalRequestChange_Modified) Descriptor() ([]byte, []int) { return edgelq_devices_proto_v1alpha_provisioning_approval_request_change_proto_rawDescGZIP(), []int{0, 1} } func (m *ProvisioningApprovalRequestChange_Modified) Unmarshal(b []byte) error { return proto.Unmarshal(b, m) } func (m *ProvisioningApprovalRequestChange_Modified) Marshal() ([]byte, error) { return proto.Marshal(m) } func (m *ProvisioningApprovalRequestChange_Modified) MarshalJSON() ([]byte, error) { return protojson.MarshalOptions{}.Marshal(m) } func (m *ProvisioningApprovalRequestChange_Modified) UnmarshalJSON(data []byte) error { return protojson.Unmarshal(data, m) } func (m *ProvisioningApprovalRequestChange_Modified) GetName() *Name { if m != nil { return m.Name } return nil } func (m *ProvisioningApprovalRequestChange_Modified) GetProvisioningApprovalRequest() *ProvisioningApprovalRequest { if m != nil { return m.ProvisioningApprovalRequest } return nil } func (m *ProvisioningApprovalRequestChange_Modified) GetFieldMask() *ProvisioningApprovalRequest_FieldMask { if m != nil { return m.FieldMask } return nil } func (m *ProvisioningApprovalRequestChange_Modified) GetPreviousViewIndex() int32 { if m != nil { return m.PreviousViewIndex } return int32(0) } func (m *ProvisioningApprovalRequestChange_Modified) GetViewIndex() int32 { if m != nil { return m.ViewIndex } return int32(0) } func (m *ProvisioningApprovalRequestChange_Modified) SetName(fv *Name) { if m == nil { panic(fmt.Errorf("can't set %s on nil %s", "Name", "ProvisioningApprovalRequestChange_Modified")) } m.Name = fv } func (m *ProvisioningApprovalRequestChange_Modified) SetProvisioningApprovalRequest(fv *ProvisioningApprovalRequest) { if m == nil { panic(fmt.Errorf("can't set %s on nil %s", "ProvisioningApprovalRequest", "ProvisioningApprovalRequestChange_Modified")) } m.ProvisioningApprovalRequest = fv } func (m *ProvisioningApprovalRequestChange_Modified) SetFieldMask(fv *ProvisioningApprovalRequest_FieldMask) { if m == nil { panic(fmt.Errorf("can't set %s on nil %s", "FieldMask", "ProvisioningApprovalRequestChange_Modified")) } m.FieldMask = fv } func (m *ProvisioningApprovalRequestChange_Modified) SetPreviousViewIndex(fv int32) { if m == nil { panic(fmt.Errorf("can't set %s on nil %s", "PreviousViewIndex", "ProvisioningApprovalRequestChange_Modified")) } m.PreviousViewIndex = fv } func (m 
*ProvisioningApprovalRequestChange_Modified) SetViewIndex(fv int32) { if m == nil { panic(fmt.Errorf("can't set %s on nil %s", "ViewIndex", "ProvisioningApprovalRequestChange_Modified")) } m.ViewIndex = fv } // ProvisioningApprovalRequest has been added or modified in a query view. // Version used for stateless watching type ProvisioningApprovalRequestChange_Current struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields ProvisioningApprovalRequest *ProvisioningApprovalRequest `protobuf:"bytes,1,opt,name=provisioning_approval_request,json=provisioningApprovalRequest,proto3" json:"provisioning_approval_request,omitempty" firestore:"provisioningApprovalRequest"` } func (m *ProvisioningApprovalRequestChange_Current) Reset() { *m = ProvisioningApprovalRequestChange_Current{} if protoimpl.UnsafeEnabled { mi := &edgelq_devices_proto_v1alpha_provisioning_approval_request_change_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(m)) ms.StoreMessageInfo(mi) } } func (m *ProvisioningApprovalRequestChange_Current) String() string { return protoimpl.X.MessageStringOf(m) } func (*ProvisioningApprovalRequestChange_Current) ProtoMessage() {} func (m *ProvisioningApprovalRequestChange_Current) ProtoReflect() preflect.Message { mi := &edgelq_devices_proto_v1alpha_provisioning_approval_request_change_proto_msgTypes[3] if protoimpl.UnsafeEnabled && m != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(m)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(m) } func (*ProvisioningApprovalRequestChange_Current) GotenMessage() {} // Deprecated, Use ProvisioningApprovalRequestChange_Current.ProtoReflect.Descriptor instead. func (*ProvisioningApprovalRequestChange_Current) Descriptor() ([]byte, []int) { return edgelq_devices_proto_v1alpha_provisioning_approval_request_change_proto_rawDescGZIP(), []int{0, 2} } func (m *ProvisioningApprovalRequestChange_Current) Unmarshal(b []byte) error { return proto.Unmarshal(b, m) } func (m *ProvisioningApprovalRequestChange_Current) Marshal() ([]byte, error) { return proto.Marshal(m) } func (m *ProvisioningApprovalRequestChange_Current) MarshalJSON() ([]byte, error) { return protojson.MarshalOptions{}.Marshal(m) } func (m *ProvisioningApprovalRequestChange_Current) UnmarshalJSON(data []byte) error { return protojson.Unmarshal(data, m) } func (m *ProvisioningApprovalRequestChange_Current) GetProvisioningApprovalRequest() *ProvisioningApprovalRequest { if m != nil { return m.ProvisioningApprovalRequest } return nil } func (m *ProvisioningApprovalRequestChange_Current) SetProvisioningApprovalRequest(fv *ProvisioningApprovalRequest) { if m == nil { panic(fmt.Errorf("can't set %s on nil %s", "ProvisioningApprovalRequest", "ProvisioningApprovalRequestChange_Current")) } m.ProvisioningApprovalRequest = fv } // Removed is returned when ProvisioningApprovalRequest is deleted or leaves // Query view type ProvisioningApprovalRequestChange_Removed struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Name *Name `protobuf:"bytes,1,opt,customtype=Name,name=name,proto3" json:"name,omitempty" firestore:"name"` // Integer specifying removed ProvisioningApprovalRequest index. Not // populated in stateless watch type. 
ViewIndex int32 `protobuf:"varint,2,opt,name=view_index,json=viewIndex,proto3" json:"view_index,omitempty" firestore:"viewIndex"` } func (m *ProvisioningApprovalRequestChange_Removed) Reset() { *m = ProvisioningApprovalRequestChange_Removed{} if protoimpl.UnsafeEnabled { mi := &edgelq_devices_proto_v1alpha_provisioning_approval_request_change_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(m)) ms.StoreMessageInfo(mi) } } func (m *ProvisioningApprovalRequestChange_Removed) String() string { return protoimpl.X.MessageStringOf(m) } func (*ProvisioningApprovalRequestChange_Removed) ProtoMessage() {} func (m *ProvisioningApprovalRequestChange_Removed) ProtoReflect() preflect.Message { mi := &edgelq_devices_proto_v1alpha_provisioning_approval_request_change_proto_msgTypes[4] if protoimpl.UnsafeEnabled && m != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(m)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(m) } func (*ProvisioningApprovalRequestChange_Removed) GotenMessage() {} // Deprecated, Use ProvisioningApprovalRequestChange_Removed.ProtoReflect.Descriptor instead. func (*ProvisioningApprovalRequestChange_Removed) Descriptor() ([]byte, []int) { return edgelq_devices_proto_v1alpha_provisioning_approval_request_change_proto_rawDescGZIP(), []int{0, 3} } func (m *ProvisioningApprovalRequestChange_Removed) Unmarshal(b []byte) error { return proto.Unmarshal(b, m) } func (m *ProvisioningApprovalRequestChange_Removed) Marshal() ([]byte, error) { return proto.Marshal(m) } func (m *ProvisioningApprovalRequestChange_Removed) MarshalJSON() ([]byte, error) { return protojson.MarshalOptions{}.Marshal(m) } func (m *ProvisioningApprovalRequestChange_Removed) UnmarshalJSON(data []byte) error { return protojson.Unmarshal(data, m) } func (m *ProvisioningApprovalRequestChange_Removed) GetName() *Name { if m != nil { return m.Name } return nil } func (m *ProvisioningApprovalRequestChange_Removed) GetViewIndex() int32 { if m != nil { return m.ViewIndex } return int32(0) } func (m *ProvisioningApprovalRequestChange_Removed) SetName(fv *Name) { if m == nil { panic(fmt.Errorf("can't set %s on nil %s", "Name", "ProvisioningApprovalRequestChange_Removed")) } m.Name = fv } func (m *ProvisioningApprovalRequestChange_Removed) SetViewIndex(fv int32) { if m == nil { panic(fmt.Errorf("can't set %s on nil %s", "ViewIndex", "ProvisioningApprovalRequestChange_Removed")) } m.ViewIndex = fv } var edgelq_devices_proto_v1alpha_provisioning_approval_request_change_proto preflect.FileDescriptor var edgelq_devices_proto_v1alpha_provisioning_approval_request_change_proto_rawDesc = []byte{ 0x0a, 0x47, 0x65, 0x64, 0x67, 0x65, 0x6c, 0x71, 0x2f, 0x64, 0x65, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x61, 0x70, 0x70, 0x72, 0x6f, 0x76, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x13, 0x6e, 0x74, 0x74, 0x2e, 0x64, 0x65, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 
0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x67, 0x6f, 0x74, 0x65, 0x6e, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x67, 0x6f, 0x74, 0x65, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x74, 0x65, 0x6e, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x67, 0x6f, 0x74, 0x65, 0x6e, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x40, 0x65, 0x64, 0x67, 0x65, 0x6c, 0x71, 0x2f, 0x64, 0x65, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x61, 0x70, 0x70, 0x72, 0x6f, 0x76, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd7, 0x09, 0x0a, 0x21, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x41, 0x70, 0x70, 0x72, 0x6f, 0x76, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x54, 0x0a, 0x05, 0x61, 0x64, 0x64, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x6e, 0x74, 0x74, 0x2e, 0x64, 0x65, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x41, 0x70, 0x70, 0x72, 0x6f, 0x76, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x41, 0x64, 0x64, 0x65, 0x64, 0x48, 0x00, 0x52, 0x05, 0x61, 0x64, 0x64, 0x65, 0x64, 0x12, 0x5d,
0x73, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x48, 0x00, 0x52, 0x08, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x12, 0x5a, 0x0a, 0x07, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x6e, 0x74, 0x74, 0x2e, 0x64, 0x65, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x41, 0x70, 0x70, 0x72, 0x6f, 0x76, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x07, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x5a, 0x0a, 0x07, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x6e, 0x74, 0x74, 0x2e, 0x64, 0x65, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x41, 0x70, 0x70, 0x72, 0x6f, 0x76, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x48, 0x00, 0x52, 0x07, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x1a, 0x9c, 0x01, 0x0a, 0x05, 0x41, 0x64, 0x64, 0x65, 0x64, 0x12, 0x74, 0x0a, 0x1d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x61, 0x70, 0x70, 0x72, 0x6f, 0x76, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x6e, 0x74, 0x74, 0x2e, 0x64, 0x65, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x41, 0x70, 0x70, 0x72, 0x6f, 0x76, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x1b, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x41, 0x70, 0x70, 0x72, 0x6f, 0x76, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x69, 0x65, 0x77, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x76, 0x69, 0x65, 0x77, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x1a, 0xe8, 0x02, 0x0a, 0x08, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x23, 0xb2, 0xda, 0x21, 0x1f, 0x0a, 0x1d, 0x0a, 0x1b, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x41, 0x70, 0x70, 0x72, 0x6f, 0x76, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x74, 0x0a, 0x1d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x61, 0x70, 0x70, 0x72, 0x6f, 0x76, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x6e, 0x74, 0x74, 0x2e, 0x64, 0x65, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x41, 0x70, 0x70, 0x72, 0x6f, 0x76, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x1b, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x41, 0x70, 0x70, 0x72, 0x6f, 0x76, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x5e, 0x0a, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x23, 0xb2, 0xda, 0x21, 0x1f, 0x32, 0x1d, 0x0a, 0x1b, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x41, 0x70, 0x70, 0x72, 0x6f, 0x76, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x09, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x2e, 0x0a, 0x13, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x5f, 0x76, 0x69, 0x65, 0x77, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x11, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x56, 0x69, 0x65, 0x77, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x69, 0x65, 0x77, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x76, 0x69, 0x65, 0x77, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x1a, 0x7f, 0x0a, 0x07, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x74, 0x0a, 0x1d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x61, 0x70, 0x70, 0x72, 0x6f, 0x76, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x6e, 0x74, 0x74, 0x2e, 0x64, 0x65, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x41, 0x70, 0x70, 0x72, 0x6f, 0x76, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x1b, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x41, 0x70, 0x70, 0x72, 0x6f, 0x76, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x61, 0x0a, 0x07, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x23, 0xb2, 0xda, 0x21, 0x1f, 0x0a, 0x1d, 0x0a, 0x1b, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x41, 0x70, 0x70, 0x72, 0x6f, 0x76, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x76, 0x69, 0x65, 0x77, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x76, 0x69, 0x65, 0x77, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x3a, 0x48, 0x9a, 0xd9, 0x21, 0x1d, 0x0a, 0x1b, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x41, 0x70, 0x70, 0x72, 0x6f, 0x76, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0xfa, 0xde, 0x21, 0x23, 0x0a, 0x21, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x41, 0x70, 0x70, 0x72, 0x6f, 0x76, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x42, 0xc0, 0x02, 0xe8, 0xde, 0x21, 0x00, 0x92, 0x8c, 0xd1, 0x02, 0x7f, 0x0a, 0x28, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x61, 0x70, 0x70, 0x72, 0x6f, 0x76, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x53, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x77, 0x61, 0x6e, 0x2f, 0x65, 0x64, 0x67, 0x65, 0x6c, 0x71, 0x2f, 0x64, 0x65, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x61, 0x70, 0x70, 0x72, 0x6f, 0x76, 0x61, 0x6c, 0x5f, 
0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x0a, 0x1a, 0x63, 0x6f, 0x6d, 0x2e, 0x6e, 0x74, 0x74, 0x2e, 0x64, 0x65, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x70, 0x62, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x42, 0x26, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x41, 0x70, 0x70, 0x72, 0x6f, 0x76, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x00, 0x5a, 0x70, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x77, 0x61, 0x6e, 0x2f, 0x65, 0x64, 0x67, 0x65, 0x6c, 0x71, 0x2f, 0x64, 0x65, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x61, 0x70, 0x70, 0x72, 0x6f, 0x76, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x3b, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x61, 0x70, 0x70, 0x72, 0x6f, 0x76, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( edgelq_devices_proto_v1alpha_provisioning_approval_request_change_proto_rawDescOnce sync.Once edgelq_devices_proto_v1alpha_provisioning_approval_request_change_proto_rawDescData = edgelq_devices_proto_v1alpha_provisioning_approval_request_change_proto_rawDesc ) func edgelq_devices_proto_v1alpha_provisioning_approval_request_change_proto_rawDescGZIP() []byte { edgelq_devices_proto_v1alpha_provisioning_approval_request_change_proto_rawDescOnce.Do(func() { edgelq_devices_proto_v1alpha_provisioning_approval_request_change_proto_rawDescData = protoimpl.X.CompressGZIP(edgelq_devices_proto_v1alpha_provisioning_approval_request_change_proto_rawDescData) }) return edgelq_devices_proto_v1alpha_provisioning_approval_request_change_proto_rawDescData } var edgelq_devices_proto_v1alpha_provisioning_approval_request_change_proto_msgTypes = make([]protoimpl.MessageInfo, 5) var edgelq_devices_proto_v1alpha_provisioning_approval_request_change_proto_goTypes = []interface{}{ (*ProvisioningApprovalRequestChange)(nil), // 0: ntt.devices.v1alpha.ProvisioningApprovalRequestChange (*ProvisioningApprovalRequestChange_Added)(nil), // 1: ntt.devices.v1alpha.ProvisioningApprovalRequestChange.Added (*ProvisioningApprovalRequestChange_Modified)(nil), // 2: ntt.devices.v1alpha.ProvisioningApprovalRequestChange.Modified (*ProvisioningApprovalRequestChange_Current)(nil), // 3: ntt.devices.v1alpha.ProvisioningApprovalRequestChange.Current (*ProvisioningApprovalRequestChange_Removed)(nil), // 4: ntt.devices.v1alpha.ProvisioningApprovalRequestChange.Removed (*ProvisioningApprovalRequest)(nil), // 5: ntt.devices.v1alpha.ProvisioningApprovalRequest (*ProvisioningApprovalRequest_FieldMask)(nil), // 6: ntt.devices.v1alpha.ProvisioningApprovalRequest_FieldMask } var edgelq_devices_proto_v1alpha_provisioning_approval_request_change_proto_depIdxs = []int32{ 1, // 0: ntt.devices.v1alpha.ProvisioningApprovalRequestChange.added:type_name -> ntt.devices.v1alpha.ProvisioningApprovalRequestChange.Added 2, // 1: ntt.devices.v1alpha.ProvisioningApprovalRequestChange.modified:type_name -> ntt.devices.v1alpha.ProvisioningApprovalRequestChange.Modified 3, // 2: ntt.devices.v1alpha.ProvisioningApprovalRequestChange.current:type_name -> ntt.devices.v1alpha.ProvisioningApprovalRequestChange.Current 4, // 3: 
ntt.devices.v1alpha.ProvisioningApprovalRequestChange.removed:type_name -> ntt.devices.v1alpha.ProvisioningApprovalRequestChange.Removed 5, // 4: ntt.devices.v1alpha.ProvisioningApprovalRequestChange.Added.provisioning_approval_request:type_name -> ntt.devices.v1alpha.ProvisioningApprovalRequest 5, // 5: ntt.devices.v1alpha.ProvisioningApprovalRequestChange.Modified.provisioning_approval_request:type_name -> ntt.devices.v1alpha.ProvisioningApprovalRequest 6, // 6: ntt.devices.v1alpha.ProvisioningApprovalRequestChange.Modified.field_mask:type_name -> ntt.devices.v1alpha.ProvisioningApprovalRequest_FieldMask 5, // 7: ntt.devices.v1alpha.ProvisioningApprovalRequestChange.Current.provisioning_approval_request:type_name -> ntt.devices.v1alpha.ProvisioningApprovalRequest 8, // [8:8] is the sub-list for method output_type 8, // [8:8] is the sub-list for method input_type 8, // [8:8] is the sub-list for extension type_name 8, // [8:8] is the sub-list for extension extendee 0, // [0:8] is the sub-list for field type_name } func init() { edgelq_devices_proto_v1alpha_provisioning_approval_request_change_proto_init() } func edgelq_devices_proto_v1alpha_provisioning_approval_request_change_proto_init() { if edgelq_devices_proto_v1alpha_provisioning_approval_request_change_proto != nil { return } if !protoimpl.UnsafeEnabled { edgelq_devices_proto_v1alpha_provisioning_approval_request_change_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ProvisioningApprovalRequestChange); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } edgelq_devices_proto_v1alpha_provisioning_approval_request_change_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ProvisioningApprovalRequestChange_Added); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } edgelq_devices_proto_v1alpha_provisioning_approval_request_change_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ProvisioningApprovalRequestChange_Modified); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } edgelq_devices_proto_v1alpha_provisioning_approval_request_change_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ProvisioningApprovalRequestChange_Current); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } edgelq_devices_proto_v1alpha_provisioning_approval_request_change_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ProvisioningApprovalRequestChange_Removed); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } } edgelq_devices_proto_v1alpha_provisioning_approval_request_change_proto_msgTypes[0].OneofWrappers = []interface{}{ (*ProvisioningApprovalRequestChange_Added_)(nil), (*ProvisioningApprovalRequestChange_Modified_)(nil), (*ProvisioningApprovalRequestChange_Current_)(nil), (*ProvisioningApprovalRequestChange_Removed_)(nil), } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: edgelq_devices_proto_v1alpha_provisioning_approval_request_change_proto_rawDesc, NumEnums: 0, NumMessages: 5, NumExtensions: 0, NumServices: 0, }, GoTypes: 
edgelq_devices_proto_v1alpha_provisioning_approval_request_change_proto_goTypes, DependencyIndexes: edgelq_devices_proto_v1alpha_provisioning_approval_request_change_proto_depIdxs, MessageInfos: edgelq_devices_proto_v1alpha_provisioning_approval_request_change_proto_msgTypes, }.Build() edgelq_devices_proto_v1alpha_provisioning_approval_request_change_proto = out.File edgelq_devices_proto_v1alpha_provisioning_approval_request_change_proto_rawDesc = nil edgelq_devices_proto_v1alpha_provisioning_approval_request_change_proto_goTypes = nil edgelq_devices_proto_v1alpha_provisioning_approval_request_change_proto_depIdxs = nil }
0x0a, 0x08, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x6e, 0x74, 0x74, 0x2e, 0x64, 0x65, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x41, 0x70, 0x70, 0x72, 0x6f, 0x76, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65,
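The change_type oneof in this generated message means exactly one of added/modified/current/removed is set per event. A minimal consumer sketch, assuming the outer ProvisioningApprovalRequestChange exposes the usual Get* oneof accessors that protoc-gen-go-style generators emit (handleChange and its "fmt" import are additions, not part of the generated file):

func handleChange(change *ProvisioningApprovalRequestChange) {
	// Exactly one branch fires per event; the order mirrors the oneof.
	switch {
	case change.GetAdded() != nil:
		a := change.GetAdded()
		fmt.Printf("added at view index %d: %v\n", a.GetViewIndex(), a.GetProvisioningApprovalRequest())
	case change.GetModified() != nil:
		m := change.GetModified()
		fmt.Printf("modified %v (changed fields: %v)\n", m.GetName(), m.GetFieldMask())
	case change.GetCurrent() != nil:
		// Stateless watch: a full snapshot, no view-index bookkeeping.
		fmt.Printf("current: %v\n", change.GetCurrent().GetProvisioningApprovalRequest())
	case change.GetRemoved() != nil:
		r := change.GetRemoved()
		fmt.Printf("removed %v from view index %d\n", r.GetName(), r.GetViewIndex())
	}
}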
index.tsx
// Keep consistent with the video tutorial import React from 'react'; import { MenuBar } from '@/components/index'; import { useLocation } from 'umi'; import {StoreProvider} from 'think-react-store';
export default (props: any) => { const location = useLocation(); const paths = ['/', '/user', '/order']; return ( <StoreProvider store={stores} middlewares={[log]}> <MenuBar show={paths.includes(location.pathname)} pathname={location.pathname} /> {/* Revisit later: child pages only load correctly with this line in place */} {props.children} </StoreProvider> ); };
import * as stores from '@/stores' import log from 'think-react-store/middlewares/log'; import '@/global.less'
mod.rs
use std::{iter, rc::Rc}; use dominator_helpers::futures::AsyncLoader; use futures_signals::signal::Mutable; use shared::domain::jig::{JigId, JigSearchQuery}; use components::page_header::state::PageLinks; use super::search_results::SearchResults; use strum_macros::Display; mod search_state; pub use search_state::*; pub struct State { pub loader: AsyncLoader, pub mode: Mutable<HomePageMode>, pub is_logged_in: Mutable<bool>, pub search_options: Rc<SearchOptions>, pub search_selected: Rc<SearchSelected>, pub quick_searches: Vec<QuickSearch>, pub whats_new: Vec<WhatsNewItem>, pub parents_testimonials: Vec<Testimonial>, pub teachers_testimonials: Vec<Testimonial>, pub total_jigs_count: Mutable<u64>, pub play_jig: Mutable<Option<JigId>>, } impl State { pub fn new() -> Self { Self::new_with_search_selected(SearchSelected::new()) } pub fn new_search(query_params: Option<JigSearchQuery>) -> Self { let search_selected = match query_params { Some(query_params) => SearchSelected::from_search_request(query_params), None => SearchSelected::new(), }; Self::new_with_search_selected(search_selected) } fn new_with_search_selected(search_selected: SearchSelected) -> Self { Self { search_selected: Rc::new(search_selected), loader: AsyncLoader::new(), mode: Mutable::new(HomePageMode::Home), is_logged_in: Mutable::new(false), search_options: Rc::new(SearchOptions::new()), quick_searches: Self::get_quick_searches(), whats_new: Self::get_whats_new(), parents_testimonials: Self::get_parents_testimonials(), teachers_testimonials: Self::get_teachers_testimonials(), total_jigs_count: Mutable::new(0), play_jig: Mutable::new(None), } }
fn get_quick_searches() -> Vec<QuickSearch> { vec![ QuickSearch { search_term: String::from("Hebrew"), }, QuickSearch { search_term: String::from("Tishrei"), }, QuickSearch { search_term: String::from("Chanukah"), }, QuickSearch { search_term: String::from("Israel"), }, ] } fn get_whats_new() -> Vec<WhatsNewItem> { iter::repeat(WhatsNewItem { image_id: String::from("something.jpg"), image_lib: String::from("mock"), header: String::from("HOP TV - New Hebrew Series"), paragraph: String::from("Learning Hebrew with HOP Channel, Learning Hebrew with HOP Channel, Learning Hebrew with HOP Channel, Learning Hebrew with HOP Channel Learning Hebrew with HOP"), link: String::from(""), }).take(3).collect() } fn get_parents_testimonials() -> Vec<Testimonial> { vec![ Testimonial { image_id: String::from("orly-rachamim.jpg"), name: String::from("Orly Rachamim"), bio: String::from("Netivot HaTorah Day School, Ontario, Canada"), paragraph: String::from("Having the ability to search for and download games and activities in addition to creating your own and sharing it in the platform is a great crowd-sourcing opportunity. Using the rich creation packs as well as interactive layers helps enhance the learning experience for students by bringing the material to life."), }, Testimonial { image_id: String::from("liat-walker.png"), name: String::from("Liat Walker"), bio: String::from("Jewish Studies Coordinator, Martin J Gottlieb Day School, FL, USA"), paragraph: String::from("I use Ji as a way to enrich the students’ Jewish knowledge and experience. The lessons and images include every contemporary subject in the Jewish world and Israel and is an excellent way for our students to feel connected to their Jewish identity. Before Ji this kind of information would be found in a book or on an Internet site, which is geared for adults. In my opinion, there is no kid-friendly space for our students to learn about Jewish contemporary topics other than Ji."), }, Testimonial { image_id: String::from("jonathon-simons.png"), name: String::from("Jonathon Simons"), bio: String::from("Broughton Jewish, Manchester, UK"), paragraph: String::from("In the last three months, I have found that I have been able to get 100% engagement from students and have been able to improve their grades."), }, Testimonial { image_id: String::from("dana-cappel.png"), name: String::from("Dana Cappel"), bio: String::from("Beit Issie Shapiro, Israel"), paragraph: String::from("Ji is a fantastic resource for our students. The fact that I can create customized activities for my students means that I can create activities in exactly the way that they need them to be so that they can learn and participate to their maximum potential."), }, ] } fn get_teachers_testimonials() -> Vec<Testimonial> { vec![ Testimonial { image_id: String::from("rabbi-yakov-shafferman.png"), name: String::from("Rabbi Yakov Shafferman"), bio: String::from("Jesode Hatorah, Antwerp, Belgium"), paragraph: String::from("I think this tool is going to be very, very useful for our school for many different subjects. We’re teaching Hebrew and other traditional subjects like Chumash and Gemarah. We can use it for teaching itself and for assessing the students. I’m looking forward to enhancing Jewish learning in our school with Ji."), }, Testimonial { image_id: String::from("rabbi-hiller.jpg"), name: String::from("Rabbi Hersh Hiller"), bio: String::from("Yeshiva Elementary, Milwaukee"), paragraph: String::from("Yesterday, I tried the Tu’Bishvat app with our iPads. It was amazing! 
The 17 kids were super-engaged. You have to imagine five students with their heads packed tightly against each other in a tight circle hovering over the glow of the iPad on the floor. Thank you for all the work that you put into to make an amazing program that I could use my classroom."), }, Testimonial { image_id: String::from("adina-levin.png"), name: String::from("Adina Levin"), bio: String::from("Hillel Day School, Detroit, USA"), paragraph: String::from("I’m amazed with what is finally, finally available for the Jewish Studies teachers. I always was jealous of the English teachers, that have so much material, and so much sources, and we as the Judaic Studies teachers are always trying to create our own material and come up with innovations."), }, Testimonial { image_id: String::from("rabbi-moshe-rosenberg.jpg"), name: String::from("Rabbi Moshe Rosenberg"), bio: String::from("SAR Academy, NY"), paragraph: String::from("What sets your products apart is that you do not compromise on either the substance or the style. You have both the truly professional look and true content."), }, ] } } #[derive(Clone, Display)] pub enum HomePageMode { #[strum(serialize = "home")] Home, #[strum(serialize = "results")] Search(Rc<SearchResults>), } impl From<&HomePageMode> for PageLinks { fn from(mode: &HomePageMode) -> PageLinks { match mode { &HomePageMode::Home => PageLinks::Home, &HomePageMode::Search(..) => PageLinks::Content, } } } #[derive(Clone)] pub struct QuickSearch { pub search_term: String, } #[derive(Clone)] pub struct WhatsNewItem { pub image_id: String, pub image_lib: String, // is this always the same? pub header: String, pub paragraph: String, pub link: String, } #[derive(Clone)] pub struct Testimonial { pub image_id: String, pub name: String, pub bio: String, pub paragraph: String, }
txgen.go
// Copyright (c) 2004-present Facebook All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package txgen import ( "errors" "go/types" "os" "path/filepath" "github.com/99designs/gqlgen/codegen" "github.com/99designs/gqlgen/codegen/config" "github.com/99designs/gqlgen/codegen/templates" "github.com/99designs/gqlgen/plugin" ) type txgen struct { config.PackageConfig } // New returns a txgen plugin func New(cfg config.PackageConfig) plugin.Plugin { if cfg.Package == "" { cfg.Package = "resolver" } if cfg.Filename == ""
if cfg.Type == "" { cfg.Type = "txResolver" } return txgen{cfg} } func (txgen) Name() string { return "txgen" } func (t txgen) MutateConfig(cfg *config.Config) error { err := os.Remove(t.Filename) if os.IsNotExist(err) { err = nil } return err } func (t txgen) GenerateCode(data *codegen.Data) error { var mutation *codegen.Object for _, object := range data.Objects { if object.Definition == data.Schema.Mutation { mutation = object break } } if mutation == nil { return errors.New("unable to find mutation object") } return templates.Render(templates.Options{ PackageName: t.Package, Filename: filepath.Join(t.Package, t.Filename), Data: &ResolverBuild{ Object: mutation, Type: t.Type, }, Funcs: map[string]interface{}{ "ResultType": func(f *codegen.Field) string { result := templates.CurrentImports.LookupType(f.TypeReference.GO) if f.Object.Stream { result = "<-chan " + result } return result }, "Package": func(f *codegen.Field) string { t := f.TypeReference for e := t.Elem(); e != nil; e = t.Elem() { t = e } if t, ok := t.GO.(*types.Named); ok { return t.Obj().Pkg().Name() } return "" }, }, GeneratedHeader: true, }) } // ResolverBuild defines the data passed to txgen template. type ResolverBuild struct { *codegen.Object Type string }
{ cfg.Filename = "tx_generated.go" }
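txgen implements gqlgen's plugin.Plugin interface (Name, plus the MutateConfig and GenerateCode hooks), so it is attached at generation time. A minimal wiring sketch, assuming gqlgen's standard api entry point; the import path for this package is a placeholder:

package main

import (
	"log"

	"github.com/99designs/gqlgen/api"
	"github.com/99designs/gqlgen/codegen/config"

	"example.com/yourproject/txgen" // placeholder import path
)

func main() {
	cfg, err := config.LoadConfigFromDefaultLocations()
	if err != nil {
		log.Fatalf("loading gqlgen config: %v", err)
	}
	// A zero-value PackageConfig picks up the defaults set in New:
	// package "resolver", file "tx_generated.go", type "txResolver".
	if err := api.Generate(cfg, api.AddPlugin(txgen.New(config.PackageConfig{}))); err != nil {
		log.Fatal(err)
	}
}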
settings_external_api.go
package external import ( "context" "github.com/shopspring/decimal" log "github.com/sirupsen/logrus" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" api "github.com/mxc-foundation/lpwan-app-server/api/extapi" pb "github.com/mxc-foundation/lpwan-app-server/api/m2m-serves-appserver" "github.com/mxc-foundation/lpwan-app-server/internal/modules/serverinfo" "github.com/mxc-foundation/lpwan-app-server/internal/mxpcli" ) // SettingsServerAPI defines the settings of the Server API structure type SettingsServerAPI struct{} // NewSettingsServerAPI defines the SettingsServerAPI Validator func NewSettingsServerAPI() *SettingsServerAPI { return &SettingsServerAPI{} } // GetSettings defines the settings of the Server API request and response func (s *SettingsServerAPI) GetSettings(ctx context.Context, req *api.GetSettingsRequest) (*api.GetSettingsResponse, error) { logInfo := "api/appserver_serves_ui/GetSettings" if err := serverinfo.NewValidator().IsGlobalAdmin(ctx); err != nil { return nil, status.Errorf(codes.PermissionDenied, err.Error()) } settingClient := mxpcli.Global.GetSettingsServiceClient() resp, err := settingClient.GetSettings(ctx, &pb.GetSettingsRequest{}) if err != nil { log.WithError(err).Error(logInfo) return &api.GetSettingsResponse{}, status.Errorf(codes.Unavailable, err.Error())
compensation, err := decimal.NewFromString(resp.Compensation) if err != nil { return nil, status.Errorf(codes.Internal, "couldn't parse compensation: %v", err) } compFloat, _ := compensation.Float64() return &api.GetSettingsResponse{ LowBalanceWarning: resp.LowBalanceWarning, DownlinkPrice: resp.DownlinkPrice, SupernodeIncomeRatio: resp.SupernodeIncomeRatio, StakingInterest: resp.StakingInterest, Compensation: compFloat, }, status.Error(codes.OK, "") } // ModifySettings defines the modification of the Server API settings func (s *SettingsServerAPI) ModifySettings(ctx context.Context, req *api.ModifySettingsRequest) (*api.ModifySettingsResponse, error) { logInfo := "api/appserver_serves_ui/ModifySettings" if err := serverinfo.NewValidator().IsGlobalAdmin(ctx); err != nil { return nil, status.Errorf(codes.PermissionDenied, err.Error()) } settingClient := mxpcli.Global.GetSettingsServiceClient() resp, err := settingClient.ModifySettings(ctx, &pb.ModifySettingsRequest{ LowBalanceWarning: req.LowBalanceWarning, DownlinkFee: req.DownlinkFee, TransactionPercentageShare: req.TransactionPercentageShare, }) if err != nil { log.WithError(err).Error(logInfo) return &api.ModifySettingsResponse{}, status.Errorf(codes.Unavailable, err.Error()) } return &api.ModifySettingsResponse{ Status: resp.Status, }, status.Error(codes.OK, "") }
}
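GetSettings round-trips the string-encoded compensation through shopspring/decimal before flattening it to a float64 for the API response. A standalone sketch of that conversion with a made-up value; note that Float64 also reports whether the conversion was exact, which the handler above deliberately ignores:

package main

import (
	"fmt"

	"github.com/shopspring/decimal"
)

func main() {
	compensation, err := decimal.NewFromString("1.25") // made-up value
	if err != nil {
		panic(err)
	}
	compFloat, exact := compensation.Float64()
	fmt.Println(compFloat, exact) // 1.25 true
}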
repository_helper.rs
// // Copyright 2021 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use sha2::{Digest, Sha256}; use std::fs; use std::io::Read; use std::path::{Path, PathBuf}; use tough::{RepositoryLoader, TargetName}; use url::Url; use super::{ super::errors::{Result, SigstoreError}, constants::{SIGSTORE_FULCIO_CERT_TARGET, SIGSTORE_REKOR_PUB_KEY_TARGET}, }; pub(crate) struct RepositoryHelper { repository: tough::Repository, checkout_dir: Option<PathBuf>, } impl RepositoryHelper { pub(crate) fn new<R>( root: R, metadata_base: Url, target_base: Url, checkout_dir: Option<&Path>, ) -> Result<Self> where R: Read, { let repository = RepositoryLoader::new(root, metadata_base, target_base) .expiration_enforcement(tough::ExpirationEnforcement::Safe) .load()?; Ok(Self { repository, checkout_dir: checkout_dir.map(|s| s.to_owned()), }) } /// Fetch the Fulcio certificate from the given TUF repository, or reuse /// the local cache if its contents are not outdated. /// /// The contents of the local cache are updated when they are outdated. pub(crate) fn fulcio_cert(&self) -> Result<Vec<u8>> { let fulcio_target_name = TargetName::new(SIGSTORE_FULCIO_CERT_TARGET)?; let local_fulcio_path = self .checkout_dir .as_ref() .map(|d| Path::new(d).join(SIGSTORE_FULCIO_CERT_TARGET)); fetch_target_or_reuse_local_cache( &self.repository, &fulcio_target_name, local_fulcio_path.as_ref(), ) } /// Fetch the Rekor public key from the given TUF repository, or reuse /// the local cache if it's not outdated. /// /// The contents of the local cache are updated when they are outdated. pub(crate) fn rekor_pub_key(&self) -> Result<Vec<u8>> { let rekor_target_name = TargetName::new(SIGSTORE_REKOR_PUB_KEY_TARGET)?; let local_rekor_path = self .checkout_dir .as_ref() .map(|d| Path::new(d).join(SIGSTORE_REKOR_PUB_KEY_TARGET)); fetch_target_or_reuse_local_cache( &self.repository, &rekor_target_name, local_rekor_path.as_ref(), ) } } /// Download a file stored inside of a TUF repository, trying to reuse a local /// cache when possible. /// /// * `repository`: TUF repository holding the file /// * `target`: TUF representation of the file to be downloaded /// * `local_file`: location where the file should be downloaded /// /// This function will reuse the local copy of the file if its contents /// didn't change. /// This check is done by comparing the digest of the local file, if found, /// with the digest reported inside of the TUF repository metadata. /// /// **Note well:** the `local_file` is updated whenever its contents are /// outdated. 
fn fetch_target_or_reuse_local_cache( repository: &tough::Repository, target_name: &TargetName, local_file: Option<&PathBuf>, ) -> Result<Vec<u8>> { let (local_file_outdated, local_file_contents) = if let Some(path) = local_file { is_local_file_outdated(repository, target_name, path) } else { Ok((true, None)) }?; let data = if local_file_outdated { let data = fetch_target(repository, target_name)?; if let Some(path) = local_file { // update the local file to have latest data from the TUF repo fs::write(path, data.clone())?; } data } else { local_file_contents .expect("local file contents to not be 'None'") .as_bytes() .to_owned() }; Ok(data) } /// Download a file from a TUF repository fn fetch_target(repository: &tough::Repository, target_name: &TargetName) -> Result<Vec<u8>> { let data: Vec<u8>; match repository.read_target(target_name)? { None => Err(SigstoreError::TufTargetNotFoundError( target_name.raw().to_string(), )), Some(reader) => { data = read_to_end(reader)?; Ok(data) } } } /// Compares the checksum of a local file, with the digest reported inside of /// TUF repository metadata fn is_local_file_outdated( repository: &tough::Repository, target_name: &TargetName, local_file: &Path, ) -> Result<(bool, Option<String>)> { let target = repository .targets() .signed .targets .get(target_name) .ok_or_else(|| SigstoreError::TufTargetNotFoundError(target_name.raw().to_string()))?; if local_file.exists() { let data = fs::read_to_string(local_file)?; let local_checksum = Sha256::digest(data.clone()); let expected_digest: Vec<u8> = target.hashes.sha256.to_vec(); if local_checksum.as_slice() == expected_digest.as_slice() { // local data is not outdated Ok((false, Some(data))) } else { Ok((true, None)) } } else { Ok((true, None)) } } /// Gets the goods from a read and makes a Vec fn read_to_end<R: Read>(mut reader: R) -> Result<Vec<u8>> { let mut v = Vec::new(); reader.read_to_end(&mut v)?; Ok(v) } #[cfg(test)] mod tests { use super::super::constants::*; use super::*; use std::path::PathBuf; use tempfile::TempDir; /// Returns the path to our test data directory fn test_data() -> PathBuf
fn local_tuf_repo() -> Result<tough::Repository> { let metadata_base_path = test_data().join("repository"); let targets_base_path = metadata_base_path.join("targets"); let metadata_base_url = format!( "file://{}", metadata_base_path .to_str() .ok_or_else(|| SigstoreError::UnexpectedError(String::from( "Cannot convert metadata_base_path into a str" )))? ); let metadata_base_url = url::Url::parse(&metadata_base_url).map_err(|_| { SigstoreError::UnexpectedError(String::from( "Cannot convert metadata_base_url into a URL", )) })?; let target_base_url = format!( "file://{}", targets_base_path .to_str() .ok_or_else(|| SigstoreError::UnexpectedError(String::from( "Cannot convert targets_base_path into a str" )))? ); let target_base_url = url::Url::parse(&target_base_url).map_err(|_| { SigstoreError::UnexpectedError(String::from( "Cannot convert targets_base_url into a URL", )) })?; // It's fine to ignore timestamp.json expiration inside of test env let repo = RepositoryLoader::new(SIGSTORE_ROOT.as_bytes(), metadata_base_url, target_base_url) .expiration_enforcement(tough::ExpirationEnforcement::Unsafe) .load()?; Ok(repo) } #[test] fn get_files_without_using_local_cache() { let repository = local_tuf_repo().expect("Local TUF repo should not fail"); let helper = RepositoryHelper { repository, checkout_dir: None, }; let actual = helper.fulcio_cert().expect("fulcio cert cannot be read"); let expected = fs::read( test_data() .join("repository") .join("targets") .join("fulcio.crt.pem"), ) .expect("cannot read fulcio cert from test data"); assert_eq!( actual, expected, "The fulcio cert read from the TUF repository is not what was expected" ); let actual = helper.rekor_pub_key().expect("rekor key cannot be read"); let expected = fs::read( test_data() .join("repository") .join("targets") .join("rekor.pub"), ) .expect("cannot read rekor key from test data"); assert_eq!( actual, expected, "The rekor key read from the TUF repository is not what was expected" ); } #[test] fn download_files_to_local_cache() { let cache_dir = TempDir::new().expect("Cannot create temp cache dir"); let repository = local_tuf_repo().expect("Local TUF repo should not fail"); let helper = RepositoryHelper { repository, checkout_dir: Some(cache_dir.path().to_path_buf()), }; let expected = helper.fulcio_cert().expect("fulcio cert cannot be read"); let actual = fs::read(cache_dir.path().join("fulcio.crt.pem")) .expect("cannot read fulcio cert from test data"); assert_eq!( actual, expected, "The fulcio cert read from the cache dir is not what was expected" ); let expected = helper.rekor_pub_key().expect("rekor key cannot be read"); let actual = fs::read(cache_dir.path().join("rekor.pub")) .expect("cannot read rekor key from cache dir"); assert_eq!( actual, expected, "The rekor key read from the cache dir is not what was expected" ); } #[test] fn update_local_cache() { let cache_dir = TempDir::new().expect("Cannot create temp cache dir"); // put some outdated files inside of the cache fs::write( cache_dir.path().join(SIGSTORE_FULCIO_CERT_TARGET), b"fake fulcio", ) .expect("Cannot write file to cache dir"); fs::write( cache_dir.path().join(SIGSTORE_REKOR_PUB_KEY_TARGET), b"fake rekor", ) .expect("Cannot write file to cache dir"); let repository = local_tuf_repo().expect("Local TUF repo should not fail"); let helper = RepositoryHelper { repository, checkout_dir: Some(cache_dir.path().to_path_buf()), }; let expected = helper.fulcio_cert().expect("fulcio cert cannot be read"); let actual = fs::read(cache_dir.path().join("fulcio.crt.pem")) 
.expect("cannot read fulcio cert from test data"); assert_eq!( actual, expected, "The fulcio cert read from the cache dir is not what was expected" ); let expected = helper.rekor_pub_key().expect("rekor key cannot be read"); let actual = fs::read(cache_dir.path().join("rekor.pub")) .expect("cannot read rekor key from cache dir"); assert_eq!( actual, expected, "The rekor key read from the cache dir is not what was expected" ); } }
{ PathBuf::from(env!("CARGO_MANIFEST_DIR")) .join("tests") .join("data") }
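Because RepositoryHelper::new is pub(crate), only code inside the crate can construct it. A rough in-crate usage sketch, with the trust-root file, TUF endpoints, and cache directory all placeholders:

use std::fs::File;
use std::path::Path;
use url::Url;

fn fetch_fulcio_cert() -> Result<Vec<u8>> {
    // Placeholder trust root and TUF endpoints.
    let root = File::open("trusted_root.json")?;
    let helper = RepositoryHelper::new(
        root,
        Url::parse("https://tuf.example.com/metadata").unwrap(),
        Url::parse("https://tuf.example.com/targets").unwrap(),
        Some(Path::new("/tmp/sigstore-cache")), // enables the local cache
    )?;
    helper.fulcio_cert()
}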
index.ts
// Imports import dotenv from "dotenv"; interface Config { NODE_ENV: string;
API: string; } dotenv.config(); const config: Config = { NODE_ENV: process.env.NODE_ENV === "production" ? "production" : "development", API: removeTrailingSlash(process.env.REACT_APP_API || "/"), }; export default config; function removeTrailingSlash(url: string): string { return url.endsWith("/") ? url.substring(0, url.length - 1) : url; }
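Because removeTrailingSlash normalizes API, callers can always join request paths with a single leading slash and never produce a double slash. A small consumer sketch; the /health endpoint is made up:

import config from "./config"; // assumed path to the module above

export async function fetchHealth(): Promise<unknown> {
  // Works whether REACT_APP_API was "https://api.example.com" or
  // "https://api.example.com/".
  const res = await fetch(`${config.API}/health`);
  return res.json();
}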
_.py
import sys from collections import deque def BFS():
n,m = list(map(int,sys.stdin.readline().rstrip("\n").split(" "))) road = [] for i in range(n): o = sys.stdin.readline().rstrip("\n") l = [] for c in o: l.append(int(c)) road.append(l) print(BFS())
queue = deque() queue.append((0,0,1,False)) visited = [[[0]*2 for _ in range(m)]for _ in range(n)] visited[0][0][0] = 1 while queue: x,y,d,b = queue.popleft() if x== n-1 and y == m-1: return d if 0<=x+1<n and road[x+1][y] ==1 and b==0: visited[x+1][y][1] = 1 queue.append((x+1,y,d+1,1)) if 0<=x-1<n and road[x-1][y] ==1 and b==0: visited[x-1][y][1] = 1 queue.append((x-1,y,d+1,1)) if 0<=y+1<m and road[x][y+1] ==1 and b==0: visited[x][y+1][1] = 1 queue.append((x,y+1,d+1,1)) if 0<=y-1<m and road[x][y-1] ==1 and b==0: visited[x][y-1][1] = 1 queue.append((x,y-1,d+1,1)) if 0<=x+1<n and road[x+1][y] ==0 and visited[x+1][y][b] == 0: visited[x+1][y][b] = 1 queue.append((x+1,y,d+1,b)) if 0<=x-1<n and road[x-1][y] ==0 and visited[x-1][y][b] == 0: visited[x-1][y][b] = 1 queue.append((x-1,y,d+1,b)) if 0<=y+1<m and road[x][y+1] ==0 and visited[x][y+1][b] == 0: visited[x][y+1][b] = 1 queue.append((x,y+1,d+1,b)) if 0<=y-1<m and road[x][y-1] ==0 and visited[x][y-1][b] == 0: visited[x][y-1][b] = 1 queue.append((x,y-1,d+1,b)) return -1
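A hand-checked run of this break-one-wall BFS (the 2x2 grid is made up): from (0,0) the only route breaks the wall at (1,0) or (0,1) and reaches (1,1), so the printed distance, counting both endpoints, is 3. One caveat visible above: the wall-breaking branches set visited[...][1] without testing it first, so a broken-state cell can be enqueued more than once; that wastes queue space but cannot change the answer, since FIFO BFS still dequeues the shortest entry first. A tiny harness, assuming the assembled solver is saved as _.py in the working directory:

import subprocess
import sys

sample = "2 2\n01\n10\n"  # made-up grid: 0 = open, 1 = wall
out = subprocess.run(
    [sys.executable, "_.py"],
    input=sample, capture_output=True, text=True,
).stdout
assert out.strip() == "3", out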
mapping_identity_target_create_params.py
# coding: utf-8 """ Isilon SDK Isilon SDK - Language bindings for the OneFS API # noqa: E501 OpenAPI spec version: 3 Contact: [email protected] Generated by: https://github.com/swagger-api/swagger-codegen.git """ import pprint import re # noqa: F401 import six from isi_sdk_8_0.models.group_member import GroupMember # noqa: F401,E501 class MappingIdentityTargetCreateParams(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'on_disk': 'bool', 'target': 'GroupMember', 'type': 'str' } attribute_map = { 'on_disk': 'on_disk', 'target': 'target', 'type': 'type' } def __init__(self, on_disk=None, target=None, type=None): # noqa: E501 """MappingIdentityTargetCreateParams - a model defined in Swagger""" # noqa: E501 self._on_disk = None self._target = None self._type = None self.discriminator = None if on_disk is not None: self.on_disk = on_disk self.target = target if type is not None: self.type = type @property def on_disk(self): """Gets the on_disk of this MappingIdentityTargetCreateParams. # noqa: E501 Identity is preferred on-disk. # noqa: E501 :return: The on_disk of this MappingIdentityTargetCreateParams. # noqa: E501 :rtype: bool """ return self._on_disk @on_disk.setter def on_disk(self, on_disk): """Sets the on_disk of this MappingIdentityTargetCreateParams. Identity is preferred on-disk. # noqa: E501 :param on_disk: The on_disk of this MappingIdentityTargetCreateParams. # noqa: E501 :type: bool """ self._on_disk = on_disk @property def target(self): """Gets the target of this MappingIdentityTargetCreateParams. # noqa: E501 Specifies properties for a persona, which consists of either a 'type' and a 'name' or an 'ID'. # noqa: E501 :return: The target of this MappingIdentityTargetCreateParams. # noqa: E501 :rtype: GroupMember """ return self._target @target.setter def target(self, target):
@property def type(self): """Gets the type of this MappingIdentityTargetCreateParams. # noqa: E501 Origin of identity mapping. # noqa: E501 :return: The type of this MappingIdentityTargetCreateParams. # noqa: E501 :rtype: str """ return self._type @type.setter def type(self, type): """Sets the type of this MappingIdentityTargetCreateParams. Origin of identity mapping. # noqa: E501 :param type: The type of this MappingIdentityTargetCreateParams. # noqa: E501 :type: str """ allowed_values = ["auto", "external", "manual"] # noqa: E501 if type not in allowed_values: raise ValueError( "Invalid value for `type` ({0}), must be one of {1}" # noqa: E501 .format(type, allowed_values) ) self._type = type def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, MappingIdentityTargetCreateParams): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other
"""Sets the target of this MappingIdentityTargetCreateParams. Specifies properties for a persona, which consists of either a 'type' and a 'name' or an 'ID'. # noqa: E501 :param target: The target of this MappingIdentityTargetCreateParams. # noqa: E501 :type: GroupMember """ if target is None: raise ValueError("Invalid value for `target`, must not be `None`") # noqa: E501 self._target = target
package.py
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class PyBandit(PythonPackage):
"""Security oriented static analyser for python code.""" homepage = "https://bandit.readthedocs.io/en/latest/" pypi = "bandit/bandit-1.7.0.tar.gz" version('1.7.0', sha256='8a4c7415254d75df8ff3c3b15cfe9042ecee628a1e40b44c15a98890fbfc2608') depends_on('[email protected]:', type=('build', 'run')) depends_on('[email protected]:', type='build') depends_on('[email protected]:', type=('build', 'run')) depends_on('[email protected]:', type=('build', 'run')) depends_on('[email protected]:', type=('build', 'run')) depends_on('[email protected]:', type=('build', 'run')) depends_on('[email protected]:', when='platform=win32', type=('build', 'run'))
webpack.prod.js
var webpack = require('webpack'); var webpackMerge = require('webpack-merge'); var ExtractTextPlugin = require('extract-text-webpack-plugin'); var commonConfig = require('./webpack.common.js'); var helpers = require('./helpers'); const ENV = process.env.NODE_ENV = process.env.ENV = 'production'; module.exports = webpackMerge(commonConfig, { devtool: 'source-map', output: { path: helpers.root('dist'), publicPath: '/', filename: '[name].[hash].js', chunkFilename: '[id].[hash].chunk.js' }, htmlLoader: { minimize: false // workaround for ng2 }, plugins: [ new webpack.NoErrorsPlugin(), new webpack.optimize.DedupePlugin(), new webpack.optimize.UglifyJsPlugin({ // https://github.com/angular/angular/issues/10618 mangle: { keep_fnames: true } }), new ExtractTextPlugin('[name].[hash].css'), new webpack.DefinePlugin({ 'process.env': { 'ENV': JSON.stringify(ENV) }
] });
})
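DefinePlugin substitutes process.env.ENV at build time, so application code can branch on it and let UglifyJsPlugin strip the dead branch. A sketch of a consumer; enableProdMode is a hypothetical app hook:

// Source as written:
if (process.env.ENV === 'production') {
  enableProdMode(); // hypothetical hook
}
// After DefinePlugin the bundle effectively contains
// if ('production' === 'production') { ... }, which the minifier
// reduces to an unconditional enableProdMode() call.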
threads.py
#!/usr/bin/python # -*- coding: UTF-8 -*- """ @author: Alan @time: 2021/05/18 """ from concurrent.futures import ThreadPoolExecutor, wait, ALL_COMPLETED import traceback
def __init__(self, max_workers=None, thread_name_prefix=''): super().__init__(max_workers, thread_name_prefix) def thread_log(self, worker): """Catch thread exceptions and log them""" try: result = worker.result() return result except: traceback.print_exc() def execute(self, fn, *args, **kwargs): """Spawn a new thread and catch its exceptions""" thread = self.submit(fn, *args, **kwargs) thread.add_done_callback(self.thread_log) return thread @staticmethod def execute_after_done(fn, workers, *args, **kwargs): wait(workers, timeout=86400, return_when=ALL_COMPLETED) return fn(*args, **kwargs)
class MultiThread(ThreadPoolExecutor):
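A short usage sketch for the pool defined above; the fetch worker is made up:

import time

def fetch(i):
    time.sleep(0.1)  # stand-in for real I/O
    return i * i

pool = MultiThread(max_workers=4)
workers = [pool.execute(fetch, i) for i in range(8)]
# Runs the callback only once every worker has completed
# (or the 24-hour wait in execute_after_done expires).
total = MultiThread.execute_after_done(
    lambda: sum(w.result() for w in workers), workers
)
print(total)  # 140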
base.rs
#[doc = "Reader of register BASE"] pub type R = crate::R<u32, super::BASE>; #[doc = "Writer for register BASE"] pub type W = crate::W<u32, super::BASE>; #[doc = "Register BASE `reset()`'s with value 0x2000_0000"] impl crate::ResetValue for super::BASE { type Type = u32; #[inline(always)] fn reset_value() -> Self::Type { 0x2000_0000
#[doc = "Reader of field `BASE`"] pub type BASE_R = crate::R<u32, u32>; #[doc = "Write proxy for field `BASE`"] pub struct BASE_W<'a> { w: &'a mut W, } impl<'a> BASE_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u32) -> &'a mut W { self.w.bits = (self.w.bits & !0xffff_ffff) | ((value as u32) & 0xffff_ffff); self.w } } impl R { #[doc = "Bits 0:31 - The ram base address."] #[inline(always)] pub fn base(&self) -> BASE_R { BASE_R::new((self.bits & 0xffff_ffff) as u32) } } impl W { #[doc = "Bits 0:31 - The ram base address."] #[inline(always)] pub fn base(&mut self) -> BASE_W { BASE_W { w: self } } }
} }
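Typical svd2rust-style usage of the generated reader/writer pair; the peripheral handle and its PERIPH type are hypothetical:

fn relocate_ram(periph: &PERIPH) { // PERIPH is a placeholder type
    // BASE spans all 32 bits, so the raw-bits writer is the only entry point.
    periph.base.write(|w| unsafe { w.base().bits(0x2000_4000) });
    assert_eq!(periph.base.read().base().bits(), 0x2000_4000);
}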
example_test.go
package index_test import ( "fmt" "strings" "github.com/openacid/slim/index" ) type Data string func (d Data) Read(offset int64, key string) (string, bool) { kv := strings.Split(string(d)[offset:], ",")[0:2] if kv[0] == key { return kv[1], true } return "", false } func Example() { // SlimTrie is a memory-efficient index data type. // // In this example, we show how to accelerate access to external data // (in memory or on disk) by indexing it with a SlimTrie: // // `data` is a sample of some unindexed data. In our example it is a comma- // separated key-value series. // // In order for SlimTrie to be able to read data, `data` should have // a `Read` method: // Read(offset int64, key string) (string, bool) data := Data("Aaron,1,Agatha,1,Al,2,Albert,3,Alexander,5,Alison,8") // keyOffsets is a prebuilt index that stores each key and its offset in data. keyOffsets := []index.OffsetIndexItem{ {Key: "Aaron", Offset: 0}, {Key: "Agatha", Offset: 8}, {Key: "Al", Offset: 17}, {Key: "Albert", Offset: 22}, {Key: "Alexander", Offset: 31}, {Key: "Alison", Offset: 43}, } // Create an index `index.SlimIndex`, which is simply a container of a SlimTrie // and its data. 
st, err := index.NewSlimIndex(keyOffsets, data) if err != nil { fmt.Println(err) } // Lookup v, found := st.Get2("Alison") fmt.Printf("key: %q\n found: %t\n value: %q\n", "Alison", found, v) v, found = st.Get2("foo") fmt.Printf("key: %q\n found: %t\n value: %q\n", "foo", found, v) // Output: // key: "Alison" // found: true // value: "8" // key: "foo" // found: false // value: "" }
proposals.go
package simulation import ( "math/rand" simappparams "github.com/hashrs/blockchain/framework/chain-app/simapp/params" sdk "github.com/hashrs/blockchain/framework/chain-app/types" "github.com/hashrs/blockchain/framework/chain-app/x/gov/types" "github.com/hashrs/blockchain/framework/chain-app/x/simulation" ) // OpWeightSubmitTextProposal app params key for text proposal const OpWeightSubmitTextProposal = "op_weight_submit_text_proposal"
return []simulation.WeightedProposalContent{ { AppParamsKey: OpWeightSubmitTextProposal, DefaultWeight: simappparams.DefaultWeightTextProposal, ContentSimulatorFn: SimulateTextProposalContent, }, } } // SimulateTextProposalContent returns a random text proposal content. func SimulateTextProposalContent(r *rand.Rand, _ sdk.Context, _ []simulation.Account) types.Content { return types.NewTextProposal( simulation.RandStringOfLength(r, 140), simulation.RandStringOfLength(r, 5000), ) }
// ProposalContents defines the module weighted proposals' contents func ProposalContents() []simulation.WeightedProposalContent {
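A deterministic spot-check of the simulator; the import path is assumed from the package name, and a zero-value Context stands in because SimulateTextProposalContent never touches it:

package main

import (
	"fmt"
	"math/rand"

	sdk "github.com/hashrs/blockchain/framework/chain-app/types"
	"github.com/hashrs/blockchain/framework/chain-app/x/gov/simulation"
)

func main() {
	r := rand.New(rand.NewSource(1)) // fixed seed => reproducible proposal
	content := simulation.SimulateTextProposalContent(r, sdk.Context{}, nil)
	fmt.Println(content.GetTitle())
}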
transactions_history.rs
use crate::json::BACKEND_HISTORY_TRANSACTION_LIST_PAGE; use crate::models::backend::transactions::Transaction; use crate::models::commons::{Page, PageMetadata}; use crate::models::service::transactions::summary::{ ConflictType, TransactionListItem, TransactionSummary, }; use crate::models::service::transactions::TransactionStatus::Success; use crate::models::service::transactions::TransferDirection::{Incoming, Outgoing}; use crate::models::service::transactions::{Custom, TransactionInfo, Transfer}; use crate::models::service::transactions::{Erc20Transfer, TransferInfo}; use crate::providers::info::*; use crate::services::transactions_history::{ adjust_page_meta, backend_txs_to_summary_txs, get_day_timestamp_millis, peek_timestamp_and_remove_item, service_txs_to_tx_list_items, }; #[test] fn adjust_page_meta_offset_0() { let input = PageMetadata { offset: 0, limit: 50, }; let expected = PageMetadata { offset: 0, limit: input.limit, }; let actual = adjust_page_meta(&input); assert_eq!(expected, actual); } #[test] fn adjust_page_meta_offset_greater_than_0() { let input = PageMetadata { offset: 1, limit: 50, }; let expected = PageMetadata { offset: 0, limit: 51, }; let actual = adjust_page_meta(&input); assert_eq!(expected, actual); } #[rocket::async_test] async fn backend_txs_to_summary_txs_empty() { let backend_txs = Page { next: None, previous: None, results: vec![], }; let mut mock_info_provider = MockInfoProvider::new(); mock_info_provider.expect_safe_info().times(0); mock_info_provider.expect_token_info().times(0); let mut back_end_txs_iter = backend_txs.results.into_iter(); let actual = backend_txs_to_summary_txs(&mut back_end_txs_iter, &mut mock_info_provider, "") .await .unwrap(); assert_eq!(actual.is_empty(), true); } #[rocket::async_test] async fn backend_txs_to_summary_txs_with_values() { let backend_txs = serde_json::from_str::<Page<Transaction>>(BACKEND_HISTORY_TRANSACTION_LIST_PAGE).unwrap(); let mut mock_info_provider = MockInfoProvider::new(); mock_info_provider.expect_safe_info().times(0); mock_info_provider .expect_token_info() .times(3) .returning(move |_| bail!("No token info")); mock_info_provider .expect_full_address_info_search() .times(6) .returning(move |_| bail!("No address info")); let mut back_end_txs_iter = backend_txs.results.into_iter(); let expected = vec![ TransactionSummary { id: "module_0x1230B3d59858296A31053C1b8562Ecf89A2f888b_0xcd10b23687bf336d0f4c0a3383590d3d1722aaa99a41fd0d289a5f69a8266c8f_0x53b6e88b578a6313".into(), timestamp: 1606845854000, tx_status: Success, tx_info: TransactionInfo::Custom( Custom { to: "0xc778417E063141139Fce010982780140Aa0cD5Ab".into(), data_size: "68".into(), value: "0".into(), method_name: Some("transfer".into()), action_count: None, to_info: None, is_cancellation: false, }, ), execution_info: None, safe_app_info: None, }, TransactionSummary { id: "module_0x1230B3d59858296A31053C1b8562Ecf89A2f888b_0x1cf24abdb39bb7b156677a128e709cea55c6991b12708904d1f0f3664ad6646e_0x2e5157f6f782e36f".into(), timestamp: 1606845794000, tx_status: Success, tx_info: TransactionInfo::Custom( Custom { to: "0xD9BA894E0097f8cC2BBc9D24D308b98e36dc6D02".into(), data_size: "68".into(), value: "0".into(), method_name: Some("transfer".into()), action_count: None, to_info: None, is_cancellation: false, }, ), execution_info: None, safe_app_info: None, }, TransactionSummary { id: "module_0x1230B3d59858296A31053C1b8562Ecf89A2f888b_0x3f12bb74cd91ef09d553f66e3623bceaf879ba3dcb325227b1fbf2455757891a_0x15a0e5a089475db".into(), timestamp: 1606845070000, tx_status: 
Success, tx_info: TransactionInfo::Custom( Custom { to: "0xD9BA894E0097f8cC2BBc9D24D308b98e36dc6D02".into(), data_size: "68".into(), value: "0".into(), method_name: Some("transfer".into()), action_count: None, to_info: None, is_cancellation: false, }, ), execution_info: None, safe_app_info: None, }, TransactionSummary { id: "ethereum_0x1230B3d59858296A31053C1b8562Ecf89A2f888b_0x021d4d8cb68f3f772906b58f97b66c6ead228c252627c5b1aff4b496d4ff0c2d_0xfd0dbbc7700a140f".into(), timestamp: 1606744033000, tx_status: Success, tx_info: TransactionInfo::Transfer( Transfer { sender: "0x1230B3d59858296A31053C1b8562Ecf89A2f888b".into(), sender_info: None, recipient: "0xF353eBBa77e5E71c210599236686D51cA1F88b84".into(), recipient_info: None, direction: Outgoing, transfer_info: TransferInfo::Erc20( Erc20Transfer { token_address: "0x63704B63Ac04f3a173Dfe677C7e3D330c347CD88".into(), token_name: Some( "TEST AQER".into(), ), token_symbol: Some( "AQER".into(), ), logo_uri: Some( "https://gnosis-safe-token-logos.s3.amazonaws.com/0x63704B63Ac04f3a173Dfe677C7e3D330c347CD88.png".into(), ), decimals: Some( 18, ), value: "100000000000000000".into(), }, ), }, ), execution_info: None, safe_app_info: None, }, TransactionSummary { id: "ethereum_0x1230B3d59858296A31053C1b8562Ecf89A2f888b_0x5f4b7555f8e977ae302ab4125de685ccfacf52ac70e6f0aa2939bcb347f9a732_0xb7ceaac0cd5a85c5".into(), timestamp: 1606743581000, tx_status: Success, tx_info: TransactionInfo::Transfer( Transfer { sender: "0x1230B3d59858296A31053C1b8562Ecf89A2f888b".into(), sender_info: None, recipient: "0xf2565317F3Ae8Ae9EA98E9Fe1e7FADC77F823cbD".into(), recipient_info: None, direction: Outgoing, transfer_info: TransferInfo::Erc20( Erc20Transfer { token_address: "0x63704B63Ac04f3a173Dfe677C7e3D330c347CD88".into(), token_name: Some( "TEST AQER".into(), ), token_symbol: Some( "AQER".into(), ), logo_uri: Some( "https://gnosis-safe-token-logos.s3.amazonaws.com/0x63704B63Ac04f3a173Dfe677C7e3D330c347CD88.png".into(), ), decimals: Some(18), value: "100000000000000000".into(), }, ), }, ), execution_info: None, safe_app_info: None, }, TransactionSummary { id: "ethereum_0x1230B3d59858296A31053C1b8562Ecf89A2f888b_0xaafed95936f9d71eb8d9612e83f3f93f9decf33f11bbb4aa79cae98966ffa7fe_0x11bd3d64559a0af7".into(), timestamp: 1606739725000, tx_status: Success, tx_info: TransactionInfo::Transfer( Transfer { sender: "0xf2565317F3Ae8Ae9EA98E9Fe1e7FADC77F823cbD".into(), sender_info: None, recipient: "0x1230B3d59858296A31053C1b8562Ecf89A2f888b".into(), recipient_info: None, direction: Incoming, transfer_info: TransferInfo::Erc20( Erc20Transfer { token_address: "0x81D0FF4fE216fB6aC98ED609086A92d94dbfE666".into(), token_name: Some( "LS".into(), ), token_symbol: Some( "LS".into(), ), logo_uri: Some( "https://gnosis-safe-token-logos.s3.amazonaws.com/0x81D0FF4fE216fB6aC98ED609086A92d94dbfE666.png".into(), ), decimals: Some( 18, ), value: "400000000000000".into(), }, ), }, ), execution_info: None, safe_app_info: None, }, ]; let actual = backend_txs_to_summary_txs( &mut back_end_txs_iter, &mut mock_info_provider, "0x1230B3d59858296A31053C1b8562Ecf89A2f888b", ) .await .unwrap(); assert_eq!(expected, actual); } #[test] fn service_txs_to_tx_list_items_empty() { let service_tx: Vec<TransactionSummary> = vec![]; let utc_timezone_offset = 0; let actual = service_txs_to_tx_list_items(service_tx, -1, utc_timezone_offset).unwrap(); assert_eq!(actual.is_empty(), true); } #[rocket::async_test] async fn service_txs_to_tx_list_items_last_timestamp_undefined() { let mut mock_info_provider = 
MockInfoProvider::new(); mock_info_provider.expect_safe_info().times(0); mock_info_provider .expect_token_info() .times(6) .returning(move |_| bail!("No token info")); mock_info_provider .expect_full_address_info_search() .times(12) .returning(move |_| bail!("No address info")); let service_txs = get_service_txs(&mut mock_info_provider).await; let service_txs_copy = get_service_txs(&mut mock_info_provider).await; let utc_timezone_offset = 0; let mut service_txs_inter = service_txs.into_iter(); let expected = vec![ TransactionListItem::DateLabel { timestamp: 1606780800000, }, TransactionListItem::Transaction { transaction: service_txs_inter.next().unwrap(), conflict_type: ConflictType::None, }, TransactionListItem::Transaction { transaction: service_txs_inter.next().unwrap(), conflict_type: ConflictType::None, }, TransactionListItem::Transaction { transaction: service_txs_inter.next().unwrap(), conflict_type: ConflictType::None, }, TransactionListItem::DateLabel { timestamp: 1606694400000, }, TransactionListItem::Transaction { transaction: service_txs_inter.next().unwrap(), conflict_type: ConflictType::None, }, TransactionListItem::Transaction { transaction: service_txs_inter.next().unwrap(), conflict_type: ConflictType::None, }, TransactionListItem::Transaction { transaction: service_txs_inter.next().unwrap(), conflict_type: ConflictType::None, }, ]; let actual = service_txs_to_tx_list_items(service_txs_copy, -1, utc_timezone_offset).unwrap(); assert_eq!(expected, actual); } #[rocket::async_test] async fn service_txs_to_tx_list_items_last_timestamp_defined_but_different() { let last_timestamp = 1606867200000; let utc_timezone_offset = 0; let mut mock_info_provider = MockInfoProvider::new(); mock_info_provider.expect_safe_info().times(0); mock_info_provider .expect_token_info() .times(6) .returning(move |_| bail!("No token info")); mock_info_provider .expect_full_address_info_search() .times(12) .returning(move |_| bail!("No address info")); let service_txs = get_service_txs(&mut mock_info_provider).await; let service_txs_copy = get_service_txs(&mut mock_info_provider).await; let mut service_txs_inter = service_txs.into_iter(); let expected = vec![ TransactionListItem::DateLabel { timestamp: 1606780800000, }, TransactionListItem::Transaction { transaction: service_txs_inter.next().unwrap(), conflict_type: ConflictType::None, }, TransactionListItem::Transaction { transaction: service_txs_inter.next().unwrap(), conflict_type: ConflictType::None, }, TransactionListItem::Transaction { transaction: service_txs_inter.next().unwrap(), conflict_type: ConflictType::None, }, TransactionListItem::DateLabel { timestamp: 1606694400000, }, TransactionListItem::Transaction { transaction: service_txs_inter.next().unwrap(), conflict_type: ConflictType::None, }, TransactionListItem::Transaction { transaction: service_txs_inter.next().unwrap(), conflict_type: ConflictType::None, }, TransactionListItem::Transaction { transaction: service_txs_inter.next().unwrap(), conflict_type: ConflictType::None, }, ]; let actual = service_txs_to_tx_list_items(service_txs_copy, last_timestamp, utc_timezone_offset) .unwrap(); assert_eq!(expected, actual); } #[rocket::async_test] async fn service_txs_to_tx_list_items_last_timestamp_defined_and_same() { let last_timestamp = 1606780800000; let utc_timezone_offset = 0; let mut mock_info_provider = MockInfoProvider::new(); mock_info_provider.expect_safe_info().times(0); mock_info_provider .expect_token_info() .times(6) .returning(move |_| bail!("No token info")); 
mock_info_provider .expect_full_address_info_search() .times(12) .returning(move |_| bail!("No address info")); let service_txs = get_service_txs(&mut mock_info_provider).await; let service_txs_copy = get_service_txs(&mut mock_info_provider).await; let mut service_txs_inter = service_txs.into_iter(); let expected = vec![ TransactionListItem::Transaction { transaction: service_txs_inter.next().unwrap(), conflict_type: ConflictType::None, }, TransactionListItem::Transaction { transaction: service_txs_inter.next().unwrap(), conflict_type: ConflictType::None, }, TransactionListItem::Transaction { transaction: service_txs_inter.next().unwrap(), conflict_type: ConflictType::None, }, TransactionListItem::DateLabel { timestamp: 1606694400000, }, TransactionListItem::Transaction { transaction: service_txs_inter.next().unwrap(), conflict_type: ConflictType::None, }, TransactionListItem::Transaction { transaction: service_txs_inter.next().unwrap(), conflict_type: ConflictType::None, }, TransactionListItem::Transaction { transaction: service_txs_inter.next().unwrap(), conflict_type: ConflictType::None, }, ]; let actual = service_txs_to_tx_list_items(service_txs_copy, last_timestamp, utc_timezone_offset) .unwrap(); assert_eq!(expected, actual); } #[rocket::async_test] async fn service_txs_to_tx_list_items_date_label_berlin_timezone() { let mut mock_info_provider = MockInfoProvider::new(); mock_info_provider.expect_safe_info().times(0); mock_info_provider .expect_token_info() .times(6) .returning(move |_| bail!("No token info")); mock_info_provider .expect_full_address_info_search() .times(12) .returning(move |_| bail!("No address info")); let service_txs = get_service_txs(&mut mock_info_provider).await; let service_txs_copy = get_service_txs(&mut mock_info_provider).await; let berlin_timezone_offset = 3600; // + 1 hours Germany/Berlin let mut service_txs_inter = service_txs.into_iter(); let expected = vec![ TransactionListItem::DateLabel { timestamp: 1606777200000, }, TransactionListItem::Transaction { transaction: service_txs_inter.next().unwrap(), conflict_type: ConflictType::None, }, TransactionListItem::Transaction { transaction: service_txs_inter.next().unwrap(), conflict_type: ConflictType::None, }, TransactionListItem::Transaction { transaction: service_txs_inter.next().unwrap(), conflict_type: ConflictType::None, }, TransactionListItem::DateLabel { timestamp: 1606690800000, }, TransactionListItem::Transaction { transaction: service_txs_inter.next().unwrap(), conflict_type: ConflictType::None, }, TransactionListItem::Transaction { transaction: service_txs_inter.next().unwrap(), conflict_type: ConflictType::None, }, TransactionListItem::Transaction { transaction: service_txs_inter.next().unwrap(), conflict_type: ConflictType::None, }, ]; let actual = service_txs_to_tx_list_items(service_txs_copy, -1, berlin_timezone_offset).unwrap(); assert_eq!(expected, actual); } #[rocket::async_test] async fn service_txs_to_tx_list_items_date_label_melbourne_timezone()
{
    let mut mock_info_provider = MockInfoProvider::new();
    mock_info_provider.expect_safe_info().times(0);
    mock_info_provider
        .expect_token_info()
        .times(6)
        .returning(move |_| bail!("No token info"));
    mock_info_provider
        .expect_full_address_info_search()
        .times(12)
        .returning(move |_| bail!("No address info"));

    let service_txs = get_service_txs(&mut mock_info_provider).await;
    let service_txs_copy = get_service_txs(&mut mock_info_provider).await;
    let melbourne_timezone_offset = 39600; // + 11 hours Melbourne/Australia
    let mut service_txs_inter = service_txs.into_iter();

    let expected = vec![
        TransactionListItem::DateLabel {
            timestamp: 1606827600000, // 2020/12/02
        },
        TransactionListItem::Transaction {
            transaction: service_txs_inter.next().unwrap(),
            conflict_type: ConflictType::None,
        },
        TransactionListItem::Transaction {
            transaction: service_txs_inter.next().unwrap(),
            conflict_type: ConflictType::None,
        },
        TransactionListItem::Transaction {
            transaction: service_txs_inter.next().unwrap(),
            conflict_type: ConflictType::None,
        },
        TransactionListItem::DateLabel {
            timestamp: 1606741200000, // 2020/12/01
        },
        TransactionListItem::Transaction {
            transaction: service_txs_inter.next().unwrap(),
            conflict_type: ConflictType::None,
        },
        TransactionListItem::Transaction {
            transaction: service_txs_inter.next().unwrap(),
            conflict_type: ConflictType::None,
        },
        TransactionListItem::DateLabel {
            timestamp: 1606654800000, // 2020/11/30
        },
        TransactionListItem::Transaction {
            transaction: service_txs_inter.next().unwrap(),
            conflict_type: ConflictType::None,
        },
    ];

    let actual =
        service_txs_to_tx_list_items(service_txs_copy, -1, melbourne_timezone_offset).unwrap();
    assert_eq!(expected, actual);
}

#[rocket::async_test]
async fn service_txs_to_tx_list_items_date_label_buenos_aires_timezone() {
    let mut mock_info_provider = MockInfoProvider::new();
    mock_info_provider.expect_safe_info().times(0);
    mock_info_provider
        .expect_token_info()
        .times(6)
        .returning(move |_| bail!("No token info"));
    mock_info_provider
        .expect_full_address_info_search()
        .times(12)
        .returning(move |_| bail!("No address info"));

    let service_txs = get_service_txs(&mut mock_info_provider).await;
    let service_txs_copy = get_service_txs(&mut mock_info_provider).await;
    let buenos_aires_timezone_offset = -10800; // -3 hours Argentina/Buenos Aires
    let mut service_txs_inter = service_txs.into_iter();

    let expected = vec![
        TransactionListItem::DateLabel {
            timestamp: 1606791600000,
        },
        TransactionListItem::Transaction {
            transaction: service_txs_inter.next().unwrap(),
            conflict_type: ConflictType::None,
        },
        TransactionListItem::Transaction {
            transaction: service_txs_inter.next().unwrap(),
            conflict_type: ConflictType::None,
        },
        TransactionListItem::Transaction {
            transaction: service_txs_inter.next().unwrap(),
            conflict_type: ConflictType::None,
        },
        TransactionListItem::DateLabel {
            timestamp: 1606705200000,
        },
        TransactionListItem::Transaction {
            transaction: service_txs_inter.next().unwrap(),
            conflict_type: ConflictType::None,
        },
        TransactionListItem::Transaction {
            transaction: service_txs_inter.next().unwrap(),
            conflict_type: ConflictType::None,
        },
        TransactionListItem::Transaction {
            transaction: service_txs_inter.next().unwrap(),
            conflict_type: ConflictType::None,
        },
    ];

    let actual =
        service_txs_to_tx_list_items(service_txs_copy, -1, buenos_aires_timezone_offset).unwrap();
    assert_eq!(expected, actual);
}

#[rocket::async_test]
#[should_panic]
async fn peek_timestamp_and_remove_item_empty() {
    let utc_timezone_offset = 3600;
    let mut mock_info_provider = MockInfoProvider::new();
    mock_info_provider.expect_safe_info().times(0);
    mock_info_provider.expect_token_info().times(0);

    let backend_txs: Vec<Transaction> = vec![];
    let mut backend_txs_iter = backend_txs.into_iter();

    peek_timestamp_and_remove_item(
        &mut backend_txs_iter,
        &mut mock_info_provider,
        "0x1230B3d59858296A31053C1b8562Ecf89A2f888b",
        utc_timezone_offset,
    )
    .await
    .unwrap();
}

#[rocket::async_test]
async fn peek_timestamp_and_remove_item_with_items() {
    let expected_timestamp = 1606780800000;
    let utc_timezone_offset = 0;
    let mut mock_info_provider = MockInfoProvider::new();
    mock_info_provider.expect_safe_info().times(0);
    mock_info_provider
        .expect_token_info()
        .times(1)
        .returning(move |_| bail!("No token info"));
    mock_info_provider
        .expect_full_address_info_search()
        .times(1)
        .return_once(move |_| bail!("No address info"));

    let backend_txs =
        serde_json::from_str::<Page<Transaction>>(BACKEND_HISTORY_TRANSACTION_LIST_PAGE)
            .unwrap()
            .results;
    let mut backend_txs_iter = backend_txs.into_iter();

    let actual_timestamp = peek_timestamp_and_remove_item(
        &mut backend_txs_iter,
        &mut mock_info_provider,
        "0x1230B3d59858296A31053C1b8562Ecf89A2f888b",
        utc_timezone_offset,
    )
    .await
    .unwrap();

    assert_eq!(expected_timestamp, actual_timestamp);
}

#[test]
fn get_day_timestamp_millis_for_02_12_2020_00_00_01() {
    let input = 1606867201000; // 1 second past the 2nd of December 2020 UTC
    let utc_timezone_offset = 0;

    let actual = get_day_timestamp_millis(input, utc_timezone_offset);
    let expected = 1606867200000;

    assert_eq!(expected, actual);
}

async fn get_service_txs(mock_info_provider: &mut MockInfoProvider) -> Vec<TransactionSummary> {
    let backend_txs =
        serde_json::from_str::<Page<Transaction>>(BACKEND_HISTORY_TRANSACTION_LIST_PAGE).unwrap();
    let mut result = vec![];
    for tx in backend_txs.results {
        result.extend(
            tx.to_transaction_summary(
                mock_info_provider,
                "0x1230B3d59858296A31053C1b8562Ecf89A2f888b",
            )
            .await
            .unwrap_or_default(),
        )
    }
    result
}
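// Illustrative sketch, not part of the module under test: the expected
// `DateLabel` timestamps in the fixtures above are consistent with snapping
// each transaction timestamp to local midnight and expressing it back in UTC
// millis. The helper below is a hypothetical reconstruction of that rule
// (the real `get_day_timestamp_millis` lives in `transactions_history`);
// e.g. day_start_millis(1606845854000, 39600) == 1606827600000, matching the
// Melbourne fixture, and day_start_millis(1606845854000, -10800) ==
// 1606791600000, matching Buenos Aires.
#[allow(dead_code)]
fn day_start_millis(timestamp_millis: i64, timezone_offset_secs: i64) -> i64 {
    const DAY_MS: i64 = 24 * 60 * 60 * 1000; // 86_400_000
    let offset_ms = timezone_offset_secs * 1000;
    // Shift into local time, floor to the start of the day, shift back to UTC.
    (timestamp_millis + offset_ms).div_euclid(DAY_MS) * DAY_MS - offset_ms
}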
EMI.py
# -*- encoding: utf-8 -*-
from .. import db


class EMI_Information(db.Model):
    __tablename__ = "EMI_Information"

    EMI_Identifier = db.Column(db.String(45), primary_key=True, nullable=False)
    ItemName = db.Column(db.String(45), nullable=False)
    ProductPrice = db.Column(db.Float, nullable=False)
    InterestRate = db.Column(db.Float, nullable=False)
    Tenure = db.Column(db.Integer, nullable=False)
    MonthlyEMI = db.Column(db.Float, nullable=False)

    def __repr__(self):
        # return { c.key : getattr(self, c.key) for c in self.__table__.columns }
        return (
            f"<{self.EMI_Identifier}(ItemName = {self.ItemName}, "
            f"ProductPrice = {self.ProductPrice}, Tenure = {self.Tenure})>"
        )

    def toDict(self):
        return {c.key: getattr(self, c.key) for c in self.__table__.columns}
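
# Minimal usage sketch (illustrative, not part of the model): one way to fill
# MonthlyEMI before persisting a row is the standard amortized-EMI formula.
# The helper name is hypothetical, and it assumes InterestRate is an annual
# percentage and Tenure is a number of months.
def compute_monthly_emi(product_price: float, interest_rate: float, tenure: int) -> float:
    r = interest_rate / 12 / 100  # monthly rate as a fraction
    if r == 0:
        return product_price / tenure
    return product_price * r * (1 + r) ** tenure / ((1 + r) ** tenure - 1)


# e.g. a 12000.0 purchase at 10% annual interest over 12 months gives an EMI
# of roughly 1054.99:
# emi = compute_monthly_emi(12000.0, 10.0, 12)
# row = EMI_Information(EMI_Identifier="EMI-001", ItemName="Laptop",
#                       ProductPrice=12000.0, InterestRate=10.0, Tenure=12,
#                       MonthlyEMI=round(emi, 2))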
service_test.go
/* Copyright 2019 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package resources import ( "testing" "github.com/google/go-cmp/cmp" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" pav1a1 "github.com/knative/serving/pkg/apis/autoscaling/v1alpha1" "github.com/knative/serving/pkg/apis/serving" autoscalingv1 "k8s.io/api/autoscaling/v1" ) var boolTrue = true func TestMakeService(t *testing.T) { pa := &pav1a1.PodAutoscaler{ ObjectMeta: metav1.ObjectMeta{ Namespace: "here", Name: "with-you", UID: "2006", // Those labels are propagated from the Revision->KPA. Labels: map[string]string{ serving.RevisionLabelKey: "with-you", serving.RevisionUID: "2009", }, Annotations: map[string]string{ "a": "b", }, }, Spec: pav1a1.PodAutoscalerSpec{ ScaleTargetRef: autoscalingv1.CrossVersionObjectReference{ APIVersion: "apps/v1", Kind: "Deployment", Name: "with-you", }, ServiceName: "with-you-service", }, } selector := map[string]string{"cant": "stop"} want := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Namespace: "here", Name: "with-you-metrics", Labels: map[string]string{ // Those should be propagated. serving.RevisionLabelKey: "with-you", serving.RevisionUID: "2009", kpaLabelKey: "with-you", }, Annotations: map[string]string{ "a": "b", }, OwnerReferences: []metav1.OwnerReference{{ APIVersion: pav1a1.SchemeGroupVersion.String(), Kind: "PodAutoscaler", Name: "with-you", UID: "2006", Controller: &boolTrue, BlockOwnerDeletion: &boolTrue, }}, }, Spec: corev1.ServiceSpec{ Ports: []corev1.ServicePort{{ Name: "metrics", Protocol: corev1.ProtocolTCP, Port: 9090, TargetPort: intstr.FromString("queue-metrics"), }}, Selector: selector, }, } got := MakeMetricsService(pa, selector) if diff := cmp.Diff(want, got); diff != "" {
t.Errorf("Metrics K8s Service mismatch (-want, +got) = %v", diff) } }
gentx.go
package cli import ( "bufio" "bytes" "encoding/json" "fmt" "io" "io/ioutil" "os" "path/filepath" "github.com/pkg/errors" "github.com/spf13/cobra" flag "github.com/spf13/pflag" "github.com/spf13/viper" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" tmos "github.com/tendermint/tendermint/libs/os" tmtypes "github.com/tendermint/tendermint/types" "github.com/cosmos/cosmos-sdk/client/context" "github.com/cosmos/cosmos-sdk/client/flags" "github.com/cosmos/cosmos-sdk/client/keys" "github.com/cosmos/cosmos-sdk/codec" kbkeys "github.com/cosmos/cosmos-sdk/crypto/keys" "github.com/cosmos/cosmos-sdk/server" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/types/module" "github.com/cosmos/cosmos-sdk/x/auth" "github.com/cosmos/cosmos-sdk/x/auth/client/utils" "github.com/cosmos/cosmos-sdk/x/genutil" "github.com/cosmos/cosmos-sdk/x/genutil/types" ) // StakingMsgBuildingHelpers helpers for message building gen-tx command type StakingMsgBuildingHelpers interface { CreateValidatorMsgHelpers(ipDefault string) (fs *flag.FlagSet, nodeIDFlag, pubkeyFlag, amountFlag, defaultsDesc string) PrepareFlagsForTxCreateValidator(config *cfg.Config, nodeID, chainID string, valPubKey crypto.PubKey) BuildCreateValidatorMsg(cliCtx context.CLIContext, txBldr auth.TxBuilder) (auth.TxBuilder, sdk.Msg, error) } // GenTxCmd builds the application's gentx command. // nolint: errcheck func GenTxCmd(ctx *server.Context, cdc *codec.Codec, mbm module.BasicManager, smbh StakingMsgBuildingHelpers, genAccIterator types.GenesisAccountsIterator, defaultNodeHome, defaultCLIHome string) *cobra.Command { ipDefault, _ := server.ExternalIP() fsCreateValidator, flagNodeID, flagPubKey, flagAmount, defaultsDesc := smbh.CreateValidatorMsgHelpers(ipDefault) cmd := &cobra.Command{ Use: "gentx", Short: "Generate a genesis tx carrying a self delegation", Args: cobra.NoArgs, Long: fmt.Sprintf(`This command is an alias of the 'tx create-validator' command'. It creates a genesis transaction to create a validator. 
The following default parameters are included: %s`, defaultsDesc), RunE: func(cmd *cobra.Command, args []string) error { config := ctx.Config config.SetRoot(viper.GetString(flags.FlagHome)) nodeID, valPubKey, err := genutil.InitializeNodeValidatorFiles(ctx.Config) if err != nil { return errors.Wrap(err, "failed to initialize node validator files") } // Read --nodeID, if empty take it from priv_validator.json if nodeIDString := viper.GetString(flagNodeID); nodeIDString != "" { nodeID = nodeIDString } // Read --pubkey, if empty take it from priv_validator.json if valPubKeyString := viper.GetString(flagPubKey); valPubKeyString != "" { valPubKey, err = sdk.GetPubKeyFromBech32(sdk.Bech32PubKeyTypeConsPub, valPubKeyString) if err != nil { return errors.Wrap(err, "failed to get consensus node public key") } } genDoc, err := tmtypes.GenesisDocFromFile(config.GenesisFile()) if err != nil { return errors.Wrapf(err, "failed to read genesis doc file %s", config.GenesisFile()) } var genesisState map[string]json.RawMessage if err = cdc.UnmarshalJSON(genDoc.AppState, &genesisState); err != nil { return errors.Wrap(err, "failed to unmarshal genesis state") } if err = mbm.ValidateGenesis(genesisState); err != nil { return errors.Wrap(err, "failed to validate genesis state") } inBuf := bufio.NewReader(cmd.InOrStdin()) kb, err := keys.NewKeyringFromDir(viper.GetString(flagClientHome), inBuf) if err != nil { return errors.Wrap(err, "failed to initialize keybase") } name := viper.GetString(flags.FlagName) key, err := kb.Get(name) if err != nil { return errors.Wrap(err, "failed to read from keybase") } // Set flags for creating gentx viper.Set(flags.FlagHome, viper.GetString(flagClientHome)) smbh.PrepareFlagsForTxCreateValidator(config, nodeID, genDoc.ChainID, valPubKey) // Fetch the amount of coins staked amount := viper.GetString(flagAmount) coins, err := sdk.ParseCoins(amount) if err != nil { return errors.Wrap(err, "failed to parse coins") } err = genutil.ValidateAccountInGenesis(genesisState, genAccIterator, key.GetAddress(), coins, cdc) if err != nil { return errors.Wrap(err, "failed to validate account in genesis") } txBldr := auth.NewTxBuilderFromCLI(inBuf).WithTxEncoder(utils.GetTxEncoder(cdc)) cliCtx := context.NewCLIContextWithInput(inBuf).WithCodec(cdc) // Set the generate-only flag here after the CLI context has // been created. This allows the from name/key to be correctly populated. // // TODO: Consider removing the manual setting of generate-only in // favor of a 'gentx' flag in the create-validator command. viper.Set(flags.FlagGenerateOnly, true) // create a 'create-validator' message txBldr, msg, err := smbh.BuildCreateValidatorMsg(cliCtx, txBldr) if err != nil { return errors.Wrap(err, "failed to build create-validator message") } if key.GetType() == kbkeys.TypeOffline || key.GetType() == kbkeys.TypeMulti { fmt.Println("Offline key passed in. 
Use `tx sign` command to sign:") return utils.PrintUnsignedStdTx(txBldr, cliCtx, []sdk.Msg{msg}) } // write the unsigned transaction to the buffer w := bytes.NewBuffer([]byte{}) cliCtx = cliCtx.WithOutput(w) if err = utils.PrintUnsignedStdTx(txBldr, cliCtx, []sdk.Msg{msg}); err != nil { return errors.Wrap(err, "failed to print unsigned std tx") } // read the transaction stdTx, err := readUnsignedGenTxFile(cdc, w) if err != nil { return errors.Wrap(err, "failed to read unsigned gen tx file") } // sign the transaction and write it to the output file signedTx, err := utils.SignStdTx(txBldr, cliCtx, name, stdTx, false, true) if err != nil { return errors.Wrap(err, "failed to sign std tx") } // Fetch output file name outputDocument := viper.GetString(flags.FlagOutputDocument) if outputDocument == "" { outputDocument, err = makeOutputFilepath(config.RootDir, nodeID) if err != nil { return errors.Wrap(err, "failed to create output file path") } } if err := writeSignedGenTx(cdc, outputDocument, signedTx); err != nil { return errors.Wrap(err, "failed to write signed gen tx") } fmt.Fprintf(os.Stderr, "Genesis transaction written to %q\n", outputDocument) return nil }, } cmd.Flags().String(flags.FlagHome, defaultNodeHome, "node's home directory") cmd.Flags().String(flagClientHome, defaultCLIHome, "client's home directory") cmd.Flags().String(flags.FlagName, "", "name of private key with which to sign the gentx") cmd.Flags().String(flags.FlagOutputDocument, "", "write the genesis transaction JSON document to the given file instead of the default location") cmd.Flags().AddFlagSet(fsCreateValidator) cmd.Flags().String(flags.FlagKeyringBackend, flags.DefaultKeyringBackend, "Select keyring's backend (os|file|test)") viper.BindPFlag(flags.FlagKeyringBackend, cmd.Flags().Lookup(flags.FlagKeyringBackend)) cmd.MarkFlagRequired(flags.FlagName) return cmd } func makeOutputFilepath(rootDir, nodeID string) (string, error) { writePath := filepath.Join(rootDir, "config", "gentx") if err := tmos.EnsureDir(writePath, 0700); err != nil { return "", err } return filepath.Join(writePath, fmt.Sprintf("gentx-%v.json", nodeID)), nil } func
readUnsignedGenTxFile(cdc *codec.Codec, r io.Reader) (auth.StdTx, error) {
	var stdTx auth.StdTx
	bytes, err := ioutil.ReadAll(r)
	if err != nil {
		return stdTx, err
	}
	err = cdc.UnmarshalJSON(bytes, &stdTx)
	return stdTx, err
}

func writeSignedGenTx(cdc *codec.Codec, outputDocument string, tx auth.StdTx) error {
	outputFile, err := os.OpenFile(outputDocument, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0644)
	if err != nil {
		return err
	}
	defer outputFile.Close()
	json, err := cdc.MarshalJSON(tx)
	if err != nil {
		return err
	}
	_, err = fmt.Fprintf(outputFile, "%s\n", json)
	return err
}

// DONTCOVER
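
// Minimal round-trip sketch (illustrative only; the function name is
// hypothetical). It mirrors the flow inside GenTxCmd's RunE: render the
// unsigned tx into an in-memory buffer, then decode it back with
// readUnsignedGenTxFile before signing.
func exampleUnsignedGenTxRoundTrip(cdc *codec.Codec, txBldr auth.TxBuilder,
	cliCtx context.CLIContext, msg sdk.Msg) (auth.StdTx, error) {
	w := bytes.NewBuffer([]byte{})
	cliCtx = cliCtx.WithOutput(w)
	if err := utils.PrintUnsignedStdTx(txBldr, cliCtx, []sdk.Msg{msg}); err != nil {
		return auth.StdTx{}, err
	}
	return readUnsignedGenTxFile(cdc, w)
}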
main.min.js
/*! * Minimal Mistakes Jekyll Theme 4.21.0 by Michael Rose * Copyright 2013-2020 Michael Rose - mademistakes.com | @mmistakes * Licensed under MIT */ !function(e,t){"use strict";"object"==typeof module&&"object"==typeof module.exports?module.exports=e.document?t(e,!0):function(e){if(!e.document)throw new Error("jQuery requires a window with a document");return t(e)}:t(e)}("undefined"!=typeof window?window:this,function(T,e){"use strict";function m(e){return null!=e&&e===e.window}var t=[],r=Object.getPrototypeOf,s=t.slice,g=t.flat?function(e){return t.flat.call(e)}:function(e){return t.concat.apply([],e)},u=t.push,i=t.indexOf,n={},o=n.toString,v=n.hasOwnProperty,a=v.toString,l=a.call(Object),y={},b=function(e){return"function"==typeof e&&"number"!=typeof e.nodeType},E=T.document,c={type:!0,src:!0,nonce:!0,noModule:!0};function x(e,t,n){var r,i,o=(n=n||E).createElement("script");if(o.text=e,t)for(r in c)(i=t[r]||t.getAttribute&&t.getAttribute(r))&&o.setAttribute(r,i);n.head.appendChild(o).parentNode.removeChild(o)}function w(e){return null==e?e+":"object"==typeof e||"function"==typeof e?n[o.call(e)]||"object":typeof e}var f="3.5.1",S=function(e,t){return new S.fn.init(e,t)};function d(e){var t=!!e&&"length"in e&&e.length,n=w(e);return!b(e)&&!m(e)&&("array"===n||0===t||"number"==typeof t&&0<t&&t-1 in e)}S.fn=S.prototype={jquery:f,constructor:S,length:0,toArray:function(){return s.call(this)},get:function(e){return null==e?s.call(this):e<0?this[e+this.length]:this[e]},pushStack:function(e){var t=S.merge(this.constructor(),e);return t.prevObject=this,t},each:function(e){return S.each(this,e)},map:function(n){return this.pushStack(S.map(this,function(e,t){return n.call(e,t,e)}))},slice:function(){return this.pushStack(s.apply(this,arguments))},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},even:function(){return this.pushStack(S.grep(this,function(e,t){return(t+1)%2}))},odd:function(){return this.pushStack(S.grep(this,function(e,t){return t%2}))},eq:function(e){var t=this.length,n=+e+(e<0?t:0);return this.pushStack(0<=n&&n<t?[this[n]]:[])},end:function(){return this.prevObject||this.constructor()},push:u,sort:t.sort,splice:t.splice},S.extend=S.fn.extend=function(){var e,t,n,r,i,o,a=arguments[0]||{},s=1,u=arguments.length,l=!1;for("boolean"==typeof a&&(l=a,a=arguments[s]||{},s++),"object"==typeof a||b(a)||(a={}),s===u&&(a=this,s--);s<u;s++)if(null!=(e=arguments[s]))for(t in e)r=e[t],"__proto__"!==t&&a!==r&&(l&&r&&(S.isPlainObject(r)||(i=Array.isArray(r)))?(n=a[t],o=i&&!Array.isArray(n)?[]:i||S.isPlainObject(n)?n:{},i=!1,a[t]=S.extend(l,o,r)):void 0!==r&&(a[t]=r));return a},S.extend({expando:"jQuery"+(f+Math.random()).replace(/\D/g,"),isReady:!0,error:function(e){throw new Error(e)},noop:function(){},isPlainObject:function(e){var t,n;return!(!e||"[object Object]"!==o.call(e))&&(!(t=r(e))||"function"==typeof(n=v.call(t,"constructor")&&t.constructor)&&a.call(n)===l)},isEmptyObject:function(e){var t;for(t in e)return!1;return!0},globalEval:function(e,t,n){x(e,{nonce:t&&t.nonce},n)},each:function(e,t){var n,r=0;if(d(e))for(n=e.length;r<n&&!1!==t.call(e[r],r,e[r]);r++);else for(r in e)if(!1===t.call(e[r],r,e[r]))break;return e},makeArray:function(e,t){var n=t||[];return null!=e&&(d(Object(e))?S.merge(n,"string"==typeof e?[e]:e):u.call(n,e)),n},inArray:function(e,t,n){return null==t?-1:i.call(t,e,n)},merge:function(e,t){for(var n=+t.length,r=0,i=e.length;r<n;r++)e[i++]=t[r];return e.length=i,e},grep:function(e,t,n){for(var 
r=[],i=0,o=e.length,a=!n;i<o;i++)!t(e[i],i)!=a&&r.push(e[i]);return r},map:function(e,t,n){var r,i,o=0,a=[];if(d(e))for(r=e.length;o<r;o++)null!=(i=t(e[o],o,n))&&a.push(i);else for(o in e)null!=(i=t(e[o],o,n))&&a.push(i);return g(a)},guid:1,support:y}),"function"==typeof Symbol&&(S.fn[Symbol.iterator]=t[Symbol.iterator]),S.each("Boolean Number String Function Array Date RegExp Object Error Symbol".split(" "),function(e,t){n["[object "+t+"]"]=t.toLowerCase()});var p=function(n){function f(e,t){var n="0x"+e.slice(1)-65536;return t||(n<0?String.fromCharCode(65536+n):String.fromCharCode(n>>10|55296,1023&n|56320))}function i(){C()}var e,p,x,o,a,h,d,m,w,u,l,C,T,s,E,g,c,v,y,S="sizzle"+ +new Date,b=n.document,k=0,r=0,A=ue(),N=ue(),j=ue(),I=ue(),L=function(e,t){return e===t&&(l=!0),0},D={}.hasOwnProperty,t=[],O=t.pop,P=t.push,H=t.push,q=t.slice,M=function(e,t){for(var n=0,r=e.length;n<r;n++)if(e[n]===t)return n;return-1},_="checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped",$="[\\x20\\t\\r\\n\\f]",R="(?:\\\\[\\da-fA-F]{1,6}"+$+"?|\\\\[^\\r\\n\\f]|[\\w-]|[^\0-\\x7f])+",B="\\["+$+"*("+R+")(?:"+$+"*([*^$|!~]?=)"+$+"*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|("+R+"))|)"+$+"*\\]",F=":("+R+")(?:\\((('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|((?:\\\\.|[^\\\\()[\\]]|"+B+")*)|.*)\\)|)",z=new RegExp($+"+","g"),W=new RegExp("^"+$+"+|((?:^|[^\\\\])(?:\\\\.)*)"+$+"+$","g"),U=new RegExp("^"+$+"*,"+$+"*"),X=new RegExp("^"+$+"*([>+~]|"+$+")"+$+"*"),Q=new RegExp($+"|>"),Y=new RegExp(F),V=new RegExp("^"+R+"$"),G={ID:new RegExp("^#("+R+")"),CLASS:new RegExp("^\\.("+R+")"),TAG:new RegExp("^("+R+"|[*])"),ATTR:new RegExp("^"+B),PSEUDO:new RegExp("^"+F),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+$+"*(even|odd|(([+-]|)(\\d*)n|)"+$+"*(?:([+-]|)"+$+"*(\\d+)|))"+$+"*\\)|)","i"),bool:new RegExp("^(?:"+_+")$","i"),needsContext:new RegExp("^"+$+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+$+"*((?:-\\d)?\\d*)"+$+"*\\)|)(?=[^-]|$)","i")},K=/HTML$/i,Z=/^(?:input|select|textarea|button)$/i,J=/^h\d$/i,ee=/^[^{]+\{\s*\[native \w/,te=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,ne=/[+~]/,re=new RegExp("\\\\[\\da-fA-F]{1,6}"+$+"?|\\\\([^\\r\\n\\f])","g"),ie=/([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g,oe=function(e,t){return t?"\0"===e?"'":e.slice(0,-1)+"\\"+e.charCodeAt(e.length-1).toString(16)+" ":"\\"+e},ae=xe(function(e){return!0===e.disabled&&"fieldset"===e.nodeName.toLowerCase()},{dir:"parentNode",next:"legend"});try{H.apply(t=q.call(b.childNodes),b.childNodes),t[b.childNodes.length].nodeType}catch(e){H={apply:t.length?function(e,t){P.apply(e,q.call(t))}:function(e,t){for(var n=e.length,r=0;e[n++]=t[r++];);e.length=n-1}}}function se(t,e,n,r){var i,o,a,s,u,l,c,f=e&&e.ownerDocument,d=e?e.nodeType:9;if(n=n||[],"string"!=typeof t||!t||1!==d&&9!==d&&11!==d)return n;if(!r&&(C(e),e=e||T,E)){if(11!==d&&(u=te.exec(t)))if(i=u[1]){if(9===d){if(!(a=e.getElementById(i)))return n;if(a.id===i)return n.push(a),n}else if(f&&(a=f.getElementById(i))&&y(e,a)&&a.id===i)return n.push(a),n}else{if(u[2])return H.apply(n,e.getElementsByTagName(t)),n;if((i=u[3])&&p.getElementsByClassName&&e.getElementsByClassName)return H.apply(n,e.getElementsByClassName(i)),n}if(p.qsa&&!I[t+" 
"]&&(!g||!g.test(t))&&(1!==d||"object"!==e.nodeName.toLowerCase())){if(c=t,f=e,1===d&&(Q.test(t)||X.test(t))){for((f=ne.test(t)&&ve(e.parentNode)||e)===e&&p.scope||((s=e.getAttribute("id"))?s=s.replace(ie,oe):e.setAttribute("id",s=S)),o=(l=h(t)).length;o--;)l[o]=(s?"#"+s:":scope")+" "+be(l[o]);c=l.join(",")}try{return H.apply(n,f.querySelectorAll(c)),n}catch(e){I(t,!0)}finally{s===S&&e.removeAttribute("id")}}}return m(t.replace(W,"$1"),e,n,r)}function ue(){var r=[];return function e(t,n){return r.push(t+" ")>x.cacheLength&&delete e[r.shift()],e[t+" "]=n}}function le(e){return e[S]=!0,e}function ce(e){var t=T.createElement("fieldset");try{return!!e(t)}catch(e){return!1}finally{t.parentNode&&t.parentNode.removeChild(t),t=null}}function fe(e,t){for(var n=e.split("|"),r=n.length;r--;)x.attrHandle[n[r]]=t}function de(e,t){var n=t&&e,r=n&&1===e.nodeType&&1===t.nodeType&&e.sourceIndex-t.sourceIndex;if(r)return r;if(n)for(;n=n.nextSibling;)if(n===t)return-1;return e?1:-1}function pe(t){return function(e){return"input"===e.nodeName.toLowerCase()&&e.type===t}}function he(n){return function(e){var t=e.nodeName.toLowerCase();return("input"===t||"button"===t)&&e.type===n}}function me(t){return function(e){return"form"in e?e.parentNode&&!1===e.disabled?"label"in e?"label"in e.parentNode?e.parentNode.disabled===t:e.disabled===t:e.isDisabled===t||e.isDisabled!==!t&&ae(e)===t:e.disabled===t:"label"in e&&e.disabled===t}}function ge(a){return le(function(o){return o=+o,le(function(e,t){for(var n,r=a([],e.length,o),i=r.length;i--;)e[n=r[i]]&&(e[n]=!(t[n]=e[n]))})})}function ve(e){return e&&void 0!==e.getElementsByTagName&&e}for(e in p=se.support={},a=se.isXML=function(e){var t=e.namespaceURI,n=(e.ownerDocument||e).documentElement;return!K.test(t||n&&n.nodeName||"HTML")},C=se.setDocument=function(e){var t,n,r=e?e.ownerDocument||e:b;return r!=T&&9===r.nodeType&&r.documentElement&&(s=(T=r).documentElement,E=!a(T),b!=T&&(n=T.defaultView)&&n.top!==n&&(n.addEventListener?n.addEventListener("unload",i,!1):n.attachEvent&&n.attachEvent("onunload",i)),p.scope=ce(function(e){return s.appendChild(e).appendChild(T.createElement("div")),void 0!==e.querySelectorAll&&!e.querySelectorAll(":scope fieldset div").length}),p.attributes=ce(function(e){return e.className="i",!e.getAttribute("className")}),p.getElementsByTagName=ce(function(e){return e.appendChild(T.createComment(")),!e.getElementsByTagName("*").length}),p.getElementsByClassName=ee.test(T.getElementsByClassName),p.getById=ce(function(e){return s.appendChild(e).id=S,!T.getElementsByName||!T.getElementsByName(S).length}),p.getById?(x.filter.ID=function(e){var t=e.replace(re,f);return function(e){return e.getAttribute("id")===t}},x.find.ID=function(e,t){if(void 0!==t.getElementById&&E){var n=t.getElementById(e);return n?[n]:[]}}):(x.filter.ID=function(e){var n=e.replace(re,f);return function(e){var t=void 0!==e.getAttributeNode&&e.getAttributeNode("id");return t&&t.value===n}},x.find.ID=function(e,t){if(void 0!==t.getElementById&&E){var n,r,i,o=t.getElementById(e);if(o){if((n=o.getAttributeNode("id"))&&n.value===e)return[o];for(i=t.getElementsByName(e),r=0;o=i[r++];)if((n=o.getAttributeNode("id"))&&n.value===e)return[o]}return[]}}),x.find.TAG=p.getElementsByTagName?function(e,t){return void 0!==t.getElementsByTagName?t.getElementsByTagName(e):p.qsa?t.querySelectorAll(e):void 0}:function(e,t){var n,r=[],i=0,o=t.getElementsByTagName(e);if("*"!==e)return o;for(;n=o[i++];)1===n.nodeType&&r.push(n);return r},x.find.CLASS=p.getElementsByClassName&&function(e,t){if(void 
0!==t.getElementsByClassName&&E)return t.getElementsByClassName(e)},c=[],g=[],(p.qsa=ee.test(T.querySelectorAll))&&(ce(function(e){var t;s.appendChild(e).innerHTML="<a id='"+S+"'></a><select id='"+S+"-\r\\' msallowcapture=''><option selected=''></option></select>",e.querySelectorAll("[msallowcapture^='']").length&&g.push("[*^$]="+$+"*(?:''|\"\")"),e.querySelectorAll("[selected]").length||g.push("\\["+$+"*(?:value|"+_+")"),e.querySelectorAll("[id~="+S+"-]").length||g.push("~="),(t=T.createElement("input")).setAttribute("name","),e.appendChild(t),e.querySelectorAll("[name='']").length||g.push("\\["+$+"*name"+$+"*="+$+"*(?:''|\"\")"),e.querySelectorAll(":checked").length||g.push(":checked"),e.querySelectorAll("a#"+S+"+*").length||g.push(".#.+[+~]"),e.querySelectorAll("\\\f"),g.push("[\\r\\n\\f]")}),ce(function(e){e.innerHTML="<a href='' disabled='disabled'></a><select disabled='disabled'><option/></select>";var t=T.createElement("input");t.setAttribute("type","hidden"),e.appendChild(t).setAttribute("name","D"),e.querySelectorAll("[name=d]").length&&g.push("name"+$+"*[*^$|!~]?="),2!==e.querySelectorAll(":enabled").length&&g.push(":enabled",":disabled"),s.appendChild(e).disabled=!0,2!==e.querySelectorAll(":disabled").length&&g.push(":enabled",":disabled"),e.querySelectorAll("*,:x"),g.push(",.*:")})),(p.matchesSelector=ee.test(v=s.matches||s.webkitMatchesSelector||s.mozMatchesSelector||s.oMatchesSelector||s.msMatchesSelector))&&ce(function(e){p.disconnectedMatch=v.call(e,"*"),v.call(e,"[s!='']:x"),c.push("!=",F)}),g=g.length&&new RegExp(g.join("|")),c=c.length&&new RegExp(c.join("|")),t=ee.test(s.compareDocumentPosition),y=t||ee.test(s.contains)?function(e,t){var n=9===e.nodeType?e.documentElement:e,r=t&&t.parentNode;return e===r||!(!r||1!==r.nodeType||!(n.contains?n.contains(r):e.compareDocumentPosition&&16&e.compareDocumentPosition(r)))}:function(e,t){if(t)for(;t=t.parentNode;)if(t===e)return!0;return!1},L=t?function(e,t){if(e===t)return l=!0,0;var n=!e.compareDocumentPosition-!t.compareDocumentPosition;return n||(1&(n=(e.ownerDocument||e)==(t.ownerDocument||t)?e.compareDocumentPosition(t):1)||!p.sortDetached&&t.compareDocumentPosition(e)===n?e==T||e.ownerDocument==b&&y(b,e)?-1:t==T||t.ownerDocument==b&&y(b,t)?1:u?M(u,e)-M(u,t):0:4&n?-1:1)}:function(e,t){if(e===t)return l=!0,0;var n,r=0,i=e.parentNode,o=t.parentNode,a=[e],s=[t];if(!i||!o)return e==T?-1:t==T?1:i?-1:o?1:u?M(u,e)-M(u,t):0;if(i===o)return de(e,t);for(n=e;n=n.parentNode;)a.unshift(n);for(n=t;n=n.parentNode;)s.unshift(n);for(;a[r]===s[r];)r++;return r?de(a[r],s[r]):a[r]==b?-1:s[r]==b?1:0}),T},se.matches=function(e,t){return se(e,null,null,t)},se.matchesSelector=function(e,t){if(C(e),p.matchesSelector&&E&&!I[t+" "]&&(!c||!c.test(t))&&(!g||!g.test(t)))try{var n=v.call(e,t);if(n||p.disconnectedMatch||e.document&&11!==e.document.nodeType)return n}catch(e){I(t,!0)}return 0<se(t,T,null,[e]).length},se.contains=function(e,t){return(e.ownerDocument||e)!=T&&C(e),y(e,t)},se.attr=function(e,t){(e.ownerDocument||e)!=T&&C(e);var n=x.attrHandle[t.toLowerCase()],r=n&&D.call(x.attrHandle,t.toLowerCase())?n(e,t,!E):void 0;return void 0!==r?r:p.attributes||!E?e.getAttribute(t):(r=e.getAttributeNode(t))&&r.specified?r.value:null},se.escape=function(e){return(e+").replace(ie,oe)},se.error=function(e){throw new Error("Syntax error, unrecognized expression: "+e)},se.uniqueSort=function(e){var t,n=[],r=0,i=0;if(l=!p.detectDuplicates,u=!p.sortStable&&e.slice(0),e.sort(L),l){for(;t=e[i++];)t===e[i]&&(r=n.push(i));for(;r--;)e.splice(n[r],1)}return 
u=null,e},o=se.getText=function(e){var t,n=",r=0,i=e.nodeType;if(i){if(1===i||9===i||11===i){if("string"==typeof e.textContent)return e.textContent;for(e=e.firstChild;e;e=e.nextSibling)n+=o(e)}else if(3===i||4===i)return e.nodeValue}else for(;t=e[r++];)n+=o(t);return n},(x=se.selectors={cacheLength:50,createPseudo:le,match:G,attrHandle:{},find:{},relative:{">":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(e){return e[1]=e[1].replace(re,f),e[3]=(e[3]||e[4]||e[5]||").replace(re,f),"~="===e[2]&&(e[3]=" "+e[3]+" "),e.slice(0,4)},CHILD:function(e){return e[1]=e[1].toLowerCase(),"nth"===e[1].slice(0,3)?(e[3]||se.error(e[0]),e[4]=+(e[4]?e[5]+(e[6]||1):2*("even"===e[3]||"odd"===e[3])),e[5]=+(e[7]+e[8]||"odd"===e[3])):e[3]&&se.error(e[0]),e},PSEUDO:function(e){var t,n=!e[6]&&e[2];return G.CHILD.test(e[0])?null:(e[3]?e[2]=e[4]||e[5]||":n&&Y.test(n)&&(t=h(n,!0))&&(t=n.indexOf(")",n.length-t)-n.length)&&(e[0]=e[0].slice(0,t),e[2]=n.slice(0,t)),e.slice(0,3))}},filter:{TAG:function(e){var t=e.replace(re,f).toLowerCase();return"*"===e?function(){return!0}:function(e){return e.nodeName&&e.nodeName.toLowerCase()===t}},CLASS:function(e){var t=A[e+" "];return t||(t=new RegExp("(^|"+$+")"+e+"("+$+"|$)"))&&A(e,function(e){return t.test("string"==typeof e.className&&e.className||void 0!==e.getAttribute&&e.getAttribute("class")||")})},ATTR:function(n,r,i){return function(e){var t=se.attr(e,n);return null==t?"!="===r:!r||(t+=","="===r?t===i:"!="===r?t!==i:"^="===r?i&&0===t.indexOf(i):"*="===r?i&&-1<t.indexOf(i):"$="===r?i&&t.slice(-i.length)===i:"~="===r?-1<(" "+t.replace(z," ")+" ").indexOf(i):"|="===r&&(t===i||t.slice(0,i.length+1)===i+"-"))}},CHILD:function(h,e,t,m,g){var v="nth"!==h.slice(0,3),y="last"!==h.slice(-4),b="of-type"===e;return 1===m&&0===g?function(e){return!!e.parentNode}:function(e,t,n){var r,i,o,a,s,u,l=v!=y?"nextSibling":"previousSibling",c=e.parentNode,f=b&&e.nodeName.toLowerCase(),d=!n&&!b,p=!1;if(c){if(v){for(;l;){for(a=e;a=a[l];)if(b?a.nodeName.toLowerCase()===f:1===a.nodeType)return!1;u=l="only"===h&&!u&&"nextSibling"}return!0}if(u=[y?c.firstChild:c.lastChild],y&&d){for(p=(s=(r=(i=(o=(a=c)[S]||(a[S]={}))[a.uniqueID]||(o[a.uniqueID]={}))[h]||[])[0]===k&&r[1])&&r[2],a=s&&c.childNodes[s];a=++s&&a&&a[l]||(p=s=0)||u.pop();)if(1===a.nodeType&&++p&&a===e){i[h]=[k,s,p];break}}else if(d&&(p=s=(r=(i=(o=(a=e)[S]||(a[S]={}))[a.uniqueID]||(o[a.uniqueID]={}))[h]||[])[0]===k&&r[1]),!1===p)for(;(a=++s&&a&&a[l]||(p=s=0)||u.pop())&&((b?a.nodeName.toLowerCase()!==f:1!==a.nodeType)||!++p||(d&&((i=(o=a[S]||(a[S]={}))[a.uniqueID]||(o[a.uniqueID]={}))[h]=[k,p]),a!==e)););return(p-=g)===m||p%m==0&&0<=p/m}}},PSEUDO:function(e,o){var t,a=x.pseudos[e]||x.setFilters[e.toLowerCase()]||se.error("unsupported pseudo: "+e);return a[S]?a(o):1<a.length?(t=[e,e,",o],x.setFilters.hasOwnProperty(e.toLowerCase())?le(function(e,t){for(var n,r=a(e,o),i=r.length;i--;)e[n=M(e,r[i])]=!(t[n]=r[i])}):function(e){return a(e,0,t)}):a}},pseudos:{not:le(function(e){var r=[],i=[],s=d(e.replace(W,"$1"));return s[S]?le(function(e,t,n,r){for(var i,o=s(e,null,r,[]),a=e.length;a--;)(i=o[a])&&(e[a]=!(t[a]=i))}):function(e,t,n){return r[0]=e,s(r,null,n,i),r[0]=null,!i.pop()}}),has:le(function(t){return function(e){return 0<se(t,e).length}}),contains:le(function(t){return t=t.replace(re,f),function(e){return-1<(e.textContent||o(e)).indexOf(t)}}),lang:le(function(n){return V.test(n||")||se.error("unsupported lang: 
"+n),n=n.replace(re,f).toLowerCase(),function(e){var t;do{if(t=E?e.lang:e.getAttribute("xml:lang")||e.getAttribute("lang"))return(t=t.toLowerCase())===n||0===t.indexOf(n+"-")}while((e=e.parentNode)&&1===e.nodeType);return!1}}),target:function(e){var t=n.location&&n.location.hash;return t&&t.slice(1)===e.id},root:function(e){return e===s},focus:function(e){return e===T.activeElement&&(!T.hasFocus||T.hasFocus())&&!!(e.type||e.href||~e.tabIndex)},enabled:me(!1),disabled:me(!0),checked:function(e){var t=e.nodeName.toLowerCase();return"input"===t&&!!e.checked||"option"===t&&!!e.selected},selected:function(e){return e.parentNode&&e.parentNode.selectedIndex,!0===e.selected},empty:function(e){for(e=e.firstChild;e;e=e.nextSibling)if(e.nodeType<6)return!1;return!0},parent:function(e){return!x.pseudos.empty(e)},header:function(e){return J.test(e.nodeName)},input:function(e){return Z.test(e.nodeName)},button:function(e){var t=e.nodeName.toLowerCase();return"input"===t&&"button"===e.type||"button"===t},text:function(e){var t;return"input"===e.nodeName.toLowerCase()&&"text"===e.type&&(null==(t=e.getAttribute("type"))||"text"===t.toLowerCase())},first:ge(function(){return[0]}),last:ge(function(e,t){return[t-1]}),eq:ge(function(e,t,n){return[n<0?n+t:n]}),even:ge(function(e,t){for(var n=0;n<t;n+=2)e.push(n);return e}),odd:ge(function(e,t){for(var n=1;n<t;n+=2)e.push(n);return e}),lt:ge(function(e,t,n){for(var r=n<0?n+t:t<n?t:n;0<=--r;)e.push(r);return e}),gt:ge(function(e,t,n){for(var r=n<0?n+t:n;++r<t;)e.push(r);return e})}}).pseudos.nth=x.pseudos.eq,{radio:!0,checkbox:!0,file:!0,password:!0,image:!0})x.pseudos[e]=pe(e);for(e in{submit:!0,reset:!0})x.pseudos[e]=he(e);function ye(){}function be(e){for(var t=0,n=e.length,r=";t<n;t++)r+=e[t].value;return r}function xe(s,e,t){var u=e.dir,l=e.next,c=l||u,f=t&&"parentNode"===c,d=r++;return e.first?function(e,t,n){for(;e=e[u];)if(1===e.nodeType||f)return s(e,t,n);return!1}:function(e,t,n){var r,i,o,a=[k,d];if(n){for(;e=e[u];)if((1===e.nodeType||f)&&s(e,t,n))return!0}else for(;e=e[u];)if(1===e.nodeType||f)if(i=(o=e[S]||(e[S]={}))[e.uniqueID]||(o[e.uniqueID]={}),l&&l===e.nodeName.toLowerCase())e=e[u]||e;else{if((r=i[c])&&r[0]===k&&r[1]===d)return a[2]=r[2];if((i[c]=a)[2]=s(e,t,n))return!0}return!1}}function we(i){return 1<i.length?function(e,t,n){for(var r=i.length;r--;)if(!i[r](e,t,n))return!1;return!0}:i[0]}function Ce(e,t,n,r,i){for(var o,a=[],s=0,u=e.length,l=null!=t;s<u;s++)(o=e[s])&&(n&&!n(o,r,i)||(a.push(o),l&&t.push(s)));return a}function Te(p,h,m,g,v,e){return g&&!g[S]&&(g=Te(g)),v&&!v[S]&&(v=Te(v,e)),le(function(e,t,n,r){var i,o,a,s=[],u=[],l=t.length,c=e||function(e,t,n){for(var r=0,i=t.length;r<i;r++)se(e,t[r],n);return n}(h||"*",n.nodeType?[n]:n,[]),f=!p||!e&&h?c:Ce(c,s,p,n,r),d=m?v||(e?p:l||g)?[]:t:f;if(m&&m(f,d,n,r),g)for(i=Ce(d,u),g(i,[],n,r),o=i.length;o--;)(a=i[o])&&(d[u[o]]=!(f[u[o]]=a));if(e){if(v||p){if(v){for(i=[],o=d.length;o--;)(a=d[o])&&i.push(f[o]=a);v(null,d=[],i,r)}for(o=d.length;o--;)(a=d[o])&&-1<(i=v?M(e,a):s[o])&&(e[i]=!(t[i]=a))}}else d=Ce(d===t?d.splice(l,d.length):d),v?v(null,t,d,r):H.apply(t,d)})}function Ee(e){for(var i,t,n,r=e.length,o=x.relative[e[0].type],a=o||x.relative[" "],s=o?1:0,u=xe(function(e){return e===i},a,!0),l=xe(function(e){return-1<M(i,e)},a,!0),c=[function(e,t,n){var r=!o&&(n||t!==w)||((i=t).nodeType?u:l)(e,t,n);return i=null,r}];s<r;s++)if(t=x.relative[e[s].type])c=[xe(we(c),t)];else{if((t=x.filter[e[s].type].apply(null,e[s].matches))[S]){for(n=++s;n<r&&!x.relative[e[n].type];n++);return 
Te(1<s&&we(c),1<s&&be(e.slice(0,s-1).concat({value:" "===e[s-2].type?"*":"})).replace(W,"$1"),t,s<n&&Ee(e.slice(s,n)),n<r&&Ee(e=e.slice(n)),n<r&&be(e))}c.push(t)}return we(c)}function Se(g,v){function e(e,t,n,r,i){var o,a,s,u=0,l="0",c=e&&[],f=[],d=w,p=e||b&&x.find.TAG("*",i),h=k+=null==d?1:Math.random()||.1,m=p.length;for(i&&(w=t==T||t||i);l!==m&&null!=(o=p[l]);l++){if(b&&o){for(a=0,t||o.ownerDocument==T||(C(o),n=!E);s=g[a++];)if(s(o,t||T,n)){r.push(o);break}i&&(k=h)}y&&((o=!s&&o)&&u--,e&&c.push(o))}if(u+=l,y&&l!==u){for(a=0;s=v[a++];)s(c,f,t,n);if(e){if(0<u)for(;l--;)c[l]||f[l]||(f[l]=O.call(r));f=Ce(f)}H.apply(r,f),i&&!e&&0<f.length&&1<u+v.length&&se.uniqueSort(r)}return i&&(k=h,w=d),c}var y=0<v.length,b=0<g.length;return y?le(e):e}return ye.prototype=x.filters=x.pseudos,x.setFilters=new ye,h=se.tokenize=function(e,t){var n,r,i,o,a,s,u,l=N[e+" "];if(l)return t?0:l.slice(0);for(a=e,s=[],u=x.preFilter;a;){for(o in n&&!(r=U.exec(a))||(r&&(a=a.slice(r[0].length)||a),s.push(i=[])),n=!1,(r=X.exec(a))&&(n=r.shift(),i.push({value:n,type:r[0].replace(W," ")}),a=a.slice(n.length)),x.filter)!(r=G[o].exec(a))||u[o]&&!(r=u[o](r))||(n=r.shift(),i.push({value:n,type:o,matches:r}),a=a.slice(n.length));if(!n)break}return t?a.length:a?se.error(e):N(e,s).slice(0)},d=se.compile=function(e,t){var n,r=[],i=[],o=j[e+" "];if(!o){for(n=(t=t||h(e)).length;n--;)(o=Ee(t[n]))[S]?r.push(o):i.push(o);(o=j(e,Se(i,r))).selector=e}return o},m=se.select=function(e,t,n,r){var i,o,a,s,u,l="function"==typeof e&&e,c=!r&&h(e=l.selector||e);if(n=n||[],1===c.length){if(2<(o=c[0]=c[0].slice(0)).length&&"ID"===(a=o[0]).type&&9===t.nodeType&&E&&x.relative[o[1].type]){if(!(t=(x.find.ID(a.matches[0].replace(re,f),t)||[])[0]))return n;l&&(t=t.parentNode),e=e.slice(o.shift().value.length)}for(i=G.needsContext.test(e)?0:o.length;i--&&(a=o[i],!x.relative[s=a.type]);)if((u=x.find[s])&&(r=u(a.matches[0].replace(re,f),ne.test(o[0].type)&&ve(t.parentNode)||t))){if(o.splice(i,1),!(e=r.length&&be(o)))return H.apply(n,r),n;break}}return(l||d(e,c))(r,t,!E,n,!t||ne.test(e)&&ve(t.parentNode)||t),n},p.sortStable=S.split(").sort(L).join(")===S,p.detectDuplicates=!!l,C(),p.sortDetached=ce(function(e){return 1&e.compareDocumentPosition(T.createElement("fieldset"))}),ce(function(e){return e.innerHTML="<a href='#'></a>","#"===e.firstChild.getAttribute("href")})||fe("type|href|height|width",function(e,t,n){if(!n)return e.getAttribute(t,"type"===t.toLowerCase()?1:2)}),p.attributes&&ce(function(e){return e.innerHTML="<input/>",e.firstChild.setAttribute("value","),"===e.firstChild.getAttribute("value")})||fe("value",function(e,t,n){if(!n&&"input"===e.nodeName.toLowerCase())return e.defaultValue}),ce(function(e){return null==e.getAttribute("disabled")})||fe(_,function(e,t,n){var r;if(!n)return!0===e[t]?t.toLowerCase():(r=e.getAttributeNode(t))&&r.specified?r.value:null}),se}(T);S.find=p,S.expr=p.selectors,S.expr[":"]=S.expr.pseudos,S.uniqueSort=S.unique=p.uniqueSort,S.text=p.getText,S.isXMLDoc=p.isXML,S.contains=p.contains,S.escapeSelector=p.escape;function h(e,t,n){for(var r=[],i=void 0!==n;(e=e[t])&&9!==e.nodeType;)if(1===e.nodeType){if(i&&S(e).is(n))break;r.push(e)}return r}function C(e,t){for(var n=[];e;e=e.nextSibling)1===e.nodeType&&e!==t&&n.push(e);return n}var k=S.expr.match.needsContext;function A(e,t){return e.nodeName&&e.nodeName.toLowerCase()===t.toLowerCase()}var N=/^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i;function j(e,n,r){return 
b(n)?S.grep(e,function(e,t){return!!n.call(e,t,e)!==r}):n.nodeType?S.grep(e,function(e){return e===n!==r}):"string"!=typeof n?S.grep(e,function(e){return-1<i.call(n,e)!==r}):S.filter(n,e,r)}S.filter=function(e,t,n){var r=t[0];return n&&(e=":not("+e+")"),1===t.length&&1===r.nodeType?S.find.matchesSelector(r,e)?[r]:[]:S.find.matches(e,S.grep(t,function(e){return 1===e.nodeType}))},S.fn.extend({find:function(e){var t,n,r=this.length,i=this;if("string"!=typeof e)return this.pushStack(S(e).filter(function(){for(t=0;t<r;t++)if(S.contains(i[t],this))return!0}));for(n=this.pushStack([]),t=0;t<r;t++)S.find(e,i[t],n);return 1<r?S.uniqueSort(n):n},filter:function(e){return this.pushStack(j(this,e||[],!1))},not:function(e){return this.pushStack(j(this,e||[],!0))},is:function(e){return!!j(this,"string"==typeof e&&k.test(e)?S(e):e||[],!1).length}});var I,L=/^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/;(S.fn.init=function(e,t,n){var r,i;if(!e)return this;if(n=n||I,"string"!=typeof e)return e.nodeType?(this[0]=e,this.length=1,this):b(e)?void 0!==n.ready?n.ready(e):e(S):S.makeArray(e,this);if(!(r="<"===e[0]&&">"===e[e.length-1]&&3<=e.length?[null,e,null]:L.exec(e))||!r[1]&&t)return!t||t.jquery?(t||n).find(e):this.constructor(t).find(e);if(r[1]){if(t=t instanceof S?t[0]:t,S.merge(this,S.parseHTML(r[1],t&&t.nodeType?t.ownerDocument||t:E,!0)),N.test(r[1])&&S.isPlainObject(t))for(r in t)b(this[r])?this[r](t[r]):this.attr(r,t[r]);return this}return(i=E.getElementById(r[2]))&&(this[0]=i,this.length=1),this}).prototype=S.fn,I=S(E);var D=/^(?:parents|prev(?:Until|All))/,O={children:!0,contents:!0,next:!0,prev:!0};function P(e,t){for(;(e=e[t])&&1!==e.nodeType;);return e}S.fn.extend({has:function(e){var t=S(e,this),n=t.length;return this.filter(function(){for(var e=0;e<n;e++)if(S.contains(this,t[e]))return!0})},closest:function(e,t){var n,r=0,i=this.length,o=[],a="string"!=typeof e&&S(e);if(!k.test(e))for(;r<i;r++)for(n=this[r];n&&n!==t;n=n.parentNode)if(n.nodeType<11&&(a?-1<a.index(n):1===n.nodeType&&S.find.matchesSelector(n,e))){o.push(n);break}return this.pushStack(1<o.length?S.uniqueSort(o):o)},index:function(e){return e?"string"==typeof e?i.call(S(e),this[0]):i.call(this,e.jquery?e[0]:e):this[0]&&this[0].parentNode?this.first().prevAll().length:-1},add:function(e,t){return this.pushStack(S.uniqueSort(S.merge(this.get(),S(e,t))))},addBack:function(e){return this.add(null==e?this.prevObject:this.prevObject.filter(e))}}),S.each({parent:function(e){var t=e.parentNode;return t&&11!==t.nodeType?t:null},parents:function(e){return h(e,"parentNode")},parentsUntil:function(e,t,n){return h(e,"parentNode",n)},next:function(e){return P(e,"nextSibling")},prev:function(e){return P(e,"previousSibling")},nextAll:function(e){return h(e,"nextSibling")},prevAll:function(e){return h(e,"previousSibling")},nextUntil:function(e,t,n){return h(e,"nextSibling",n)},prevUntil:function(e,t,n){return h(e,"previousSibling",n)},siblings:function(e){return C((e.parentNode||{}).firstChild,e)},children:function(e){return C(e.firstChild)},contents:function(e){return null!=e.contentDocument&&r(e.contentDocument)?e.contentDocument:(A(e,"template")&&(e=e.content||e),S.merge([],e.childNodes))}},function(r,i){S.fn[r]=function(e,t){var n=S.map(this,i,e);return"Until"!==r.slice(-5)&&(t=e),t&&"string"==typeof t&&(n=S.filter(t,n)),1<this.length&&(O[r]||S.uniqueSort(n),D.test(r)&&n.reverse()),this.pushStack(n)}});var H=/[^\x20\t\r\n\f]+/g;function q(e){return e}function M(e){throw e}function _(e,t,n,r){var 
i;try{e&&b(i=e.promise)?i.call(e).done(t).fail(n):e&&b(i=e.then)?i.call(e,t,n):t.apply(void 0,[e].slice(r))}catch(e){n.apply(void 0,[e])}}S.Callbacks=function(r){var e,n;r="string"==typeof r?(e=r,n={},S.each(e.match(H)||[],function(e,t){n[t]=!0}),n):S.extend({},r);function i(){for(s=s||r.once,a=o=!0;l.length;c=-1)for(t=l.shift();++c<u.length;)!1===u[c].apply(t[0],t[1])&&r.stopOnFalse&&(c=u.length,t=!1);r.memory||(t=!1),o=!1,s&&(u=t?[]:")}var o,t,a,s,u=[],l=[],c=-1,f={add:function(){return u&&(t&&!o&&(c=u.length-1,l.push(t)),function n(e){S.each(e,function(e,t){b(t)?r.unique&&f.has(t)||u.push(t):t&&t.length&&"string"!==w(t)&&n(t)})}(arguments),t&&!o&&i()),this},remove:function(){return S.each(arguments,function(e,t){for(var n;-1<(n=S.inArray(t,u,n));)u.splice(n,1),n<=c&&c--}),this},has:function(e){return e?-1<S.inArray(e,u):0<u.length},empty:function(){return u=u&&[],this},disable:function(){return s=l=[],u=t=",this},disabled:function(){return!u},lock:function(){return s=l=[],t||o||(u=t="),this},locked:function(){return!!s},fireWith:function(e,t){return s||(t=[e,(t=t||[]).slice?t.slice():t],l.push(t),o||i()),this},fire:function(){return f.fireWith(this,arguments),this},fired:function(){return!!a}};return f},S.extend({Deferred:function(e){var o=[["notify","progress",S.Callbacks("memory"),S.Callbacks("memory"),2],["resolve","done",S.Callbacks("once memory"),S.Callbacks("once memory"),0,"resolved"],["reject","fail",S.Callbacks("once memory"),S.Callbacks("once memory"),1,"rejected"]],i="pending",a={state:function(){return i},always:function(){return s.done(arguments).fail(arguments),this},catch:function(e){return a.then(null,e)},pipe:function(){var i=arguments;return S.Deferred(function(r){S.each(o,function(e,t){var n=b(i[t[4]])&&i[t[4]];s[t[1]](function(){var e=n&&n.apply(this,arguments);e&&b(e.promise)?e.promise().progress(r.notify).done(r.resolve).fail(r.reject):r[t[0]+"With"](this,n?[e]:arguments)})}),i=null}).promise()},then:function(t,n,r){var u=0;function l(i,o,a,s){return function(){function e(){var e,t;if(!(i<u)){if((e=a.apply(n,r))===o.promise())throw new TypeError("Thenable self-resolution");t=e&&("object"==typeof e||"function"==typeof e)&&e.then,b(t)?s?t.call(e,l(u,o,q,s),l(u,o,M,s)):(u++,t.call(e,l(u,o,q,s),l(u,o,M,s),l(u,o,q,o.notifyWith))):(a!==q&&(n=void 0,r=[e]),(s||o.resolveWith)(n,r))}}var n=this,r=arguments,t=s?e:function(){try{e()}catch(e){S.Deferred.exceptionHook&&S.Deferred.exceptionHook(e,t.stackTrace),u<=i+1&&(a!==M&&(n=void 0,r=[e]),o.rejectWith(n,r))}};i?t():(S.Deferred.getStackHook&&(t.stackTrace=S.Deferred.getStackHook()),T.setTimeout(t))}}return S.Deferred(function(e){o[0][3].add(l(0,e,b(r)?r:q,e.notifyWith)),o[1][3].add(l(0,e,b(t)?t:q)),o[2][3].add(l(0,e,b(n)?n:M))}).promise()},promise:function(e){return null!=e?S.extend(e,a):a}},s={};return S.each(o,function(e,t){var n=t[2],r=t[5];a[t[1]]=n.add,r&&n.add(function(){i=r},o[3-e][2].disable,o[3-e][3].disable,o[0][2].lock,o[0][3].lock),n.add(t[3].fire),s[t[0]]=function(){return s[t[0]+"With"](this===s?void 0:this,arguments),this},s[t[0]+"With"]=n.fireWith}),a.promise(s),e&&e.call(s,s),s},when:function(e){function t(t){return function(e){i[t]=this,o[t]=1<arguments.length?s.call(arguments):e,--n||a.resolveWith(i,o)}}var n=arguments.length,r=n,i=Array(r),o=s.call(arguments),a=S.Deferred();if(n<=1&&(_(e,a.done(t(r)).resolve,a.reject,!n),"pending"===a.state()||b(o[r]&&o[r].then)))return a.then();for(;r--;)_(o[r],t(r),a.reject);return a.promise()}});var 
$=/^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/;S.Deferred.exceptionHook=function(e,t){T.console&&T.console.warn&&e&&$.test(e.name)&&T.console.warn("jQuery.Deferred exception: "+e.message,e.stack,t)},S.readyException=function(e){T.setTimeout(function(){throw e})};var R=S.Deferred();function B(){E.removeEventListener("DOMContentLoaded",B),T.removeEventListener("load",B),S.ready()}S.fn.ready=function(e){return R.then(e).catch(function(e){S.readyException(e)}),this},S.extend({isReady:!1,readyWait:1,ready:function(e){(!0===e?--S.readyWait:S.isReady)||(S.isReady=!0)!==e&&0<--S.readyWait||R.resolveWith(E,[S])}}),S.ready.then=R.then,"complete"===E.readyState||"loading"!==E.readyState&&!E.documentElement.doScroll?T.setTimeout(S.ready):(E.addEventListener("DOMContentLoaded",B),T.addEventListener("load",B));var F=function(e,t,n,r,i,o,a){var s=0,u=e.length,l=null==n;if("object"===w(n))for(s in i=!0,n)F(e,t,s,n[s],!0,o,a);else if(void 0!==r&&(i=!0,b(r)||(a=!0),l&&(t=a?(t.call(e,r),null):(l=t,function(e,t,n){return l.call(S(e),n)})),t))for(;s<u;s++)t(e[s],n,a?r:r.call(e[s],s,t(e[s],n)));return i?e:l?t.call(e):u?t(e[0],n):o},z=/^-ms-/,W=/-([a-z])/g;function U(e,t){return t.toUpperCase()}function X(e){return e.replace(z,"ms-").replace(W,U)}function Q(e){return 1===e.nodeType||9===e.nodeType||!+e.nodeType}function Y(){this.expando=S.expando+Y.uid++}Y.uid=1,Y.prototype={cache:function(e){var t=e[this.expando];return t||(t={},Q(e)&&(e.nodeType?e[this.expando]=t:Object.defineProperty(e,this.expando,{value:t,configurable:!0}))),t},set:function(e,t,n){var r,i=this.cache(e);if("string"==typeof t)i[X(t)]=n;else for(r in t)i[X(r)]=t[r];return i},get:function(e,t){return void 0===t?this.cache(e):e[this.expando]&&e[this.expando][X(t)]},access:function(e,t,n){return void 0===t||t&&"string"==typeof t&&void 0===n?this.get(e,t):(this.set(e,t,n),void 0!==n?n:t)},remove:function(e,t){var n,r=e[this.expando];if(void 0!==r){if(void 0!==t){n=(t=Array.isArray(t)?t.map(X):(t=X(t))in r?[t]:t.match(H)||[]).length;for(;n--;)delete r[t[n]]}void 0!==t&&!S.isEmptyObject(r)||(e.nodeType?e[this.expando]=void 0:delete e[this.expando])}},hasData:function(e){var t=e[this.expando];return void 0!==t&&!S.isEmptyObject(t)}};var V=new Y,G=new Y,K=/^(?:\{[\w\W]*\}|\[[\w\W]*\])$/,Z=/[A-Z]/g;function J(e,t,n){var r,i;if(void 0===n&&1===e.nodeType)if(r="data-"+t.replace(Z,"-$&").toLowerCase(),"string"==typeof(n=e.getAttribute(r))){try{n="true"===(i=n)||"false"!==i&&("null"===i?null:i===+i+"?+i:K.test(i)?JSON.parse(i):i)}catch(e){}G.set(e,t,n)}else n=void 0;return n}S.extend({hasData:function(e){return G.hasData(e)||V.hasData(e)},data:function(e,t,n){return G.access(e,t,n)},removeData:function(e,t){G.remove(e,t)},_data:function(e,t,n){return V.access(e,t,n)},_removeData:function(e,t){V.remove(e,t)}}),S.fn.extend({data:function(n,e){var t,r,i,o=this[0],a=o&&o.attributes;if(void 0!==n)return"object"==typeof n?this.each(function(){G.set(this,n)}):F(this,function(e){var t;if(o&&void 0===e)return void 0!==(t=G.get(o,n))||void 0!==(t=J(o,n))?t:void 0;this.each(function(){G.set(this,n,e)})},null,e,1<arguments.length,null,!0);if(this.length&&(i=G.get(o),1===o.nodeType&&!V.get(o,"hasDataAttrs"))){for(t=a.length;t--;)a[t]&&0===(r=a[t].name).indexOf("data-")&&(r=X(r.slice(5)),J(o,r,i[r]));V.set(o,"hasDataAttrs",!0)}return i},removeData:function(e){return this.each(function(){G.remove(this,e)})}}),S.extend({queue:function(e,t,n){var r;if(e)return 
t=(t||"fx")+"queue",r=V.get(e,t),n&&(!r||Array.isArray(n)?r=V.access(e,t,S.makeArray(n)):r.push(n)),r||[]},dequeue:function(e,t){t=t||"fx";var n=S.queue(e,t),r=n.length,i=n.shift(),o=S._queueHooks(e,t);"inprogress"===i&&(i=n.shift(),r--),i&&("fx"===t&&n.unshift("inprogress"),delete o.stop,i.call(e,function(){S.dequeue(e,t)},o)),!r&&o&&o.empty.fire()},_queueHooks:function(e,t){var n=t+"queueHooks";return V.get(e,n)||V.access(e,n,{empty:S.Callbacks("once memory").add(function(){V.remove(e,[t+"queue",n])})})}}),S.fn.extend({queue:function(t,n){var e=2;return"string"!=typeof t&&(n=t,t="fx",e--),arguments.length<e?S.queue(this[0],t):void 0===n?this:this.each(function(){var e=S.queue(this,t,n);S._queueHooks(this,t),"fx"===t&&"inprogress"!==e[0]&&S.dequeue(this,t)})},dequeue:function(e){return this.each(function(){S.dequeue(this,e)})},clearQueue:function(e){return this.queue(e||"fx",[])},promise:function(e,t){function n(){--i||o.resolveWith(a,[a])}var r,i=1,o=S.Deferred(),a=this,s=this.length;for("string"!=typeof e&&(t=e,e=void 0),e=e||"fx";s--;)(r=V.get(a[s],e+"queueHooks"))&&r.empty&&(i++,r.empty.add(n));return n(),o.promise(t)}});var ee=/[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/.source,te=new RegExp("^(?:([+-])=|)("+ee+")([a-z%]*)$","i"),ne=["Top","Right","Bottom","Left"],re=E.documentElement,ie=function(e){return S.contains(e.ownerDocument,e)},oe={composed:!0};re.getRootNode&&(ie=function(e){return S.contains(e.ownerDocument,e)||e.getRootNode(oe)===e.ownerDocument});var ae=function(e,t){return"none"===(e=t||e).style.display||"===e.style.display&&ie(e)&&"none"===S.css(e,"display")};function se(e,t,n,r){var i,o,a=20,s=r?function(){return r.cur()}:function(){return S.css(e,t,")},u=s(),l=n&&n[3]||(S.cssNumber[t]?":"px"),c=e.nodeType&&(S.cssNumber[t]||"px"!==l&&+u)&&te.exec(S.css(e,t));if(c&&c[3]!==l){for(u/=2,l=l||c[3],c=+u||1;a--;)S.style(e,t,c+l),(1-o)*(1-(o=s()/u||.5))<=0&&(a=0),c/=o;c*=2,S.style(e,t,c+l),n=n||[]}return n&&(c=+c||+u||0,i=n[1]?c+(n[1]+1)*n[2]:+n[2],r&&(r.unit=l,r.start=c,r.end=i)),i}var ue={};function le(e,t){for(var n,r,i,o,a,s,u,l=[],c=0,f=e.length;c<f;c++)(r=e[c]).style&&(n=r.style.display,t?("none"===n&&(l[c]=V.get(r,"display")||null,l[c]||(r.style.display=")),"===r.style.display&&ae(r)&&(l[c]=(u=a=o=void 0,a=(i=r).ownerDocument,s=i.nodeName,(u=ue[s])||(o=a.body.appendChild(a.createElement(s)),u=S.css(o,"display"),o.parentNode.removeChild(o),"none"===u&&(u="block"),ue[s]=u)))):"none"!==n&&(l[c]="none",V.set(r,"display",n)));for(c=0;c<f;c++)null!=l[c]&&(e[c].style.display=l[c]);return e}S.fn.extend({show:function(){return le(this,!0)},hide:function(){return le(this)},toggle:function(e){return"boolean"==typeof e?e?this.show():this.hide():this.each(function(){ae(this)?S(this).show():S(this).hide()})}});var ce,fe,de=/^(?:checkbox|radio)$/i,pe=/<([a-z][^\/\0>\x20\t\r\n\f]*)/i,he=/^$|^module$|\/(?:java|ecma)script/i;ce=E.createDocumentFragment().appendChild(E.createElement("div")),(fe=E.createElement("input")).setAttribute("type","radio"),fe.setAttribute("checked","checked"),fe.setAttribute("name","t"),ce.appendChild(fe),y.checkClone=ce.cloneNode(!0).cloneNode(!0).lastChild.checked,ce.innerHTML="<textarea>x</textarea>",y.noCloneChecked=!!ce.cloneNode(!0).lastChild.defaultValue,ce.innerHTML="<option></option>",y.option=!!ce.lastChild;var me={thead:[1,"<table>","</table>"],col:[2,"<table><colgroup>","</colgroup></table>"],tr:[2,"<table><tbody>","</tbody></table>"],td:[3,"<table><tbody><tr>","</tr></tbody></table>"],_default:[0,","]};function ge(e,t){var n;return n=void 
0!==e.getElementsByTagName?e.getElementsByTagName(t||"*"):void 0!==e.querySelectorAll?e.querySelectorAll(t||"*"):[],void 0===t||t&&A(e,t)?S.merge([e],n):n}function ve(e,t){for(var n=0,r=e.length;n<r;n++)V.set(e[n],"globalEval",!t||V.get(t[n],"globalEval"))}me.tbody=me.tfoot=me.colgroup=me.caption=me.thead,me.th=me.td,y.option||(me.optgroup=me.option=[1,"<select multiple='multiple'>","</select>"]);var ye=/<|&#?\w+;/;function be(e,t,n,r,i){for(var o,a,s,u,l,c,f=t.createDocumentFragment(),d=[],p=0,h=e.length;p<h;p++)if((o=e[p])||0===o)if("object"===w(o))S.merge(d,o.nodeType?[o]:o);else if(ye.test(o)){for(a=a||f.appendChild(t.createElement("div")),s=(pe.exec(o)||[","])[1].toLowerCase(),u=me[s]||me._default,a.innerHTML=u[1]+S.htmlPrefilter(o)+u[2],c=u[0];c--;)a=a.lastChild;S.merge(d,a.childNodes),(a=f.firstChild).textContent="}else d.push(t.createTextNode(o));for(f.textContent=",p=0;o=d[p++];)if(r&&-1<S.inArray(o,r))i&&i.push(o);else if(l=ie(o),a=ge(f.appendChild(o),"script"),l&&ve(a),n)for(c=0;o=a[c++];)he.test(o.type||")&&n.push(o);return f}var xe=/^key/,we=/^(?:mouse|pointer|contextmenu|drag|drop)|click/,Ce=/^([^.]*)(?:\.(.+)|)/;function Te(){return!0}function Ee(){return!1}function Se(e,t){return e===function(){try{return E.activeElement}catch(e){}}()==("focus"===t)}function ke(e,t,n,r,i,o){var a,s;if("object"==typeof t){for(s in"string"!=typeof n&&(r=r||n,n=void 0),t)ke(e,s,n,r,t[s],o);return e}if(null==r&&null==i?(i=n,r=n=void 0):null==i&&("string"==typeof n?(i=r,r=void 0):(i=r,r=n,n=void 0)),!1===i)i=Ee;else if(!i)return e;return 1===o&&(a=i,(i=function(e){return S().off(e),a.apply(this,arguments)}).guid=a.guid||(a.guid=S.guid++)),e.each(function(){S.event.add(this,t,i,r,n)})}function Ae(e,i,o){o?(V.set(e,i,!1),S.event.add(e,i,{namespace:!1,handler:function(e){var t,n,r=V.get(this,i);if(1&e.isTrigger&&this[i]){if(r.length)(S.event.special[i]||{}).delegateType&&e.stopPropagation();else if(r=s.call(arguments),V.set(this,i,r),t=o(this,i),this[i](),r!==(n=V.get(this,i))||t?V.set(this,i,!1):n={},r!==n)return e.stopImmediatePropagation(),e.preventDefault(),n.value}else r.length&&(V.set(this,i,{value:S.event.trigger(S.extend(r[0],S.Event.prototype),r.slice(1),this)}),e.stopImmediatePropagation())}})):void 0===V.get(e,i)&&S.event.add(e,i,Te)}S.event={global:{},add:function(t,e,n,r,i){var o,a,s,u,l,c,f,d,p,h,m,g=V.get(t);if(Q(t))for(n.handler&&(n=(o=n).handler,i=o.selector),i&&S.find.matchesSelector(re,i),n.guid||(n.guid=S.guid++),(u=g.events)||(u=g.events=Object.create(null)),(a=g.handle)||(a=g.handle=function(e){return void 0!==S&&S.event.triggered!==e.type?S.event.dispatch.apply(t,arguments):void 0}),l=(e=(e||").match(H)||["]).length;l--;)p=m=(s=Ce.exec(e[l])||[])[1],h=(s[2]||").split(".").sort(),p&&(f=S.event.special[p]||{},p=(i?f.delegateType:f.bindType)||p,f=S.event.special[p]||{},c=S.extend({type:p,origType:m,data:r,handler:n,guid:n.guid,selector:i,needsContext:i&&S.expr.match.needsContext.test(i),namespace:h.join(".")},o),(d=u[p])||((d=u[p]=[]).delegateCount=0,f.setup&&!1!==f.setup.call(t,r,h,a)||t.addEventListener&&t.addEventListener(p,a)),f.add&&(f.add.call(t,c),c.handler.guid||(c.handler.guid=n.guid)),i?d.splice(d.delegateCount++,0,c):d.push(c),S.event.global[p]=!0)},remove:function(e,t,n,r,i){var o,a,s,u,l,c,f,d,p,h,m,g=V.hasData(e)&&V.get(e);if(g&&(u=g.events)){for(l=(t=(t||").match(H)||["]).length;l--;)if(p=m=(s=Ce.exec(t[l])||[])[1],h=(s[2]||").split(".").sort(),p){for(f=S.event.special[p]||{},d=u[p=(r?f.delegateType:f.bindType)||p]||[],s=s[2]&&new 
RegExp("(^|\\.)"+h.join("\\.(?:.*\\.|)")+"(\\.|$)"),a=o=d.length;o--;)c=d[o],!i&&m!==c.origType||n&&n.guid!==c.guid||s&&!s.test(c.namespace)||r&&r!==c.selector&&("**"!==r||!c.selector)||(d.splice(o,1),c.selector&&d.delegateCount--,f.remove&&f.remove.call(e,c));a&&!d.length&&(f.teardown&&!1!==f.teardown.call(e,h,g.handle)||S.removeEvent(e,p,g.handle),delete u[p])}else for(p in u)S.event.remove(e,p+t[l],n,r,!0);S.isEmptyObject(u)&&V.remove(e,"handle events")}},dispatch:function(e){var t,n,r,i,o,a,s=new Array(arguments.length),u=S.event.fix(e),l=(V.get(this,"events")||Object.create(null))[u.type]||[],c=S.event.special[u.type]||{};for(s[0]=u,t=1;t<arguments.length;t++)s[t]=arguments[t];if(u.delegateTarget=this,!c.preDispatch||!1!==c.preDispatch.call(this,u)){for(a=S.event.handlers.call(this,u,l),t=0;(i=a[t++])&&!u.isPropagationStopped();)for(u.currentTarget=i.elem,n=0;(o=i.handlers[n++])&&!u.isImmediatePropagationStopped();)u.rnamespace&&!1!==o.namespace&&!u.rnamespace.test(o.namespace)||(u.handleObj=o,u.data=o.data,void 0!==(r=((S.event.special[o.origType]||{}).handle||o.handler).apply(i.elem,s))&&!1===(u.result=r)&&(u.preventDefault(),u.stopPropagation()));return c.postDispatch&&c.postDispatch.call(this,u),u.result}},handlers:function(e,t){var n,r,i,o,a,s=[],u=t.delegateCount,l=e.target;if(u&&l.nodeType&&!("click"===e.type&&1<=e.button))for(;l!==this;l=l.parentNode||this)if(1===l.nodeType&&("click"!==e.type||!0!==l.disabled)){for(o=[],a={},n=0;n<u;n++)void 0===a[i=(r=t[n]).selector+" "]&&(a[i]=r.needsContext?-1<S(i,this).index(l):S.find(i,this,null,[l]).length),a[i]&&o.push(r);o.length&&s.push({elem:l,handlers:o})}return l=this,u<t.length&&s.push({elem:l,handlers:t.slice(u)}),s},addProp:function(t,e){Object.defineProperty(S.Event.prototype,t,{enumerable:!0,configurable:!0,get:b(e)?function(){if(this.originalEvent)return e(this.originalEvent)}:function(){if(this.originalEvent)return this.originalEvent[t]},set:function(e){Object.defineProperty(this,t,{enumerable:!0,configurable:!0,writable:!0,value:e})}})},fix:function(e){return e[S.expando]?e:new S.Event(e)},special:{load:{noBubble:!0},click:{setup:function(e){var t=this||e;return de.test(t.type)&&t.click&&A(t,"input")&&Ae(t,"click",Te),!1},trigger:function(e){var t=this||e;return de.test(t.type)&&t.click&&A(t,"input")&&Ae(t,"click"),!0},_default:function(e){var t=e.target;return de.test(t.type)&&t.click&&A(t,"input")&&V.get(t,"click")||A(t,"a")}},beforeunload:{postDispatch:function(e){void 0!==e.result&&e.originalEvent&&(e.originalEvent.returnValue=e.result)}}}},S.removeEvent=function(e,t,n){e.removeEventListener&&e.removeEventListener(t,n)},S.Event=function(e,t){if(!(this instanceof S.Event))return new S.Event(e,t);e&&e.type?(this.originalEvent=e,this.type=e.type,this.isDefaultPrevented=e.defaultPrevented||void 0===e.defaultPrevented&&!1===e.returnValue?Te:Ee,this.target=e.target&&3===e.target.nodeType?e.target.parentNode:e.target,this.currentTarget=e.currentTarget,this.relatedTarget=e.relatedTarget):this.type=e,t&&S.extend(this,t),this.timeStamp=e&&e.timeStamp||Date.now(),this[S.expando]=!0},S.Event.prototype={constructor:S.Event,isDefaultPrevented:Ee,isPropagationStopped:Ee,isImmediatePropagationStopped:Ee,isSimulated:!1,preventDefault:function(){var e=this.originalEvent;this.isDefaultPrevented=Te,e&&!this.isSimulated&&e.preventDefault()},stopPropagation:function(){var e=this.originalEvent;this.isPropagationStopped=Te,e&&!this.isSimulated&&e.stopPropagation()},stopImmediatePropagation:function(){var 
e=this.originalEvent;this.isImmediatePropagationStopped=Te,e&&!this.isSimulated&&e.stopImmediatePropagation(),this.stopPropagation()}},S.each({altKey:!0,bubbles:!0,cancelable:!0,changedTouches:!0,ctrlKey:!0,detail:!0,eventPhase:!0,metaKey:!0,pageX:!0,pageY:!0,shiftKey:!0,view:!0,char:!0,code:!0,charCode:!0,key:!0,keyCode:!0,button:!0,buttons:!0,clientX:!0,clientY:!0,offsetX:!0,offsetY:!0,pointerId:!0,pointerType:!0,screenX:!0,screenY:!0,targetTouches:!0,toElement:!0,touches:!0,which:function(e){var t=e.button;return null==e.which&&xe.test(e.type)?null!=e.charCode?e.charCode:e.keyCode:!e.which&&void 0!==t&&we.test(e.type)?1&t?1:2&t?3:4&t?2:0:e.which}},S.event.addProp),S.each({focus:"focusin",blur:"focusout"},function(e,t){S.event.special[e]={setup:function(){return Ae(this,e,Se),!1},trigger:function(){return Ae(this,e),!0},delegateType:t}}),S.each({mouseenter:"mouseover",mouseleave:"mouseout",pointerenter:"pointerover",pointerleave:"pointerout"},function(e,i){S.event.special[e]={delegateType:i,bindType:i,handle:function(e){var t,n=e.relatedTarget,r=e.handleObj;return n&&(n===this||S.contains(this,n))||(e.type=r.origType,t=r.handler.apply(this,arguments),e.type=i),t}}}),S.fn.extend({on:function(e,t,n,r){return ke(this,e,t,n,r)},one:function(e,t,n,r){return ke(this,e,t,n,r,1)},off:function(e,t,n){var r,i;if(e&&e.preventDefault&&e.handleObj)return r=e.handleObj,S(e.delegateTarget).off(r.namespace?r.origType+"."+r.namespace:r.origType,r.selector,r.handler),this;if("object"!=typeof e)return!1!==t&&"function"!=typeof t||(n=t,t=void 0),!1===n&&(n=Ee),this.each(function(){S.event.remove(this,e,n,t)});for(i in e)this.off(i,t,e[i]);return this}});var Ne=/<script|<style|<link/i,je=/checked\s*(?:[^=]|=\s*.checked.)/i,Ie=/^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g;function Le(e,t){return A(e,"table")&&A(11!==t.nodeType?t:t.firstChild,"tr")&&S(e).children("tbody")[0]||e}function De(e){return e.type=(null!==e.getAttribute("type"))+"/"+e.type,e}function Oe(e){return"true/"===(e.type||").slice(0,5)?e.type=e.type.slice(5):e.removeAttribute("type"),e}function Pe(e,t){var n,r,i,o,a,s;if(1===t.nodeType){if(V.hasData(e)&&(s=V.get(e).events))for(i in V.remove(t,"handle events"),s)for(n=0,r=s[i].length;n<r;n++)S.event.add(t,i,s[i][n]);G.hasData(e)&&(o=G.access(e),a=S.extend({},o),G.set(t,a))}}function He(n,r,i,o){r=g(r);var e,t,a,s,u,l,c=0,f=n.length,d=f-1,p=r[0],h=b(p);if(h||1<f&&"string"==typeof p&&!y.checkClone&&je.test(p))return n.each(function(e){var t=n.eq(e);h&&(r[0]=p.call(this,e,t.html())),He(t,r,i,o)});if(f&&(t=(e=be(r,n[0].ownerDocument,!1,n,o)).firstChild,1===e.childNodes.length&&(e=t),t||o)){for(s=(a=S.map(ge(e,"script"),De)).length;c<f;c++)u=e,c!==d&&(u=S.clone(u,!0,!0),s&&S.merge(a,ge(u,"script"))),i.call(n[c],u,c);if(s)for(l=a[a.length-1].ownerDocument,S.map(a,Oe),c=0;c<s;c++)u=a[c],he.test(u.type||")&&!V.access(u,"globalEval")&&S.contains(l,u)&&(u.src&&"module"!==(u.type||").toLowerCase()?S._evalUrl&&!u.noModule&&S._evalUrl(u.src,{nonce:u.nonce||u.getAttribute("nonce")},l):x(u.textContent.replace(Ie,"),u,l))}return n}function qe(e,t,n){for(var r,i=t?S.filter(t,e):e,o=0;null!=(r=i[o]);o++)n||1!==r.nodeType||S.cleanData(ge(r)),r.parentNode&&(n&&ie(r)&&ve(ge(r,"script")),r.parentNode.removeChild(r));return e}S.extend({htmlPrefilter:function(e){return e},clone:function(e,t,n){var 
r,i,o,a,s,u,l,c=e.cloneNode(!0),f=ie(e);if(!(y.noCloneChecked||1!==e.nodeType&&11!==e.nodeType||S.isXMLDoc(e)))for(a=ge(c),r=0,i=(o=ge(e)).length;r<i;r++)s=o[r],u=a[r],"input"===(l=u.nodeName.toLowerCase())&&de.test(s.type)?u.checked=s.checked:"input"!==l&&"textarea"!==l||(u.defaultValue=s.defaultValue);if(t)if(n)for(o=o||ge(e),a=a||ge(c),r=0,i=o.length;r<i;r++)Pe(o[r],a[r]);else Pe(e,c);return 0<(a=ge(c,"script")).length&&ve(a,!f&&ge(e,"script")),c},cleanData:function(e){for(var t,n,r,i=S.event.special,o=0;void 0!==(n=e[o]);o++)if(Q(n)){if(t=n[V.expando]){if(t.events)for(r in t.events)i[r]?S.event.remove(n,r):S.removeEvent(n,r,t.handle);n[V.expando]=void 0}n[G.expando]&&(n[G.expando]=void 0)}}}),S.fn.extend({detach:function(e){return qe(this,e,!0)},remove:function(e){return qe(this,e)},text:function(e){return F(this,function(e){return void 0===e?S.text(this):this.empty().each(function(){1!==this.nodeType&&11!==this.nodeType&&9!==this.nodeType||(this.textContent=e)})},null,e,arguments.length)},append:function(){return He(this,arguments,function(e){1!==this.nodeType&&11!==this.nodeType&&9!==this.nodeType||Le(this,e).appendChild(e)})},prepend:function(){return He(this,arguments,function(e){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var t=Le(this,e);t.insertBefore(e,t.firstChild)}})},before:function(){return He(this,arguments,function(e){this.parentNode&&this.parentNode.insertBefore(e,this)})},after:function(){return He(this,arguments,function(e){this.parentNode&&this.parentNode.insertBefore(e,this.nextSibling)})},empty:function(){for(var e,t=0;null!=(e=this[t]);t++)1===e.nodeType&&(S.cleanData(ge(e,!1)),e.textContent=");return this},clone:function(e,t){return e=null!=e&&e,t=null==t?e:t,this.map(function(){return S.clone(this,e,t)})},html:function(e){return F(this,function(e){var t=this[0]||{},n=0,r=this.length;if(void 0===e&&1===t.nodeType)return t.innerHTML;if("string"==typeof e&&!Ne.test(e)&&!me[(pe.exec(e)||[","])[1].toLowerCase()]){e=S.htmlPrefilter(e);try{for(;n<r;n++)1===(t=this[n]||{}).nodeType&&(S.cleanData(ge(t,!1)),t.innerHTML=e);t=0}catch(e){}}t&&this.empty().append(e)},null,e,arguments.length)},replaceWith:function(){var n=[];return He(this,arguments,function(e){var t=this.parentNode;S.inArray(this,n)<0&&(S.cleanData(ge(this)),t&&t.replaceChild(e,this))},n)}}),S.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(e,a){S.fn[e]=function(e){for(var t,n=[],r=S(e),i=r.length-1,o=0;o<=i;o++)t=o===i?this:this.clone(!0),S(r[o])[a](t),u.apply(n,t.get());return this.pushStack(n)}});function Me(e,t,n){var r,i,o={};for(i in t)o[i]=e.style[i],e.style[i]=t[i];for(i in r=n.call(e),t)e.style[i]=o[i];return r}var _e,$e,Re,Be,Fe,ze,We,Ue,Xe=new RegExp("^("+ee+")(?!px)[a-z%]+$","i"),Qe=function(e){var t=e.ownerDocument.defaultView;return t&&t.opener||(t=T),t.getComputedStyle(e)},Ye=new RegExp(ne.join("|"),"i");function Ve(){if(Ue){We.style.cssText="position:absolute;left:-11111px;width:60px;margin-top:1px;padding:0;border:0",Ue.style.cssText="position:relative;display:block;box-sizing:border-box;overflow:scroll;margin:auto;border:1px;padding:1px;width:60%;top:1%",re.appendChild(We).appendChild(Ue);var e=T.getComputedStyle(Ue);_e="1%"!==e.top,ze=12===Ge(e.marginLeft),Ue.style.right="60%",Be=36===Ge(e.right),$e=36===Ge(e.width),Ue.style.position="absolute",Re=12===Ge(Ue.offsetWidth/3),re.removeChild(We),Ue=null}}function Ge(e){return Math.round(parseFloat(e))}function Ke(e,t,n){var 
r,i,o,a,s=e.style;return(n=n||Qe(e))&&("!==(a=n.getPropertyValue(t)||n[t])||ie(e)||(a=S.style(e,t)),!y.pixelBoxStyles()&&Xe.test(a)&&Ye.test(t)&&(r=s.width,i=s.minWidth,o=s.maxWidth,s.minWidth=s.maxWidth=s.width=a,a=n.width,s.width=r,s.minWidth=i,s.maxWidth=o)),void 0!==a?a+":a}function Ze(e,t){return{get:function(){if(!e())return(this.get=t).apply(this,arguments);delete this.get}}}We=E.createElement("div"),(Ue=E.createElement("div")).style&&(Ue.style.backgroundClip="content-box",Ue.cloneNode(!0).style.backgroundClip=",y.clearCloneStyle="content-box"===Ue.style.backgroundClip,S.extend(y,{boxSizingReliable:function(){return Ve(),$e},pixelBoxStyles:function(){return Ve(),Be},pixelPosition:function(){return Ve(),_e},reliableMarginLeft:function(){return Ve(),ze},scrollboxSize:function(){return Ve(),Re},reliableTrDimensions:function(){var e,t,n,r;return null==Fe&&(e=E.createElement("table"),t=E.createElement("tr"),n=E.createElement("div"),e.style.cssText="position:absolute;left:-11111px",t.style.height="1px",n.style.height="9px",re.appendChild(e).appendChild(t).appendChild(n),r=T.getComputedStyle(t),Fe=3<parseInt(r.height),re.removeChild(e)),Fe}}));var Je=["Webkit","Moz","ms"],et=E.createElement("div").style,tt={};function nt(e){var t=S.cssProps[e]||tt[e];return t||(e in et?e:tt[e]=function(e){for(var t=e[0].toUpperCase()+e.slice(1),n=Je.length;n--;)if((e=Je[n]+t)in et)return e}(e)||e)}var rt=/^(none|table(?!-c[ea]).+)/,it=/^--/,ot={position:"absolute",visibility:"hidden",display:"block"},at={letterSpacing:"0",fontWeight:"400"};function
(e,t,n){var r=te.exec(t);return r?Math.max(0,r[2]-(n||0))+(r[3]||"px"):t}function ut(e,t,n,r,i,o){var a="width"===t?1:0,s=0,u=0;if(n===(r?"border":"content"))return 0;for(;a<4;a+=2)"margin"===n&&(u+=S.css(e,n+ne[a],!0,i)),r?("content"===n&&(u-=S.css(e,"padding"+ne[a],!0,i)),"margin"!==n&&(u-=S.css(e,"border"+ne[a]+"Width",!0,i))):(u+=S.css(e,"padding"+ne[a],!0,i),"padding"!==n?u+=S.css(e,"border"+ne[a]+"Width",!0,i):s+=S.css(e,"border"+ne[a]+"Width",!0,i));return!r&&0<=o&&(u+=Math.max(0,Math.ceil(e["offset"+t[0].toUpperCase()+t.slice(1)]-o-u-s-.5))||0),u}function lt(e,t,n){var r=Qe(e),i=(!y.boxSizingReliable()||n)&&"border-box"===S.css(e,"boxSizing",!1,r),o=i,a=Ke(e,t,r),s="offset"+t[0].toUpperCase()+t.slice(1);if(Xe.test(a)){if(!n)return a;a="auto"}return(!y.boxSizingReliable()&&i||!y.reliableTrDimensions()&&A(e,"tr")||"auto"===a||!parseFloat(a)&&"inline"===S.css(e,"display",!1,r))&&e.getClientRects().length&&(i="border-box"===S.css(e,"boxSizing",!1,r),(o=s in e)&&(a=e[s])),(a=parseFloat(a)||0)+ut(e,t,n||(i?"border":"content"),o,r,a)+"px"}function ct(e,t,n,r,i){return new ct.prototype.init(e,t,n,r,i)}S.extend({cssHooks:{opacity:{get:function(e,t){if(t){var n=Ke(e,"opacity");return"===n?"1":n}}}},cssNumber:{animationIterationCount:!0,columnCount:!0,fillOpacity:!0,flexGrow:!0,flexShrink:!0,fontWeight:!0,gridArea:!0,gridColumn:!0,gridColumnEnd:!0,gridColumnStart:!0,gridRow:!0,gridRowEnd:!0,gridRowStart:!0,lineHeight:!0,opacity:!0,order:!0,orphans:!0,widows:!0,zIndex:!0,zoom:!0},cssProps:{},style:function(e,t,n,r){if(e&&3!==e.nodeType&&8!==e.nodeType&&e.style){var i,o,a,s=X(t),u=it.test(t),l=e.style;if(u||(t=nt(s)),a=S.cssHooks[t]||S.cssHooks[s],void 0===n)return a&&"get"in a&&void 0!==(i=a.get(e,!1,r))?i:l[t];"string"===(o=typeof n)&&(i=te.exec(n))&&i[1]&&(n=se(e,t,i),o="number"),null!=n&&n==n&&("number"!==o||u||(n+=i&&i[3]||(S.cssNumber[s]?":"px")),y.clearCloneStyle||"!==n||0!==t.indexOf("background")||(l[t]="inherit"),a&&"set"in a&&void 0===(n=a.set(e,n,r))||(u?l.setProperty(t,n):l[t]=n))}},css:function(e,t,n,r){var i,o,a,s=X(t);return it.test(t)||(t=nt(s)),(a=S.cssHooks[t]||S.cssHooks[s])&&"get"in a&&(i=a.get(e,!0,n)),void 0===i&&(i=Ke(e,t,r)),"normal"===i&&t in at&&(i=at[t]),"===n||n?(o=parseFloat(i),!0===n||isFinite(o)?o||0:i):i}}),S.each(["height","width"],function(e,u){S.cssHooks[u]={get:function(e,t,n){if(t)return!rt.test(S.css(e,"display"))||e.getClientRects().length&&e.getBoundingClientRect().width?lt(e,u,n):Me(e,ot,function(){return lt(e,u,n)})},set:function(e,t,n){var r,i=Qe(e),o=!y.scrollboxSize()&&"absolute"===i.position,a=(o||n)&&"border-box"===S.css(e,"boxSizing",!1,i),s=n?ut(e,u,n,a,i):0;return a&&o&&(s-=Math.ceil(e["offset"+u[0].toUpperCase()+u.slice(1)]-parseFloat(i[u])-ut(e,u,"border",!1,i)-.5)),s&&(r=te.exec(t))&&"px"!==(r[3]||"px")&&(e.style[u]=t,t=S.css(e,u)),st(0,t,s)}}}),S.cssHooks.marginLeft=Ze(y.reliableMarginLeft,function(e,t){if(t)return(parseFloat(Ke(e,"marginLeft"))||e.getBoundingClientRect().left-Me(e,{marginLeft:0},function(){return e.getBoundingClientRect().left}))+"px"}),S.each({margin:",padding:",border:"Width"},function(i,o){S.cssHooks[i+o]={expand:function(e){for(var t=0,n={},r="string"==typeof e?e.split(" "):[e];t<4;t++)n[i+ne[t]+o]=r[t]||r[t-2]||r[0];return n}},"margin"!==i&&(S.cssHooks[i+o].set=st)}),S.fn.extend({css:function(e,t){return F(this,function(e,t,n){var r,i,o={},a=0;if(Array.isArray(t)){for(r=Qe(e),i=t.length;a<i;a++)o[t[a]]=S.css(e,t[a],!1,r);return o}return void 
0!==n?S.style(e,t,n):S.css(e,t)},e,t,1<arguments.length)}}),((S.Tween=ct).prototype={constructor:ct,init:function(e,t,n,r,i,o){this.elem=e,this.prop=n,this.easing=i||S.easing._default,this.options=t,this.start=this.now=this.cur(),this.end=r,this.unit=o||(S.cssNumber[n]?":"px")},cur:function(){var e=ct.propHooks[this.prop];return e&&e.get?e.get(this):ct.propHooks._default.get(this)},run:function(e){var t,n=ct.propHooks[this.prop];return this.options.duration?this.pos=t=S.easing[this.easing](e,this.options.duration*e,0,1,this.options.duration):this.pos=t=e,this.now=(this.end-this.start)*t+this.start,this.options.step&&this.options.step.call(this.elem,this.now,this),n&&n.set?n.set(this):ct.propHooks._default.set(this),this}}).init.prototype=ct.prototype,(ct.propHooks={_default:{get:function(e){var t;return 1!==e.elem.nodeType||null!=e.elem[e.prop]&&null==e.elem.style[e.prop]?e.elem[e.prop]:(t=S.css(e.elem,e.prop,"))&&"auto"!==t?t:0},set:function(e){S.fx.step[e.prop]?S.fx.step[e.prop](e):1!==e.elem.nodeType||!S.cssHooks[e.prop]&&null==e.elem.style[nt(e.prop)]?e.elem[e.prop]=e.now:S.style(e.elem,e.prop,e.now+e.unit)}}}).scrollTop=ct.propHooks.scrollLeft={set:function(e){e.elem.nodeType&&e.elem.parentNode&&(e.elem[e.prop]=e.now)}},S.easing={linear:function(e){return e},swing:function(e){return.5-Math.cos(e*Math.PI)/2},_default:"swing"},S.fx=ct.prototype.init,S.fx.step={};var ft,dt,pt,ht,mt=/^(?:toggle|show|hide)$/,gt=/queueHooks$/;function vt(){dt&&(!1===E.hidden&&T.requestAnimationFrame?T.requestAnimationFrame(vt):T.setTimeout(vt,S.fx.interval),S.fx.tick())}function yt(){return T.setTimeout(function(){ft=void 0}),ft=Date.now()}function bt(e,t){var n,r=0,i={height:e};for(t=t?1:0;r<4;r+=2-t)i["margin"+(n=ne[r])]=i["padding"+n]=e;return t&&(i.opacity=i.width=e),i}function xt(e,t,n){for(var r,i=(wt.tweeners[t]||[]).concat(wt.tweeners["*"]),o=0,a=i.length;o<a;o++)if(r=i[o].call(n,t,e))return r}function wt(o,e,t){var n,a,r=0,i=wt.prefilters.length,s=S.Deferred().always(function(){delete u.elem}),u=function(){if(a)return!1;for(var e=ft||yt(),t=Math.max(0,l.startTime+l.duration-e),n=1-(t/l.duration||0),r=0,i=l.tweens.length;r<i;r++)l.tweens[r].run(n);return s.notifyWith(o,[l,n,t]),n<1&&i?t:(i||s.notifyWith(o,[l,1,0]),s.resolveWith(o,[l]),!1)},l=s.promise({elem:o,props:S.extend({},e),opts:S.extend(!0,{specialEasing:{},easing:S.easing._default},t),originalProperties:e,originalOptions:t,startTime:ft||yt(),duration:t.duration,tweens:[],createTween:function(e,t){var n=S.Tween(o,l.opts,e,t,l.opts.specialEasing[e]||l.opts.easing);return l.tweens.push(n),n},stop:function(e){var t=0,n=e?l.tweens.length:0;if(a)return this;for(a=!0;t<n;t++)l.tweens[t].run(1);return e?(s.notifyWith(o,[l,1,0]),s.resolveWith(o,[l,e])):s.rejectWith(o,[l,e]),this}}),c=l.props;for(!function(e,t){var n,r,i,o,a;for(n in e)if(i=t[r=X(n)],o=e[n],Array.isArray(o)&&(i=o[1],o=e[n]=o[0]),n!==r&&(e[r]=o,delete e[n]),(a=S.cssHooks[r])&&"expand"in a)for(n in o=a.expand(o),delete e[r],o)n in e||(e[n]=o[n],t[n]=i);else t[r]=i}(c,l.opts.specialEasing);r<i;r++)if(n=wt.prefilters[r].call(l,o,c,l.opts))return b(n.stop)&&(S._queueHooks(l.elem,l.opts.queue).stop=n.stop.bind(n)),n;return S.map(c,xt,l),b(l.opts.start)&&l.opts.start.call(o,l),l.progress(l.opts.progress).done(l.opts.done,l.opts.complete).fail(l.opts.fail).always(l.opts.always),S.fx.timer(S.extend(u,{elem:o,anim:l,queue:l.opts.queue})),l}S.Animation=S.extend(wt,{tweeners:{"*":[function(e,t){var n=this.createTween(e,t);return se(n.elem,e,te.exec(t),n),n}]},tweener:function(e,t){for(var 
n,r=0,i=(e=b(e)?(t=e,["*"]):e.match(H)).length;r<i;r++)n=e[r],wt.tweeners[n]=wt.tweeners[n]||[],wt.tweeners[n].unshift(t)},prefilters:[function(e,t,n){var r,i,o,a,s,u,l,c,f="width"in t||"height"in t,d=this,p={},h=e.style,m=e.nodeType&&ae(e),g=V.get(e,"fxshow");for(r in n.queue||(null==(a=S._queueHooks(e,"fx")).unqueued&&(a.unqueued=0,s=a.empty.fire,a.empty.fire=function(){a.unqueued||s()}),a.unqueued++,d.always(function(){d.always(function(){a.unqueued--,S.queue(e,"fx").length||a.empty.fire()})})),t)if(i=t[r],mt.test(i)){if(delete t[r],o=o||"toggle"===i,i===(m?"hide":"show")){if("show"!==i||!g||void 0===g[r])continue;m=!0}p[r]=g&&g[r]||S.style(e,r)}if((u=!S.isEmptyObject(t))||!S.isEmptyObject(p))for(r in f&&1===e.nodeType&&(n.overflow=[h.overflow,h.overflowX,h.overflowY],null==(l=g&&g.display)&&(l=V.get(e,"display")),"none"===(c=S.css(e,"display"))&&(l?c=l:(le([e],!0),l=e.style.display||l,c=S.css(e,"display"),le([e]))),("inline"===c||"inline-block"===c&&null!=l)&&"none"===S.css(e,"float")&&(u||(d.done(function(){h.display=l}),null==l&&(c=h.display,l="none"===c?":c)),h.display="inline-block")),n.overflow&&(h.overflow="hidden",d.always(function(){h.overflow=n.overflow[0],h.overflowX=n.overflow[1],h.overflowY=n.overflow[2]})),u=!1,p)u||(g?"hidden"in g&&(m=g.hidden):g=V.access(e,"fxshow",{display:l}),o&&(g.hidden=!m),m&&le([e],!0),d.done(function(){for(r in m||le([e]),V.remove(e,"fxshow"),p)S.style(e,r,p[r])})),u=xt(m?g[r]:0,r,d),r in g||(g[r]=u.start,m&&(u.end=u.start,u.start=0))}],prefilter:function(e,t){t?wt.prefilters.unshift(e):wt.prefilters.push(e)}}),S.speed=function(e,t,n){var r=e&&"object"==typeof e?S.extend({},e):{complete:n||!n&&t||b(e)&&e,duration:e,easing:n&&t||t&&!b(t)&&t};return S.fx.off?r.duration=0:"number"!=typeof r.duration&&(r.duration in S.fx.speeds?r.duration=S.fx.speeds[r.duration]:r.duration=S.fx.speeds._default),null!=r.queue&&!0!==r.queue||(r.queue="fx"),r.old=r.complete,r.complete=function(){b(r.old)&&r.old.call(this),r.queue&&S.dequeue(this,r.queue)},r},S.fn.extend({fadeTo:function(e,t,n,r){return this.filter(ae).css("opacity",0).show().end().animate({opacity:t},e,n,r)},animate:function(t,e,n,r){function i(){var e=wt(this,S.extend({},t),a);(o||V.get(this,"finish"))&&e.stop(!0)}var o=S.isEmptyObject(t),a=S.speed(e,n,r);return i.finish=i,o||!1===a.queue?this.each(i):this.queue(a.queue,i)},stop:function(i,e,o){function a(e){var t=e.stop;delete e.stop,t(o)}return"string"!=typeof i&&(o=e,e=i,i=void 0),e&&this.queue(i||"fx",[]),this.each(function(){var e=!0,t=null!=i&&i+"queueHooks",n=S.timers,r=V.get(this);if(t)r[t]&&r[t].stop&&a(r[t]);else for(t in r)r[t]&&r[t].stop&&gt.test(t)&&a(r[t]);for(t=n.length;t--;)n[t].elem!==this||null!=i&&n[t].queue!==i||(n[t].anim.stop(o),e=!1,n.splice(t,1));!e&&o||S.dequeue(this,i)})},finish:function(a){return!1!==a&&(a=a||"fx"),this.each(function(){var e,t=V.get(this),n=t[a+"queue"],r=t[a+"queueHooks"],i=S.timers,o=n?n.length:0;for(t.finish=!0,S.queue(this,a,[]),r&&r.stop&&r.stop.call(this,!0),e=i.length;e--;)i[e].elem===this&&i[e].queue===a&&(i[e].anim.stop(!0),i.splice(e,1));for(e=0;e<o;e++)n[e]&&n[e].finish&&n[e].finish.call(this);delete t.finish})}}),S.each(["toggle","show","hide"],function(e,r){var i=S.fn[r];S.fn[r]=function(e,t,n){return null==e||"boolean"==typeof e?i.apply(this,arguments):this.animate(bt(r,!0),e,t,n)}}),S.each({slideDown:bt("show"),slideUp:bt("hide"),slideToggle:bt("toggle"),fadeIn:{opacity:"show"},fadeOut:{opacity:"hide"},fadeToggle:{opacity:"toggle"}},function(e,r){S.fn[e]=function(e,t,n){return 
this.animate(r,e,t,n)}}),S.timers=[],S.fx.tick=function(){var e,t=0,n=S.timers;for(ft=Date.now();t<n.length;t++)(e=n[t])()||n[t]!==e||n.splice(t--,1);n.length||S.fx.stop(),ft=void 0},S.fx.timer=function(e){S.timers.push(e),S.fx.start()},S.fx.interval=13,S.fx.start=function(){dt||(dt=!0,vt())},S.fx.stop=function(){dt=null},S.fx.speeds={slow:600,fast:200,_default:400},S.fn.delay=function(r,e){return r=S.fx&&S.fx.speeds[r]||r,e=e||"fx",this.queue(e,function(e,t){var n=T.setTimeout(e,r);t.stop=function(){T.clearTimeout(n)}})},pt=E.createElement("input"),ht=E.createElement("select").appendChild(E.createElement("option")),pt.type="checkbox",y.checkOn="!==pt.value,y.optSelected=ht.selected,(pt=E.createElement("input")).value="t",pt.type="radio",y.radioValue="t"===pt.value;var Ct,Tt=S.expr.attrHandle;S.fn.extend({attr:function(e,t){return F(this,S.attr,e,t,1<arguments.length)},removeAttr:function(e){return this.each(function(){S.removeAttr(this,e)})}}),S.extend({attr:function(e,t,n){var r,i,o=e.nodeType;if(3!==o&&8!==o&&2!==o)return void 0===e.getAttribute?S.prop(e,t,n):(1===o&&S.isXMLDoc(e)||(i=S.attrHooks[t.toLowerCase()]||(S.expr.match.bool.test(t)?Ct:void 0)),void 0!==n?null===n?void S.removeAttr(e,t):i&&"set"in i&&void 0!==(r=i.set(e,n,t))?r:(e.setAttribute(t,n+"),n):!(i&&"get"in i&&null!==(r=i.get(e,t)))&&null==(r=S.find.attr(e,t))?void 0:r)},attrHooks:{type:{set:function(e,t){if(!y.radioValue&&"radio"===t&&A(e,"input")){var n=e.value;return e.setAttribute("type",t),n&&(e.value=n),t}}}},removeAttr:function(e,t){var n,r=0,i=t&&t.match(H);if(i&&1===e.nodeType)for(;n=i[r++];)e.removeAttribute(n)}}),Ct={set:function(e,t,n){return!1===t?S.removeAttr(e,n):e.setAttribute(n,n),n}},S.each(S.expr.match.bool.source.match(/\w+/g),function(e,t){var a=Tt[t]||S.find.attr;Tt[t]=function(e,t,n){var r,i,o=t.toLowerCase();return n||(i=Tt[o],Tt[o]=r,r=null!=a(e,t,n)?o:null,Tt[o]=i),r}});var Et=/^(?:input|select|textarea|button)$/i,St=/^(?:a|area)$/i;function kt(e){return(e.match(H)||[]).join(" ")}function At(e){return e.getAttribute&&e.getAttribute("class")||"}function Nt(e){return Array.isArray(e)?e:"string"==typeof e&&e.match(H)||[]}S.fn.extend({prop:function(e,t){return F(this,S.prop,e,t,1<arguments.length)},removeProp:function(e){return this.each(function(){delete this[S.propFix[e]||e]})}}),S.extend({prop:function(e,t,n){var r,i,o=e.nodeType;if(3!==o&&8!==o&&2!==o)return 1===o&&S.isXMLDoc(e)||(t=S.propFix[t]||t,i=S.propHooks[t]),void 0!==n?i&&"set"in i&&void 0!==(r=i.set(e,n,t))?r:e[t]=n:i&&"get"in i&&null!==(r=i.get(e,t))?r:e[t]},propHooks:{tabIndex:{get:function(e){var t=S.find.attr(e,"tabindex");return t?parseInt(t,10):Et.test(e.nodeName)||St.test(e.nodeName)&&e.href?0:-1}}},propFix:{for:"htmlFor",class:"className"}}),y.optSelected||(S.propHooks.selected={get:function(e){var t=e.parentNode;return t&&t.parentNode&&t.parentNode.selectedIndex,null},set:function(e){var t=e.parentNode;t&&(t.selectedIndex,t.parentNode&&t.parentNode.selectedIndex)}}),S.each(["tabIndex","readOnly","maxLength","cellSpacing","cellPadding","rowSpan","colSpan","useMap","frameBorder","contentEditable"],function(){S.propFix[this.toLowerCase()]=this}),S.fn.extend({addClass:function(t){var e,n,r,i,o,a,s,u=0;if(b(t))return this.each(function(e){S(this).addClass(t.call(this,e,At(this)))});if((e=Nt(t)).length)for(;n=this[u++];)if(i=At(n),r=1===n.nodeType&&" "+kt(i)+" "){for(a=0;o=e[a++];)r.indexOf(" "+o+" ")<0&&(r+=o+" ");i!==(s=kt(r))&&n.setAttribute("class",s)}return this},removeClass:function(t){var e,n,r,i,o,a,s,u=0;if(b(t))return 
this.each(function(e){S(this).removeClass(t.call(this,e,At(this)))});if(!arguments.length)return this.attr("class",");if((e=Nt(t)).length)for(;n=this[u++];)if(i=At(n),r=1===n.nodeType&&" "+kt(i)+" "){for(a=0;o=e[a++];)for(;-1<r.indexOf(" "+o+" ");)r=r.replace(" "+o+" "," ");i!==(s=kt(r))&&n.setAttribute("class",s)}return this},toggleClass:function(i,t){var o=typeof i,a="string"==o||Array.isArray(i);return"boolean"==typeof t&&a?t?this.addClass(i):this.removeClass(i):b(i)?this.each(function(e){S(this).toggleClass(i.call(this,e,At(this),t),t)}):this.each(function(){var e,t,n,r;if(a)for(t=0,n=S(this),r=Nt(i);e=r[t++];)n.hasClass(e)?n.removeClass(e):n.addClass(e);else void 0!==i&&"boolean"!=o||((e=At(this))&&V.set(this,"__className__",e),this.setAttribute&&this.setAttribute("class",!e&&!1!==i&&V.get(this,"__className__")||"))})},hasClass:function(e){var t,n,r=0;for(t=" "+e+" ";n=this[r++];)if(1===n.nodeType&&-1<(" "+kt(At(n))+" ").indexOf(t))return!0;return!1}});var jt=/\r/g;S.fn.extend({val:function(n){var r,e,i,t=this[0];return arguments.length?(i=b(n),this.each(function(e){var t;1===this.nodeType&&(null==(t=i?n.call(this,e,S(this).val()):n)?t=":"number"==typeof t?t+=":Array.isArray(t)&&(t=S.map(t,function(e){return null==e?":e+"})),(r=S.valHooks[this.type]||S.valHooks[this.nodeName.toLowerCase()])&&"set"in r&&void 0!==r.set(this,t,"value")||(this.value=t))})):t?(r=S.valHooks[t.type]||S.valHooks[t.nodeName.toLowerCase()])&&"get"in r&&void 0!==(e=r.get(t,"value"))?e:"string"==typeof(e=t.value)?e.replace(jt,"):null==e?":e:void 0}}),S.extend({valHooks:{option:{get:function(e){var t=S.find.attr(e,"value");return null!=t?t:kt(S.text(e))}},select:{get:function(e){var t,n,r,i=e.options,o=e.selectedIndex,a="select-one"===e.type,s=a?null:[],u=a?o+1:i.length;for(r=o<0?u:a?o:0;r<u;r++)if(((n=i[r]).selected||r===o)&&!n.disabled&&(!n.parentNode.disabled||!A(n.parentNode,"optgroup"))){if(t=S(n).val(),a)return t;s.push(t)}return s},set:function(e,t){for(var n,r,i=e.options,o=S.makeArray(t),a=i.length;a--;)((r=i[a]).selected=-1<S.inArray(S.valHooks.option.get(r),o))&&(n=!0);return n||(e.selectedIndex=-1),o}}}}),S.each(["radio","checkbox"],function(){S.valHooks[this]={set:function(e,t){if(Array.isArray(t))return e.checked=-1<S.inArray(S(e).val(),t)}},y.checkOn||(S.valHooks[this].get=function(e){return null===e.getAttribute("value")?"on":e.value})}),y.focusin="onfocusin"in T;function It(e){e.stopPropagation()}var Lt=/^(?:focusinfocus|focusoutblur)$/;S.extend(S.event,{trigger:function(e,t,n,r){var i,o,a,s,u,l,c,f,d=[n||E],p=v.call(e,"type")?e.type:e,h=v.call(e,"namespace")?e.namespace.split("."):[];if(o=f=a=n=n||E,3!==n.nodeType&&8!==n.nodeType&&!Lt.test(p+S.event.triggered)&&(-1<p.indexOf(".")&&(p=(h=p.split(".")).shift(),h.sort()),u=p.indexOf(":")<0&&"on"+p,(e=e[S.expando]?e:new S.Event(p,"object"==typeof e&&e)).isTrigger=r?2:3,e.namespace=h.join("."),e.rnamespace=e.namespace?new RegExp("(^|\\.)"+h.join("\\.(?:.*\\.|)")+"(\\.|$)"):null,e.result=void 0,e.target||(e.target=n),t=null==t?[e]:S.makeArray(t,[e]),c=S.event.special[p]||{},r||!c.trigger||!1!==c.trigger.apply(n,t))){if(!r&&!c.noBubble&&!m(n)){for(s=c.delegateType||p,Lt.test(s+p)||(o=o.parentNode);o;o=o.parentNode)d.push(o),a=o;a===(n.ownerDocument||E)&&d.push(a.defaultView||a.parentWindow||T)}for(i=0;(o=d[i++])&&!e.isPropagationStopped();)f=o,e.type=1<i?s:c.bindType||p,(l=(V.get(o,"events")||Object.create(null))[e.type]&&V.get(o,"handle"))&&l.apply(o,t),(l=u&&o[u])&&l.apply&&Q(o)&&(e.result=l.apply(o,t),!1===e.result&&e.preventDefault());return 
e.type=p,r||e.isDefaultPrevented()||c._default&&!1!==c._default.apply(d.pop(),t)||!Q(n)||u&&b(n[p])&&!m(n)&&((a=n[u])&&(n[u]=null),S.event.triggered=p,e.isPropagationStopped()&&f.addEventListener(p,It),n[p](),e.isPropagationStopped()&&f.removeEventListener(p,It),S.event.triggered=void 0,a&&(n[u]=a)),e.result}},simulate:function(e,t,n){var r=S.extend(new S.Event,n,{type:e,isSimulated:!0});S.event.trigger(r,null,t)}}),S.fn.extend({trigger:function(e,t){return this.each(function(){S.event.trigger(e,t,this)})},triggerHandler:function(e,t){var n=this[0];if(n)return S.event.trigger(e,t,n,!0)}}),y.focusin||S.each({focus:"focusin",blur:"focusout"},function(n,r){function i(e){S.event.simulate(r,e.target,S.event.fix(e))}S.event.special[r]={setup:function(){var e=this.ownerDocument||this.document||this,t=V.access(e,r);t||e.addEventListener(n,i,!0),V.access(e,r,(t||0)+1)},teardown:function(){var e=this.ownerDocument||this.document||this,t=V.access(e,r)-1;t?V.access(e,r,t):(e.removeEventListener(n,i,!0),V.remove(e,r))}}});var Dt=T.location,Ot={guid:Date.now()},Pt=/\?/;S.parseXML=function(e){var t;if(!e||"string"!=typeof e)return null;try{t=(new T.DOMParser).parseFromString(e,"text/xml")}catch(e){t=void 0}return t&&!t.getElementsByTagName("parsererror").length||S.error("Invalid XML: "+e),t};var Ht=/\[\]$/,qt=/\r?\n/g,Mt=/^(?:submit|button|image|reset|file)$/i,_t=/^(?:input|select|textarea|keygen)/i;function $t(n,e,r,i){var t;if(Array.isArray(e))S.each(e,function(e,t){r||Ht.test(n)?i(n,t):$t(n+"["+("object"==typeof t&&null!=t?e:")+"]",t,r,i)});else if(r||"object"!==w(e))i(n,e);else for(t in e)$t(n+"["+t+"]",e[t],r,i)}S.param=function(e,t){function n(e,t){var n=b(t)?t():t;i[i.length]=encodeURIComponent(e)+"="+encodeURIComponent(null==n?":n)}var r,i=[];if(null==e)return";if(Array.isArray(e)||e.jquery&&!S.isPlainObject(e))S.each(e,function(){n(this.name,this.value)});else for(r in e)$t(r,e[r],t,n);return i.join("&")},S.fn.extend({serialize:function(){return S.param(this.serializeArray())},serializeArray:function(){return this.map(function(){var e=S.prop(this,"elements");return e?S.makeArray(e):this}).filter(function(){var e=this.type;return this.name&&!S(this).is(":disabled")&&_t.test(this.nodeName)&&!Mt.test(e)&&(this.checked||!de.test(e))}).map(function(e,t){var n=S(this).val();return null==n?null:Array.isArray(n)?S.map(n,function(e){return{name:t.name,value:e.replace(qt,"\r\n")}}):{name:t.name,value:n.replace(qt,"\r\n")}}).get()}});var Rt=/%20/g,Bt=/#.*$/,Ft=/([?&])_=[^&]*/,zt=/^(.*?):[ \t]*([^\r\n]*)$/gm,Wt=/^(?:GET|HEAD)$/,Ut=/^\/\//,Xt={},Qt={},Yt="*/".concat("*"),Vt=E.createElement("a");function Gt(o){return function(e,t){"string"!=typeof e&&(t=e,e="*");var n,r=0,i=e.toLowerCase().match(H)||[];if(b(t))for(;n=i[r++];)"+"===n[0]?(n=n.slice(1)||"*",(o[n]=o[n]||[]).unshift(t)):(o[n]=o[n]||[]).push(t)}}function Kt(t,i,o,a){var s={},u=t===Qt;function l(e){var r;return s[e]=!0,S.each(t[e]||[],function(e,t){var n=t(i,o,a);return"string"!=typeof n||u||s[n]?u?!(r=n):void 0:(i.dataTypes.unshift(n),l(n),!1)}),r}return l(i.dataTypes[0])||!s["*"]&&l("*")}function Zt(e,t){var n,r,i=S.ajaxSettings.flatOptions||{};for(n in t)void 0!==t[n]&&((i[n]?e:r=r||{})[n]=t[n]);return r&&S.extend(!0,e,r),e}Vt.href=Dt.href,S.extend({active:0,lastModified:{},etag:{},ajaxSettings:{url:Dt.href,type:"GET",isLocal:/^(?:about|app|app-storage|.+-extension|file|res|widget):$/.test(Dt.protocol),global:!0,processData:!0,async:!0,contentType:"application/x-www-form-urlencoded; 
charset=UTF-8",accepts:{"*":Yt,text:"text/plain",html:"text/html",xml:"application/xml, text/xml",json:"application/json, text/javascript"},contents:{xml:/\bxml\b/,html:/\bhtml/,json:/\bjson\b/},responseFields:{xml:"responseXML",text:"responseText",json:"responseJSON"},converters:{"* text":String,"text html":!0,"text json":JSON.parse,"text xml":S.parseXML},flatOptions:{url:!0,context:!0}},ajaxSetup:function(e,t){return t?Zt(Zt(e,S.ajaxSettings),t):Zt(S.ajaxSettings,e)},ajaxPrefilter:Gt(Xt),ajaxTransport:Gt(Qt),ajax:function(e,t){"object"==typeof e&&(t=e,e=void 0),t=t||{};var c,f,d,n,p,r,h,m,i,o,g=S.ajaxSetup({},t),v=g.context||g,y=g.context&&(v.nodeType||v.jquery)?S(v):S.event,b=S.Deferred(),x=S.Callbacks("once memory"),w=g.statusCode||{},a={},s={},u="canceled",C={readyState:0,getResponseHeader:function(e){var t;if(h){if(!n)for(n={};t=zt.exec(d);)n[t[1].toLowerCase()+" "]=(n[t[1].toLowerCase()+" "]||[]).concat(t[2]);t=n[e.toLowerCase()+" "]}return null==t?null:t.join(", ")},getAllResponseHeaders:function(){return h?d:null},setRequestHeader:function(e,t){return null==h&&(e=s[e.toLowerCase()]=s[e.toLowerCase()]||e,a[e]=t),this},overrideMimeType:function(e){return null==h&&(g.mimeType=e),this},statusCode:function(e){var t;if(e)if(h)C.always(e[C.status]);else for(t in e)w[t]=[w[t],e[t]];return this},abort:function(e){var t=e||u;return c&&c.abort(t),l(0,t),this}};if(b.promise(C),g.url=((e||g.url||Dt.href)+").replace(Ut,Dt.protocol+"//"),g.type=t.method||t.type||g.method||g.type,g.dataTypes=(g.dataType||"*").toLowerCase().match(H)||["],null==g.crossDomain){r=E.createElement("a");try{r.href=g.url,r.href=r.href,g.crossDomain=Vt.protocol+"//"+Vt.host!=r.protocol+"//"+r.host}catch(e){g.crossDomain=!0}}if(g.data&&g.processData&&"string"!=typeof g.data&&(g.data=S.param(g.data,g.traditional)),Kt(Xt,g,t,C),h)return C;for(i in(m=S.event&&g.global)&&0==S.active++&&S.event.trigger("ajaxStart"),g.type=g.type.toUpperCase(),g.hasContent=!Wt.test(g.type),f=g.url.replace(Bt,"),g.hasContent?g.data&&g.processData&&0===(g.contentType||").indexOf("application/x-www-form-urlencoded")&&(g.data=g.data.replace(Rt,"+")):(o=g.url.slice(f.length),g.data&&(g.processData||"string"==typeof g.data)&&(f+=(Pt.test(f)?"&":"?")+g.data,delete g.data),!1===g.cache&&(f=f.replace(Ft,"$1"),o=(Pt.test(f)?"&":"?")+"_="+Ot.guid+++o),g.url=f+o),g.ifModified&&(S.lastModified[f]&&C.setRequestHeader("If-Modified-Since",S.lastModified[f]),S.etag[f]&&C.setRequestHeader("If-None-Match",S.etag[f])),(g.data&&g.hasContent&&!1!==g.contentType||t.contentType)&&C.setRequestHeader("Content-Type",g.contentType),C.setRequestHeader("Accept",g.dataTypes[0]&&g.accepts[g.dataTypes[0]]?g.accepts[g.dataTypes[0]]+("*"!==g.dataTypes[0]?", "+Yt+"; q=0.01":"):g.accepts["*"]),g.headers)C.setRequestHeader(i,g.headers[i]);if(g.beforeSend&&(!1===g.beforeSend.call(v,C,g)||h))return C.abort();if(u="abort",x.add(g.complete),C.done(g.success),C.fail(g.error),c=Kt(Qt,g,t,C)){if(C.readyState=1,m&&y.trigger("ajaxSend",[C,g]),h)return C;g.async&&0<g.timeout&&(p=T.setTimeout(function(){C.abort("timeout")},g.timeout));try{h=!1,c.send(a,l)}catch(e){if(h)throw e;l(-1,e)}}else l(-1,"No Transport");function l(e,t,n,r){var i,o,a,s,u,l=t;h||(h=!0,p&&T.clearTimeout(p),c=void 0,d=r||",C.readyState=0<e?4:0,i=200<=e&&e<300||304===e,n&&(s=function(e,t,n){for(var r,i,o,a,s=e.contents,u=e.dataTypes;"*"===u[0];)u.shift(),void 0===r&&(r=e.mimeType||t.getResponseHeader("Content-Type"));if(r)for(i in s)if(s[i]&&s[i].test(r)){u.unshift(i);break}if(u[0]in n)o=u[0];else{for(i in 
n){if(!u[0]||e.converters[i+" "+u[0]]){o=i;break}a=a||i}o=o||a}if(o)return o!==u[0]&&u.unshift(o),n[o]}(g,C,n)),!i&&-1<S.inArray("script",g.dataTypes)&&(g.converters["text script"]=function(){}),s=function(e,t,n,r){var i,o,a,s,u,l={},c=e.dataTypes.slice();if(c[1])for(a in e.converters)l[a.toLowerCase()]=e.converters[a];for(o=c.shift();o;)if(e.responseFields[o]&&(n[e.responseFields[o]]=t),!u&&r&&e.dataFilter&&(t=e.dataFilter(t,e.dataType)),u=o,o=c.shift())if("*"===o)o=u;else if("*"!==u&&u!==o){if(!(a=l[u+" "+o]||l["* "+o]))for(i in l)if((s=i.split(" "))[1]===o&&(a=l[u+" "+s[0]]||l["* "+s[0]])){!0===a?a=l[i]:!0!==l[i]&&(o=s[0],c.unshift(s[1]));break}if(!0!==a)if(a&&e.throws)t=a(t);else try{t=a(t)}catch(e){return{state:"parsererror",error:a?e:"No conversion from "+u+" to "+o}}}return{state:"success",data:t}}(g,s,C,i),i?(g.ifModified&&((u=C.getResponseHeader("Last-Modified"))&&(S.lastModified[f]=u),(u=C.getResponseHeader("etag"))&&(S.etag[f]=u)),204===e||"HEAD"===g.type?l="nocontent":304===e?l="notmodified":(l=s.state,o=s.data,i=!(a=s.error))):(a=l,!e&&l||(l="error",e<0&&(e=0))),C.status=e,C.statusText=(t||l)+",i?b.resolveWith(v,[o,l,C]):b.rejectWith(v,[C,l,a]),C.statusCode(w),w=void 0,m&&y.trigger(i?"ajaxSuccess":"ajaxError",[C,g,i?o:a]),x.fireWith(v,[C,l]),m&&(y.trigger("ajaxComplete",[C,g]),--S.active||S.event.trigger("ajaxStop")))}return C},getJSON:function(e,t,n){return S.get(e,t,n,"json")},getScript:function(e,t){return S.get(e,void 0,t,"script")}}),S.each(["get","post"],function(e,i){S[i]=function(e,t,n,r){return b(t)&&(r=r||n,n=t,t=void 0),S.ajax(S.extend({url:e,type:i,dataType:r,data:t,success:n},S.isPlainObject(e)&&e))}}),S.ajaxPrefilter(function(e){var t;for(t in e.headers)"content-type"===t.toLowerCase()&&(e.contentType=e.headers[t]||")}),S._evalUrl=function(e,t,n){return S.ajax({url:e,type:"GET",dataType:"script",cache:!0,async:!1,global:!1,converters:{"text script":function(){}},dataFilter:function(e){S.globalEval(e,t,n)}})},S.fn.extend({wrapAll:function(e){var t;return this[0]&&(b(e)&&(e=e.call(this[0])),t=S(e,this[0].ownerDocument).eq(0).clone(!0),this[0].parentNode&&t.insertBefore(this[0]),t.map(function(){for(var e=this;e.firstElementChild;)e=e.firstElementChild;return e}).append(this)),this},wrapInner:function(n){return b(n)?this.each(function(e){S(this).wrapInner(n.call(this,e))}):this.each(function(){var e=S(this),t=e.contents();t.length?t.wrapAll(n):e.append(n)})},wrap:function(t){var n=b(t);return this.each(function(e){S(this).wrapAll(n?t.call(this,e):t)})},unwrap:function(e){return this.parent(e).not("body").each(function(){S(this).replaceWith(this.childNodes)}),this}}),S.expr.pseudos.hidden=function(e){return!S.expr.pseudos.visible(e)},S.expr.pseudos.visible=function(e){return!!(e.offsetWidth||e.offsetHeight||e.getClientRects().length)},S.ajaxSettings.xhr=function(){try{return new T.XMLHttpRequest}catch(e){}};var Jt={0:200,1223:204},en=S.ajaxSettings.xhr();y.cors=!!en&&"withCredentials"in en,y.ajax=en=!!en,S.ajaxTransport(function(i){var o,a;if(y.cors||en&&!i.crossDomain)return{send:function(e,t){var n,r=i.xhr();if(r.open(i.type,i.url,i.async,i.username,i.password),i.xhrFields)for(n in i.xhrFields)r[n]=i.xhrFields[n];for(n in i.mimeType&&r.overrideMimeType&&r.overrideMimeType(i.mimeType),i.crossDomain||e["X-Requested-With"]||(e["X-Requested-With"]="XMLHttpRequest"),e)r.setRequestHeader(n,e[n]);o=function(e){return function(){o&&(o=a=r.onload=r.onerror=r.onabort=r.ontimeout=r.onreadystatechange=null,"abort"===e?r.abort():"error"===e?"number"!=typeof 
r.status?t(0,"error"):t(r.status,r.statusText):t(Jt[r.status]||r.status,r.statusText,"text"!==(r.responseType||"text")||"string"!=typeof r.responseText?{binary:r.response}:{text:r.responseText},r.getAllResponseHeaders()))}},r.onload=o(),a=r.onerror=r.ontimeout=o("error"),void 0!==r.onabort?r.onabort=a:r.onreadystatechange=function(){4===r.readyState&&T.setTimeout(function(){o&&a()})},o=o("abort");try{r.send(i.hasContent&&i.data||null)}catch(e){if(o)throw e}},abort:function(){o&&o()}}}),S.ajaxPrefilter(function(e){e.crossDomain&&(e.contents.script=!1)}),S.ajaxSetup({accepts:{script:"text/javascript, application/javascript, application/ecmascript, application/x-ecmascript"},contents:{script:/\b(?:java|ecma)script\b/},converters:{"text script":function(e){return S.globalEval(e),e}}}),S.ajaxPrefilter("script",function(e){void 0===e.cache&&(e.cache=!1),e.crossDomain&&(e.type="GET")}),S.ajaxTransport("script",function(n){var r,i;if(n.crossDomain||n.scriptAttrs)return{send:function(e,t){r=S("<script>").attr(n.scriptAttrs||{}).prop({charset:n.scriptCharset,src:n.url}).on("load error",i=function(e){r.remove(),i=null,e&&t("error"===e.type?404:200,e.type)}),E.head.appendChild(r[0])},abort:function(){i&&i()}}});var tn,nn=[],rn=/(=)\?(?=&|$)|\?\?/;S.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var e=nn.pop()||S.expando+"_"+Ot.guid++;return this[e]=!0,e}}),S.ajaxPrefilter("json jsonp",function(e,t,n){var r,i,o,a=!1!==e.jsonp&&(rn.test(e.url)?"url":"string"==typeof e.data&&0===(e.contentType||").indexOf("application/x-www-form-urlencoded")&&rn.test(e.data)&&"data");if(a||"jsonp"===e.dataTypes[0])return r=e.jsonpCallback=b(e.jsonpCallback)?e.jsonpCallback():e.jsonpCallback,a?e[a]=e[a].replace(rn,"$1"+r):!1!==e.jsonp&&(e.url+=(Pt.test(e.url)?"&":"?")+e.jsonp+"="+r),e.converters["script json"]=function(){return o||S.error(r+" was not called"),o[0]},e.dataTypes[0]="json",i=T[r],T[r]=function(){o=arguments},n.always(function(){void 0===i?S(T).removeProp(r):T[r]=i,e[r]&&(e.jsonpCallback=t.jsonpCallback,nn.push(r)),o&&b(i)&&i(o[0]),o=i=void 0}),"script"}),y.createHTMLDocument=((tn=E.implementation.createHTMLDocument(").body).innerHTML="<form></form><form></form>",2===tn.childNodes.length),S.parseHTML=function(e,t,n){return"string"!=typeof e?[]:("boolean"==typeof t&&(n=t,t=!1),t||(y.createHTMLDocument?((r=(t=E.implementation.createHTMLDocument(")).createElement("base")).href=E.location.href,t.head.appendChild(r)):t=E),o=!n&&[],(i=N.exec(e))?[t.createElement(i[1])]:(i=be([e],t,o),o&&o.length&&S(o).remove(),S.merge([],i.childNodes)));var r,i,o},S.fn.load=function(e,t,n){var r,i,o,a=this,s=e.indexOf(" ");return-1<s&&(r=kt(e.slice(s)),e=e.slice(0,s)),b(t)?(n=t,t=void 0):t&&"object"==typeof t&&(i="POST"),0<a.length&&S.ajax({url:e,type:i||"GET",dataType:"html",data:t}).done(function(e){o=arguments,a.html(r?S("<div>").append(S.parseHTML(e)).find(r):e)}).always(n&&function(e,t){a.each(function(){n.apply(this,o||[e.responseText,t,e])})}),this},S.expr.pseudos.animated=function(t){return S.grep(S.timers,function(e){return t===e.elem}).length},S.offset={setOffset:function(e,t,n){var r,i,o,a,s,u,l=S.css(e,"position"),c=S(e),f={};"static"===l&&(e.style.position="relative"),s=c.offset(),o=S.css(e,"top"),u=S.css(e,"left"),i=("absolute"===l||"fixed"===l)&&-1<(o+u).indexOf("auto")?(a=(r=c.position()).top,r.left):(a=parseFloat(o)||0,parseFloat(u)||0),b(t)&&(t=t.call(e,n,S.extend({},s))),null!=t.top&&(f.top=t.top-s.top+a),null!=t.left&&(f.left=t.left-s.left+i),"using"in t?t.using.call(e,f):("number"==typeof 
f.top&&(f.top+="px"),"number"==typeof f.left&&(f.left+="px"),c.css(f))}},S.fn.extend({offset:function(t){if(arguments.length)return void 0===t?this:this.each(function(e){S.offset.setOffset(this,t,e)});var e,n,r=this[0];return r?r.getClientRects().length?(e=r.getBoundingClientRect(),n=r.ownerDocument.defaultView,{top:e.top+n.pageYOffset,left:e.left+n.pageXOffset}):{top:0,left:0}:void 0},position:function(){if(this[0]){var e,t,n,r=this[0],i={top:0,left:0};if("fixed"===S.css(r,"position"))t=r.getBoundingClientRect();else{for(t=this.offset(),n=r.ownerDocument,e=r.offsetParent||n.documentElement;e&&(e===n.body||e===n.documentElement)&&"static"===S.css(e,"position");)e=e.parentNode;e&&e!==r&&1===e.nodeType&&((i=S(e).offset()).top+=S.css(e,"borderTopWidth",!0),i.left+=S.css(e,"borderLeftWidth",!0))}return{top:t.top-i.top-S.css(r,"marginTop",!0),left:t.left-i.left-S.css(r,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){for(var e=this.offsetParent;e&&"static"===S.css(e,"position");)e=e.offsetParent;return e||re})}}),S.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(t,i){var o="pageYOffset"===i;S.fn[t]=function(e){return F(this,function(e,t,n){var r;if(m(e)?r=e:9===e.nodeType&&(r=e.defaultView),void 0===n)return r?r[i]:e[t];r?r.scrollTo(o?r.pageXOffset:n,o?n:r.pageYOffset):e[t]=n},t,e,arguments.length)}}),S.each(["top","left"],function(e,n){S.cssHooks[n]=Ze(y.pixelPosition,function(e,t){if(t)return t=Ke(e,n),Xe.test(t)?S(e).position()[n]+"px":t})}),S.each({Height:"height",Width:"width"},function(a,s){S.each({padding:"inner"+a,content:s,":"outer"+a},function(r,o){S.fn[o]=function(e,t){var n=arguments.length&&(r||"boolean"!=typeof e),i=r||(!0===e||!0===t?"margin":"border");return F(this,function(e,t,n){var r;return m(e)?0===o.indexOf("outer")?e["inner"+a]:e.document.documentElement["client"+a]:9===e.nodeType?(r=e.documentElement,Math.max(e.body["scroll"+a],r["scroll"+a],e.body["offset"+a],r["offset"+a],r["client"+a])):void 0===n?S.css(e,t,i):S.style(e,t,n,i)},s,n?e:void 0,n)}})}),S.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(e,t){S.fn[t]=function(e){return this.on(t,e)}}),S.fn.extend({bind:function(e,t,n){return this.on(e,null,t,n)},unbind:function(e,t){return this.off(e,null,t)},delegate:function(e,t,n,r){return this.on(t,e,n,r)},undelegate:function(e,t,n){return 1===arguments.length?this.off(e,"**"):this.off(t,e||"**",n)},hover:function(e,t){return this.mouseenter(e).mouseleave(t||e)}}),S.each("blur focus focusin focusout resize scroll click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup contextmenu".split(" "),function(e,n){S.fn[n]=function(e,t){return 0<arguments.length?this.on(n,null,e,t):this.trigger(n)}});var on=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g;S.proxy=function(e,t){var n,r,i;if("string"==typeof t&&(n=e[t],t=e,e=n),b(e))return r=s.call(arguments,2),(i=function(){return e.apply(t||this,r.concat(s.call(arguments)))}).guid=e.guid=e.guid||S.guid++,i},S.holdReady=function(e){e?S.readyWait++:S.ready(!0)},S.isArray=Array.isArray,S.parseJSON=JSON.parse,S.nodeName=A,S.isFunction=b,S.isWindow=m,S.camelCase=X,S.type=w,S.now=Date.now,S.isNumeric=function(e){var t=S.type(e);return("number"===t||"string"===t)&&!isNaN(e-parseFloat(e))},S.trim=function(e){return null==e?":(e+").replace(on,")},"function"==typeof define&&define.amd&&define("jquery",[],function(){return S});var an=T.jQuery,sn=T.$;return S.noConflict=function(e){return 
T.$===S&&(T.$=sn),e&&T.jQuery===S&&(T.jQuery=an),S},void 0===e&&(T.jQuery=T.$=S),S}),function(o){"use strict";o.fn.fitVids=function(e){var n={customSelector:null,ignore:null};if(!document.getElementById("fit-vids-style")){var t=document.head||document.getElementsByTagName("head")[0],r=document.createElement("div");r.innerHTML='<p>x</p><style id="fit-vids-style">.fluid-width-video-wrapper{width:100%;position:relative;padding:0;}.fluid-width-video-wrapper iframe,.fluid-width-video-wrapper object,.fluid-width-video-wrapper embed {position:absolute;top:0;left:0;width:100%;height:100%;}</style>',t.appendChild(r.childNodes[1])}return e&&o.extend(n,e),this.each(function(){var e=['iframe[src*="player.vimeo.com"]','iframe[src*="youtube.com"]','iframe[src*="youtube-nocookie.com"]','iframe[src*="kickstarter.com"][src*="video.html"]',"object","embed"];n.customSelector&&e.push(n.customSelector);var i=".fitvidsignore";n.ignore&&(i=i+", "+n.ignore);var t=o(this).find(e.join(","));(t=(t=t.not("object object")).not(i)).each(function(e){var t=o(this);if(!(0<t.parents(i).length||"embed"===this.tagName.toLowerCase()&&t.parent("object").length||t.parent(".fluid-width-video-wrapper").length)){t.css("height")||t.css("width")||!isNaN(t.attr("height"))&&!isNaN(t.attr("width"))||(t.attr("height",9),t.attr("width",16));var n=("object"===this.tagName.toLowerCase()||t.attr("height")&&!isNaN(parseInt(t.attr("height"),10))?parseInt(t.attr("height"),10):t.height())/(isNaN(parseInt(t.attr("width"),10))?t.width():parseInt(t.attr("width"),10));if(!t.attr("id")){var r="fitvid"+e;t.attr("id",r)}t.wrap('<div class="fluid-width-video-wrapper"></div>').parent(".fluid-width-video-wrapper").css("padding-top",100*n+"%"),t.removeAttr("height").removeAttr("width")}})})}}(window.jQuery||window.Zepto),$(function(){var r,i,e,o,t=$("nav.greedy-nav .greedy-nav__toggle"),a=$("nav.greedy-nav .visible-links"),s=$("nav.greedy-nav .hidden-links"),n=$("nav.greedy-nav"),u=$("nav.greedy-nav .site-logo"),l=$("nav.greedy-nav .site-logo img"),c=$("nav.greedy-nav .site-title"),f=$("nav.greedy-nav button.search__toggle");function d(){function n(e,t){i+=t,r+=1,o.push(i)}i=r=0,e=1e3,o=[],a.children().outerWidth(n),s.children().each(function(){var e,t;e=$(this),(t=e.clone()).css("visibility","hidden"),a.append(t),n(0,t.outerWidth()),t.remove()})}d();var p,h,m,g,v=$(window).width(),y=v<768?0:v<1024?1:v<1280?2:3;function b(){var e=(v=$(window).width())<768?0:v<1024?1:v<1280?2:3;e!==y&&d(),y=e,h=a.children().length,p=n.innerWidth()-(0!==u.length?u.outerWidth(!0):0)-c.outerWidth(!0)-(0!==f.length?f.outerWidth(!0):0)-(h!==o.length?t.outerWidth(!0):0),m=o[h-1],p<m?(a.children().last().prependTo(s),--h,b()):p+(h===o.length-1?t.outerWidth(!0):0)>o[h]&&(s.children().first().appendTo(a),h+=1,b()),t.attr("count",r-h),h===r?t.addClass("hidden"):t.removeClass("hidden")}$(window).resize(function(){b()}),t.on("click",function(){s.toggleClass("hidden"),clearTimeout(g)}),s.on("mouseleave",function(){g=setTimeout(function(){s.addClass("hidden")},e)}).on("mouseenter",function(){clearTimeout(g)}),0===l.length||l[0].complete||0!==l[0].naturalWidth?b():l.one("load error",b)}),function(e){"function"==typeof define&&define.amd?define(["jquery"],e):"object"==typeof exports?e(require("jquery")):e(window.jQuery||window.Zepto)}(function(c){function e(){}function f(e,t){m.ev.on("mfp"+e+x,t)}function d(e,t,n,r){var i=document.createElement("div");return i.className="mfp-"+e,n&&(i.innerHTML=n),r?t&&t.appendChild(i):(i=c(i),t&&i.appendTo(t)),i}function 
p(e,t){m.ev.triggerHandler("mfp"+e,t),m.st.callbacks&&(e=e.charAt(0).toLowerCase()+e.slice(1),m.st.callbacks[e]&&m.st.callbacks[e].apply(m,c.isArray(t)?t:[t]))}function h(e){return e===t&&m.currTemplate.closeBtn||(m.currTemplate.closeBtn=c(m.st.closeMarkup.replace("%title%",m.st.tClose)),t=e),m.currTemplate.closeBtn}function o(){c.magnificPopup.instance||((m=new e).init(),c.magnificPopup.instance=m)}var m,r,g,i,v,t,u="Close",l="BeforeClose",y="MarkupParse",b="Open",x=".mfp",w="mfp-ready",n="mfp-removing",a="mfp-prevent-close",s=!!window.jQuery,C=c(window);e.prototype={constructor:e,init:function(){var e=navigator.appVersion;m.isLowIE=m.isIE8=document.all&&!document.addEventListener,m.isAndroid=/android/gi.test(e),m.isIOS=/iphone|ipad|ipod/gi.test(e),m.supportsTransition=function(){var e=document.createElement("p").style,t=["ms","O","Moz","Webkit"];if(void 0!==e.transition)return!0;for(;t.length;)if(t.pop()+"Transition"in e)return!0;return!1}(),m.probablyMobile=m.isAndroid||m.isIOS||/(Opera Mini)|Kindle|webOS|BlackBerry|(Opera Mobi)|(Windows Phone)|IEMobile/i.test(navigator.userAgent),g=c(document),m.popupsCache={}},open:function(e){var t;if(!1===e.isObj){m.items=e.items.toArray(),m.index=0;var n,r=e.items;for(t=0;t<r.length;t++)if((n=r[t]).parsed&&(n=n.el[0]),n===e.el[0]){m.index=t;break}}else m.items=c.isArray(e.items)?e.items:[e.items],m.index=e.index||0;if(!m.isOpen){m.types=[],v="",e.mainEl&&e.mainEl.length?m.ev=e.mainEl.eq(0):m.ev=g,e.key?(m.popupsCache[e.key]||(m.popupsCache[e.key]={}),m.currTemplate=m.popupsCache[e.key]):m.currTemplate={},m.st=c.extend(!0,{},c.magnificPopup.defaults,e),m.fixedContentPos="auto"===m.st.fixedContentPos?!m.probablyMobile:m.st.fixedContentPos,m.st.modal&&(m.st.closeOnContentClick=!1,m.st.closeOnBgClick=!1,m.st.showCloseBtn=!1,m.st.enableEscapeKey=!1),m.bgOverlay||(m.bgOverlay=d("bg").on("click"+x,function(){m.close()}),m.wrap=d("wrap").attr("tabindex",-1).on("click"+x,function(e){m._checkIfClose(e.target)&&m.close()}),m.container=d("container",m.wrap)),m.contentContainer=d("content"),m.st.preloader&&(m.preloader=d("preloader",m.container,m.st.tLoading));var i=c.magnificPopup.modules;for(t=0;t<i.length;t++){var o=i[t];o=o.charAt(0).toUpperCase()+o.slice(1),m["init"+o].call(m)}p("BeforeOpen"),m.st.showCloseBtn&&(m.st.closeBtnInside?(f(y,function(e,t,n,r){n.close_replaceWith=h(r.type)}),v+=" mfp-close-btn-in"):m.wrap.append(h())),m.st.alignTop&&(v+=" mfp-align-top"),m.fixedContentPos?m.wrap.css({overflow:m.st.overflowY,overflowX:"hidden",overflowY:m.st.overflowY}):m.wrap.css({top:C.scrollTop(),position:"absolute"}),!1!==m.st.fixedBgPos&&("auto"!==m.st.fixedBgPos||m.fixedContentPos)||m.bgOverlay.css({height:g.height(),position:"absolute"}),m.st.enableEscapeKey&&g.on("keyup"+x,function(e){27===e.keyCode&&m.close()}),C.on("resize"+x,function(){m.updateSize()}),m.st.closeOnContentClick||(v+=" mfp-auto-cursor"),v&&m.wrap.addClass(v);var a=m.wH=C.height(),s={};if(m.fixedContentPos&&m._hasScrollBar(a)){var u=m._getScrollbarSize();u&&(s.marginRight=u)}m.fixedContentPos&&(m.isIE7?c("body, html").css("overflow","hidden"):s.overflow="hidden");var l=m.st.mainClass;return m.isIE7&&(l+=
mfp-ie7"),l&&m._addClassToMFP(l),m.updateItemHTML(),p("BuildControls"),c("html").css(s),m.bgOverlay.add(m.wrap).prependTo(m.st.prependTo||c(document.body)),m._lastFocusedEl=document.activeElement,setTimeout(function(){m.content?(m._addClassToMFP(w),m._setFocus()):m.bgOverlay.addClass(w),g.on("focusin"+x,m._onFocusIn)},16),m.isOpen=!0,m.updateSize(a),p(b),e}m.updateItemHTML()},close:function(){m.isOpen&&(p(l),m.isOpen=!1,m.st.removalDelay&&!m.isLowIE&&m.supportsTransition?(m._addClassToMFP(n),setTimeout(function(){m._close()},m.st.removalDelay)):m._close())},_close:function(){p(u);var e=n+" "+w+" ";if(m.bgOverlay.detach(),m.wrap.detach(),m.container.empty(),m.st.mainClass&&(e+=m.st.mainClass+" "),m._removeClassFromMFP(e),m.fixedContentPos){var t={marginRight:"};m.isIE7?c("body, html").css("overflow","):t.overflow=",c("html").css(t)}g.off("keyup.mfp focusin"+x),m.ev.off(x),m.wrap.attr("class","mfp-wrap").removeAttr("style"),m.bgOverlay.attr("class","mfp-bg"),m.container.attr("class","mfp-container"),!m.st.showCloseBtn||m.st.closeBtnInside&&!0!==m.currTemplate[m.currItem.type]||m.currTemplate.closeBtn&&m.currTemplate.closeBtn.detach(),m.st.autoFocusLast&&m._lastFocusedEl&&c(m._lastFocusedEl).focus(),m.currItem=null,m.content=null,m.currTemplate=null,m.prevHeight=0,p("AfterClose")},updateSize:function(e){if(m.isIOS){var t=document.documentElement.clientWidth/window.innerWidth,n=window.innerHeight*t;m.wrap.css("height",n),m.wH=n}else m.wH=e||C.height();m.fixedContentPos||m.wrap.css("height",m.wH),p("Resize")},updateItemHTML:function(){var e=m.items[m.index];m.contentContainer.detach(),m.content&&m.content.detach(),e.parsed||(e=m.parseEl(m.index));var t=e.type;if(p("BeforeChange",[m.currItem?m.currItem.type:",t]),m.currItem=e,!m.currTemplate[t]){var n=!!m.st[t]&&m.st[t].markup;p("FirstMarkupParse",n),m.currTemplate[t]=!n||c(n)}i&&i!==e.type&&m.container.removeClass("mfp-"+i+"-holder");var r=m["get"+t.charAt(0).toUpperCase()+t.slice(1)](e,m.currTemplate[t]);m.appendContent(r,t),e.preloaded=!0,p("Change",e),i=e.type,m.container.prepend(m.contentContainer),p("AfterChange")},appendContent:function(e,t){(m.content=e)?m.st.showCloseBtn&&m.st.closeBtnInside&&!0===m.currTemplate[t]?m.content.find(".mfp-close").length||m.content.append(h()):m.content=e:m.content=",p("BeforeAppend"),m.container.addClass("mfp-"+t+"-holder"),m.contentContainer.append(m.content)},parseEl:function(e){var t,n=m.items[e];if((n=n.tagName?{el:c(n)}:(t=n.type,{data:n,src:n.src})).el){for(var r=m.types,i=0;i<r.length;i++)if(n.el.hasClass("mfp-"+r[i])){t=r[i];break}n.src=n.el.attr("data-mfp-src"),n.src||(n.src=n.el.attr("href"))}return n.type=t||m.st.type||"inline",n.index=e,n.parsed=!0,m.items[e]=n,p("ElementParse",n),m.items[e]},addGroup:function(t,n){function e(e){e.mfpEl=this,m._openClick(e,t,n)}var r="click.magnificPopup";(n=n||{}).mainEl=t,n.items?(n.isObj=!0,t.off(r).on(r,e)):(n.isObj=!1,n.delegate?t.off(r).on(r,n.delegate,e):(n.items=t).off(r).on(r,e))},_openClick:function(e,t,n){if((void 0!==n.midClick?n.midClick:c.magnificPopup.defaults.midClick)||!(2===e.which||e.ctrlKey||e.metaKey||e.altKey||e.shiftKey)){var r=void 0!==n.disableOn?n.disableOn:c.magnificPopup.defaults.disableOn;if(r)if(c.isFunction(r)){if(!r.call(m))return!0}else if(C.width()<r)return!0;e.type&&(e.preventDefault(),m.isOpen&&e.stopPropagation()),n.el=c(e.mfpEl),n.delegate&&(n.items=t.find(n.delegate)),m.open(n)}},updateStatus:function(e,t){if(m.preloader){r!==e&&m.container.removeClass("mfp-s-"+r),t||"loading"!==e||(t=m.st.tLoading);var 
n={status:e,text:t};p("UpdateStatus",n),e=n.status,t=n.text,m.preloader.html(t),m.preloader.find("a").on("click",function(e){e.stopImmediatePropagation()}),m.container.addClass("mfp-s-"+e),r=e}},_checkIfClose:function(e){if(!c(e).hasClass(a)){var t=m.st.closeOnContentClick,n=m.st.closeOnBgClick;if(t&&n)return!0;if(!m.content||c(e).hasClass("mfp-close")||m.preloader&&e===m.preloader[0])return!0;if(e===m.content[0]||c.contains(m.content[0],e)){if(t)return!0}else if(n&&c.contains(document,e))return!0;return!1}},_addClassToMFP:function(e){m.bgOverlay.addClass(e),m.wrap.addClass(e)},_removeClassFromMFP:function(e){this.bgOverlay.removeClass(e),m.wrap.removeClass(e)},_hasScrollBar:function(e){return(m.isIE7?g.height():document.body.scrollHeight)>(e||C.height())},_setFocus:function(){(m.st.focus?m.content.find(m.st.focus).eq(0):m.wrap).focus()},_onFocusIn:function(e){if(e.target!==m.wrap[0]&&!c.contains(m.wrap[0],e.target))return m._setFocus(),!1},_parseMarkup:function(i,e,t){var o;t.data&&(e=c.extend(t.data,e)),p(y,[i,e,t]),c.each(e,function(e,t){if(void 0===t||!1===t)return!0;if(1<(o=e.split("_")).length){var n=i.find(x+"-"+o[0]);if(0<n.length){var r=o[1];"replaceWith"===r?n[0]!==t[0]&&n.replaceWith(t):"img"===r?n.is("img")?n.attr("src",t):n.replaceWith(c("<img>").attr("src",t).attr("class",n.attr("class"))):n.attr(o[1],t)}}else i.find(x+"-"+e).html(t)})},_getScrollbarSize:function(){if(void 0===m.scrollbarSize){var e=document.createElement("div");e.style.cssText="width: 99px; height: 99px; overflow: scroll; position: absolute; top: -9999px;",document.body.appendChild(e),m.scrollbarSize=e.offsetWidth-e.clientWidth,document.body.removeChild(e)}return m.scrollbarSize}},c.magnificPopup={instance:null,proto:e.prototype,modules:[],open:function(e,t){return o(),(e=e?c.extend(!0,{},e):{}).isObj=!0,e.index=t||0,this.instance.open(e)},close:function(){return c.magnificPopup.instance&&c.magnificPopup.instance.close()},registerModule:function(e,t){t.options&&(c.magnificPopup.defaults[e]=t.options),c.extend(this.proto,t.proto),this.modules.push(e)},defaults:{disableOn:0,key:null,midClick:!1,mainClass:",preloader:!0,focus:",closeOnContentClick:!1,closeOnBgClick:!0,closeBtnInside:!0,showCloseBtn:!0,enableEscapeKey:!0,modal:!1,alignTop:!1,removalDelay:0,prependTo:null,fixedContentPos:"auto",fixedBgPos:"auto",overflowY:"auto",closeMarkup:'<button title="%title%" type="button" class="mfp-close">&#215;</button>',tClose:"Close (Esc)",tLoading:"Loading...",autoFocusLast:!0}},c.fn.magnificPopup=function(e){o();var t=c(this);if("string"==typeof e)if("open"===e){var n,r=s?t.data("magnificPopup"):t[0].magnificPopup,i=parseInt(arguments[1],10)||0;n=r.items?r.items[i]:(n=t,r.delegate&&(n=n.find(r.delegate)),n.eq(i)),m._openClick({mfpEl:n},t,r)}else m.isOpen&&m[e].apply(m,Array.prototype.slice.call(arguments,1));else e=c.extend(!0,{},e),s?t.data("magnificPopup",e):t[0].magnificPopup=e,m.addGroup(t,e);return t};function T(){k&&(S.after(k.addClass(E)).detach(),k=null)}var E,S,k,A="inline";c.magnificPopup.registerModule(A,{options:{hiddenClass:"hide",markup:",tNotFound:"Content not found"},proto:{initInline:function(){m.types.push(A),f(u+"."+A,function(){T()})},getInline:function(e,t){if(T(),e.src){var n=m.st.inline,r=c(e.src);if(r.length){var i=r[0].parentNode;i&&i.tagName&&(S||(E=n.hiddenClass,S=d(E),E="mfp-"+E),k=r.after(S).detach().removeClass(E)),m.updateStatus("ready")}else m.updateStatus("error",n.tNotFound),r=c("<div>");return e.inlineElement=r}return m.updateStatus("ready"),m._parseMarkup(t,{},e),t}}});function 
N(){I&&c(document.body).removeClass(I)}function j(){N(),m.req&&m.req.abort()}var I,L="ajax";c.magnificPopup.registerModule(L,{options:{settings:null,cursor:"mfp-ajax-cur",tError:'<a href="%url%">The content</a> could not be loaded.'},proto:{initAjax:function(){m.types.push(L),I=m.st.ajax.cursor,f(u+"."+L,j),f("BeforeChange."+L,j)},getAjax:function(i){I&&c(document.body).addClass(I),m.updateStatus("loading");var e=c.extend({url:i.src,success:function(e,t,n){var r={data:e,xhr:n};p("ParseAjax",r),m.appendContent(c(r.data),L),i.finished=!0,N(),m._setFocus(),setTimeout(function(){m.wrap.addClass(w)},16),m.updateStatus("ready"),p("AjaxContentAdded")},error:function(){N(),i.finished=i.loadError=!0,m.updateStatus("error",m.st.ajax.tError.replace("%url%",i.src))}},m.st.ajax.settings);return m.req=c.ajax(e),""}}});var D;c.magnificPopup.registerModule("image",{options:{markup:'<div class="mfp-figure"><div class="mfp-close"></div><figure><div class="mfp-img"></div><figcaption><div class="mfp-bottom-bar"><div class="mfp-title"></div><div class="mfp-counter"></div></div></figcaption></figure></div>',cursor:"mfp-zoom-out-cur",titleSrc:"title",verticalFit:!0,tError:'<a href="%url%">The image</a> could not be loaded.'},proto:{initImage:function(){var e=m.st.image,t=".image";m.types.push("image"),f(b+t,function(){"image"===m.currItem.type&&e.cursor&&c(document.body).addClass(e.cursor)}),f(u+t,function(){e.cursor&&c(document.body).removeClass(e.cursor),C.off("resize"+x)}),f("Resize"+t,m.resizeImage),m.isLowIE&&f("AfterChange",m.resizeImage)},resizeImage:function(){var e=m.currItem;if(e&&e.img&&m.st.image.verticalFit){var t=0;m.isLowIE&&(t=parseInt(e.img.css("padding-top"),10)+parseInt(e.img.css("padding-bottom"),10)),e.img.css("max-height",m.wH-t)}},_onImageHasSize:function(e){e.img&&(e.hasSize=!0,D&&clearInterval(D),e.isCheckingImgSize=!1,p("ImageHasSize",e),e.imgHidden&&(m.content&&m.content.removeClass("mfp-loading"),e.imgHidden=!1))},findImageSize:function(t){var n=0,r=t.img[0],i=function(e){D&&clearInterval(D),D=setInterval(function(){0<r.naturalWidth?m._onImageHasSize(t):(200<n&&clearInterval(D),3===++n?i(10):40===n?i(50):100===n&&i(500))},e)};i(1)},getImage:function(e,t){var n=0,r=function(){e&&(e.img[0].complete?(e.img.off(".mfploader"),e===m.currItem&&(m._onImageHasSize(e),m.updateStatus("ready")),e.hasSize=!0,e.loaded=!0,p("ImageLoadComplete")):++n<200?setTimeout(r,100):i())},i=function(){e&&(e.img.off(".mfploader"),e===m.currItem&&(m._onImageHasSize(e),m.updateStatus("error",o.tError.replace("%url%",e.src))),e.hasSize=!0,e.loaded=!0,e.loadError=!0)},o=m.st.image,a=t.find(".mfp-img");if(a.length){var s=document.createElement("img");s.className="mfp-img",e.el&&e.el.find("img").length&&(s.alt=e.el.find("img").attr("alt")),e.img=c(s).on("load.mfploader",r).on("error.mfploader",i),s.src=e.src,a.is("img")&&(e.img=e.img.clone()),0<(s=e.img[0]).naturalWidth?e.hasSize=!0:s.width||(e.hasSize=!1)}return m._parseMarkup(t,{title:function(e){if(e.data&&void 0!==e.data.title)return e.data.title;var t=m.st.image.titleSrc;if(t){if(c.isFunction(t))return t.call(m,e);if(e.el)return e.el.attr(t)||""}return""}(e),img_replaceWith:e.img},e),m.resizeImage(),e.hasSize?(D&&clearInterval(D),e.loadError?(t.addClass("mfp-loading"),m.updateStatus("error",o.tError.replace("%url%",e.src))):(t.removeClass("mfp-loading"),m.updateStatus("ready"))):(m.updateStatus("loading"),e.loading=!0,e.hasSize||(e.imgHidden=!0,t.addClass("mfp-loading"),m.findImageSize(e))),t}}});var
O;c.magnificPopup.registerModule("zoom",{options:{enabled:!1,easing:"ease-in-out",duration:300,opener:function(e){return e.is("img")?e:e.find("img")}},proto:{initZoom:function(){var e,o=m.st.zoom,t=".zoom";if(o.enabled&&m.supportsTransition){function n(e){var t=e.clone().removeAttr("style").removeAttr("class").addClass("mfp-animated-image"),n="all "+o.duration/1e3+"s "+o.easing,r={position:"fixed",zIndex:9999,left:0,top:0,"-webkit-backface-visibility":"hidden"},i="transition";return r["-webkit-"+i]=r["-moz-"+i]=r["-o-"+i]=r[i]=n,t.css(r),t}function r(){m.content.css("visibility","visible")}var i,a,s=o.duration;f("BuildControls"+t,function(){if(m._allowZoom()){if(clearTimeout(i),m.content.css("visibility","hidden"),!(e=m._getItemToZoom()))return void r();(a=n(e)).css(m._getOffset()),m.wrap.append(a),i=setTimeout(function(){a.css(m._getOffset(!0)),i=setTimeout(function(){r(),setTimeout(function(){a.remove(),e=a=null,p("ZoomAnimationEnded")},16)},s)},16)}}),f(l+t,function(){if(m._allowZoom()){if(clearTimeout(i),m.st.removalDelay=s,!e){if(!(e=m._getItemToZoom()))return;a=n(e)}a.css(m._getOffset(!0)),m.wrap.append(a),m.content.css("visibility","hidden"),setTimeout(function(){a.css(m._getOffset())},16)}}),f(u+t,function(){m._allowZoom()&&(r(),a&&a.remove(),e=null)})}},_allowZoom:function(){return"image"===m.currItem.type},_getItemToZoom:function(){return!!m.currItem.hasSize&&m.currItem.img},_getOffset:function(e){var t,n=(t=e?m.currItem.img:m.st.zoom.opener(m.currItem.el||m.currItem)).offset(),r=parseInt(t.css("padding-top"),10),i=parseInt(t.css("padding-bottom"),10);n.top-=c(window).scrollTop()-r;var o={width:t.width(),height:(s?t.innerHeight():t[0].offsetHeight)-i-r};return void 0===O&&(O=void 0!==document.createElement("p").style.MozTransform),O?o["-moz-transform"]=o.transform="translate("+n.left+"px,"+n.top+"px)":(o.left=n.left,o.top=n.top),o}}});function P(e){if(m.currTemplate[H]){var t=m.currTemplate[H].find("iframe");t.length&&(e||(t[0].src="//about:blank"),m.isIE8&&t.css("display",e?"block":"none"))}}var H="iframe";c.magnificPopup.registerModule(H,{options:{markup:'<div class="mfp-iframe-scaler"><div class="mfp-close"></div><iframe class="mfp-iframe" src="//about:blank" frameborder="0" allowfullscreen></iframe></div>',srcAction:"iframe_src",patterns:{youtube:{index:"youtube.com",id:"v=",src:"//www.youtube.com/embed/%id%?autoplay=1"},vimeo:{index:"vimeo.com/",id:"/",src:"//player.vimeo.com/video/%id%?autoplay=1"},gmaps:{index:"//maps.google.",src:"%id%&output=embed"}}},proto:{initIframe:function(){m.types.push(H),f("BeforeChange",function(e,t,n){t!==n&&(t===H?P():n===H&&P(!0))}),f(u+"."+H,function(){P()})},getIframe:function(e,t){var n=e.src,r=m.st.iframe;c.each(r.patterns,function(){if(-1<n.indexOf(this.index))return this.id&&(n="string"==typeof this.id?n.substr(n.lastIndexOf(this.id)+this.id.length,n.length):this.id.call(this,n)),n=this.src.replace("%id%",n),!1});var i={};return r.srcAction&&(i[r.srcAction]=n),m._parseMarkup(t,i,e),m.updateStatus("ready"),t}}});function q(e){var t=m.items.length;return t-1<e?e-t:e<0?t+e:e}function M(e,t,n){return e.replace(/%curr%/gi,t+1).replace(/%total%/gi,n)}c.magnificPopup.registerModule("gallery",{options:{enabled:!1,arrowMarkup:'<button title="%title%" type="button" class="mfp-arrow mfp-arrow-%dir%"></button>',preload:[0,2],navigateByImgClick:!0,arrows:!0,tPrev:"Previous (Left arrow key)",tNext:"Next (Right arrow key)",tCounter:"%curr% of %total%"},proto:{initGallery:function(){var 
o=m.st.gallery,e=".mfp-gallery";if(m.direction=!0,!o||!o.enabled)return!1;v+=" mfp-gallery",f(b+e,function(){o.navigateByImgClick&&m.wrap.on("click"+e,".mfp-img",function(){if(1<m.items.length)return m.next(),!1}),g.on("keydown"+e,function(e){37===e.keyCode?m.prev():39===e.keyCode&&m.next()})}),f("UpdateStatus"+e,function(e,t){t.text&&(t.text=M(t.text,m.currItem.index,m.items.length))}),f(y+e,function(e,t,n,r){var i=m.items.length;n.counter=1<i?M(o.tCounter,r.index,i):"}),f("BuildControls"+e,function(){if(1<m.items.length&&o.arrows&&!m.arrowLeft){var e=o.arrowMarkup,t=m.arrowLeft=c(e.replace(/%title%/gi,o.tPrev).replace(/%dir%/gi,"left")).addClass(a),n=m.arrowRight=c(e.replace(/%title%/gi,o.tNext).replace(/%dir%/gi,"right")).addClass(a);t.click(function(){m.prev()}),n.click(function(){m.next()}),m.container.append(t.add(n))}}),f("Change"+e,function(){m._preloadTimeout&&clearTimeout(m._preloadTimeout),m._preloadTimeout=setTimeout(function(){m.preloadNearbyImages(),m._preloadTimeout=null},16)}),f(u+e,function(){g.off(e),m.wrap.off("click"+e),m.arrowRight=m.arrowLeft=null})},next:function(){m.direction=!0,m.index=q(m.index+1),m.updateItemHTML()},prev:function(){m.direction=!1,m.index=q(m.index-1),m.updateItemHTML()},goTo:function(e){m.direction=e>=m.index,m.index=e,m.updateItemHTML()},preloadNearbyImages:function(){var e,t=m.st.gallery.preload,n=Math.min(t[0],m.items.length),r=Math.min(t[1],m.items.length);for(e=1;e<=(m.direction?r:n);e++)m._preloadItem(m.index+e);for(e=1;e<=(m.direction?n:r);e++)m._preloadItem(m.index-e)},_preloadItem:function(e){if(e=q(e),!m.items[e].preloaded){var t=m.items[e];t.parsed||(t=m.parseEl(e)),p("LazyLoad",t),"image"===t.type&&(t.img=c('<img class="mfp-img" />').on("load.mfploader",function(){t.hasSize=!0}).on("error.mfploader",function(){t.hasSize=!0,t.loadError=!0,p("LazyLoadError",t)}).attr("src",t.src)),t.preloaded=!0}}}});var _="retina";c.magnificPopup.registerModule(_,{options:{replaceSrc:function(e){return e.src.replace(/\.\w+$/,function(e){return"@2x"+e})},ratio:1},proto:{initRetina:function(){if(1<window.devicePixelRatio){var n=m.st.retina,r=n.ratio;1<(r=isNaN(r)?r():r)&&(f("ImageHasSize."+_,function(e,t){t.img.css({"max-width":t.img[0].naturalWidth/r,width:"100%"})}),f("ElementParse."+_,function(e,t){t.src=n.replaceSrc(t,r)}))}}}}),o()}),function(e,c){var r,t=e.jQuery||e.Cowboy||(e.Cowboy={});t.throttle=r=function(i,o,a,s){var u,l=0;function e(){var e=this,t=new Date-l,n=arguments;function r(){l=+new Date,a.apply(e,n)}s&&!u&&r(),u&&clearTimeout(u),s===c&&i<t?r():!0!==o&&(u=setTimeout(s?function(){u=c}:r,s===c?i-t:i))}return"boolean"!=typeof o&&(s=a,a=o,o=c),t.guid&&(e.guid=a.guid=a.guid||t.guid++),e},t.debounce=function(e,t,n){return n===c?r(e,t,!1):r(e,n,!1!==t)}}(this),function(e,t){"function"==typeof define&&define.amd?define([],function(){return t(e)}):"object"==typeof exports?module.exports=t(e):e.SmoothScroll=t(e)}("undefined"!=typeof global?global:"undefined"!=typeof window?window:this,function(A){"use strict";function N(){var n={};return Array.prototype.forEach.call(arguments,function(e){for(var t in e){if(!e.hasOwnProperty(t))return;n[t]=e[t]}}),n}function o(e){"#"===e.charAt(0)&&(e=e.substr(1));for(var t,n=String(e),r=n.length,i=-1,o=",a=n.charCodeAt(0);++i<r;){if(0===(t=n.charCodeAt(i)))throw new InvalidCharacterError("Invalid character: the input contains U+0000.");1<=t&&t<=31||127==t||0===i&&48<=t&&t<=57||1===i&&48<=t&&t<=57&&45===a?o+="\\"+t.toString(16)+" 
":o+=128<=t||45===t||95===t||48<=t&&t<=57||65<=t&&t<=90||97<=t&&t<=122?n.charAt(i):"\\"+n.charAt(i)}return"#"+o}function j(){return Math.max(document.body.scrollHeight,document.documentElement.scrollHeight,document.body.offsetHeight,document.documentElement.offsetHeight,document.body.clientHeight,document.documentElement.clientHeight)}function I(e){return e?(t=e,parseInt(A.getComputedStyle(t).height,10)+e.offsetTop):0;var t}function L(e,t,n,r){if(t.emitEvents&&"function"==typeof A.CustomEvent){var i=new CustomEvent(e,{bubbles:!0,detail:{anchor:n,toggle:r}});document.dispatchEvent(i)}}var D={ignore:"[data-scroll-ignore]",header:null,topOnEmptyHash:!0,speed:500,speedAsDuration:!1,durationMax:null,durationMin:null,clip:!0,offset:0,easing:"easeInOutCubic",customEasing:null,updateURL:!0,popstate:!0,emitEvents:!0};return function(r,e){var T,i,E,S,k={};k.cancelScroll=function(e){cancelAnimationFrame(S),S=null,e||L("scrollCancel",T)},k.animateScroll=function(a,s,e){k.cancelScroll();var u=N(T||D,e||{}),l="[object Number]"===Object.prototype.toString.call(a),t=l||!a.tagName?null:a;if(l||t){var c=A.pageYOffset;u.header&&!E&&(E=document.querySelector(u.header));var n,r,i,f,o,d,p,h,m=I(E),g=l?a:function(e,t,n,r){var i=0;if(e.offsetParent)for(;i+=e.offsetTop,e=e.offsetParent;);return i=Math.max(i-t-n,0),r&&(i=Math.min(i,j()-A.innerHeight)),i}(t,m,parseInt("function"==typeof u.offset?u.offset(a,s):u.offset,10),u.clip),v=g-c,y=j(),b=0,x=(n=v,i=(r=u).speedAsDuration?r.speed:Math.abs(n/1e3*r.speed),r.durationMax&&i>r.durationMax?r.durationMax:r.durationMin&&i<r.durationMin?r.durationMin:parseInt(i,10)),w=function(e,t){var n,r,i,o=A.pageYOffset;if(e==t||o==t||(c<t&&A.innerHeight+o)>=y)return k.cancelScroll(!0),r=t,i=l,0===(n=a)&&document.body.focus(),i||(n.focus(),document.activeElement!==n&&(n.setAttribute("tabindex","-1"),n.focus(),n.style.outline="none"),A.scrollTo(0,r)),L("scrollStop",u,a,s),!(S=f=null)},C=function(e){var t,n,r;b+=e-(f=f||e),d=c+v*(n=o=1<(o=0===x?0:b/x)?1:o,"easeInQuad"===(t=u).easing&&(r=n*n),"easeOutQuad"===t.easing&&(r=n*(2-n)),"easeInOutQuad"===t.easing&&(r=n<.5?2*n*n:(4-2*n)*n-1),"easeInCubic"===t.easing&&(r=n*n*n),"easeOutCubic"===t.easing&&(r=--n*n*n+1),"easeInOutCubic"===t.easing&&(r=n<.5?4*n*n*n:(n-1)*(2*n-2)*(2*n-2)+1),"easeInQuart"===t.easing&&(r=n*n*n*n),"easeOutQuart"===t.easing&&(r=1- --n*n*n*n),"easeInOutQuart"===t.easing&&(r=n<.5?8*n*n*n*n:1-8*--n*n*n*n),"easeInQuint"===t.easing&&(r=n*n*n*n*n),"easeOutQuint"===t.easing&&(r=1+--n*n*n*n*n),"easeInOutQuint"===t.easing&&(r=n<.5?16*n*n*n*n*n:1+16*--n*n*n*n*n),t.customEasing&&(r=t.customEasing(n)),r||n),A.scrollTo(0,Math.floor(d)),w(d,g)||(S=A.requestAnimationFrame(C),f=e)};0===A.pageYOffset&&A.scrollTo(0,0),p=a,h=u,l||history.pushState&&h.updateURL&&history.pushState({smoothScroll:JSON.stringify(h),anchor:p.id},document.title,p===document.documentElement?"#top":"#"+p.id),"matchMedia"in A&&A.matchMedia("(prefers-reduced-motion)").matches?A.scrollTo(0,Math.floor(g)):(L("scrollStart",u,a,s),k.cancelScroll(!0),A.requestAnimationFrame(C))}};function t(e){if(!e.defaultPrevented&&!(0!==e.button||e.metaKey||e.ctrlKey||e.shiftKey)&&"closest"in e.target&&(i=e.target.closest(r))&&"a"===i.tagName.toLowerCase()&&!e.target.closest(T.ignore)&&i.hostname===A.location.hostname&&i.pathname===A.location.pathname&&/#/.test(i.href)){var t,n;try{t=o(decodeURIComponent(i.hash))}catch(e){t=o(i.hash)}if("#"===t){if(!T.topOnEmptyHash)return;n=document.documentElement}else 
n=document.querySelector(t);(n=n||"#top"!==t?n:document.documentElement)&&(e.preventDefault(),function(e){if(history.replaceState&&e.updateURL&&!history.state){var t=A.location.hash;t=t||"",history.replaceState({smoothScroll:JSON.stringify(e),anchor:t||A.pageYOffset},document.title,t||A.location.href)}}(T),k.animateScroll(n,i))}}function n(e){if(null!==history.state&&history.state.smoothScroll&&history.state.smoothScroll===JSON.stringify(T)){var t=history.state.anchor;"string"==typeof t&&t&&!(t=document.querySelector(o(history.state.anchor)))||k.animateScroll(t,null,{updateURL:!1})}}k.destroy=function(){T&&(document.removeEventListener("click",t,!1),A.removeEventListener("popstate",n,!1),k.cancelScroll(),S=E=i=T=null)};return function(){if(!("querySelector"in document&&"addEventListener"in A&&"requestAnimationFrame"in A&&"closest"in A.Element.prototype))throw"Smooth Scroll: This browser does not support the required JavaScript methods and browser APIs.";k.destroy(),T=N(D,e||{}),E=T.header?document.querySelector(T.header):null,document.addEventListener("click",t,!1),T.updateURL&&T.popstate&&A.addEventListener("popstate",n,!1)}(),k}}),function(e,t){"function"==typeof define&&define.amd?define([],function(){return t(e)}):"object"==typeof exports?module.exports=t(e):e.Gumshoe=t(e)}("undefined"!=typeof global?global:"undefined"!=typeof window?window:this,function(c){"use strict";function f(e,t,n){if(n.settings.events){var r=new CustomEvent(e,{bubbles:!0,cancelable:!0,detail:n});t.dispatchEvent(r)}}function n(e){var t=0;if(e.offsetParent)for(;e;)t+=e.offsetTop,e=e.offsetParent;return 0<=t?t:0}function d(e){e&&e.sort(function(e,t){return n(e.content)<n(t.content)?-1:1})}function a(e,t,n){var r,i=e.getBoundingClientRect(),o="function"==typeof(r=t).offset?parseFloat(r.offset()):parseFloat(r.offset);return n?parseInt(i.bottom,10)<(c.innerHeight||document.documentElement.clientHeight):parseInt(i.top,10)<=o}function s(){return c.innerHeight+c.pageYOffset>=Math.max(document.body.scrollHeight,document.documentElement.scrollHeight,document.body.offsetHeight,document.documentElement.offsetHeight,document.body.clientHeight,document.documentElement.clientHeight)}function p(e,t){var n,r,i=e[e.length-1];if(n=i,r=t,s()&&a(n.content,r,!0))return i;for(var o=e.length-1;0<=o;o--)if(a(e[o].content,t))return e[o]}function h(e,t){if(e){var n=e.nav.closest("li");n&&(n.classList.remove(t.navClass),e.content.classList.remove(t.contentClass),r(n,t),f("gumshoeDeactivate",n,{link:e.nav,content:e.content,settings:t}))}}var m={navClass:"active",contentClass:"active",nested:!1,nestedClass:"active",offset:0,reflow:!1,events:!0},r=function(e,t){if(t.nested){var n=e.parentNode.closest("li");n&&(n.classList.remove(t.nestedClass),r(n,t))}},g=function(e,t){if(t.nested){var n=e.parentNode.closest("li");n&&(n.classList.add(t.nestedClass),g(n,t))}};return function(e,t){var n,r,i,o,a,s={};s.setup=function(){n=document.querySelectorAll(e),r=[],Array.prototype.forEach.call(n,function(e){var t=document.getElementById(decodeURIComponent(e.hash.substr(1)));t&&r.push({nav:e,content:t})}),d(r)},s.detect=function(){var e=p(r,a);e?i&&e.content===i.content||(h(i,a),function(e,t){if(e){var n=e.nav.closest("li");n&&(n.classList.add(t.navClass),e.content.classList.add(t.contentClass),g(n,t),f("gumshoeActivate",n,{link:e.nav,content:e.content,settings:t}))}}(e,a),i=e):i&&(h(i,a),i=null)};function u(e){o&&c.cancelAnimationFrame(o),o=c.requestAnimationFrame(s.detect)}function
l(e){o&&c.cancelAnimationFrame(o),o=c.requestAnimationFrame(function(){d(r),s.detect()})}s.destroy=function(){i&&h(i,a),c.removeEventListener("scroll",u,!1),a.reflow&&c.removeEventListener("resize",l,!1),a=o=i=n=r=null};return a=function(){var n={};return Array.prototype.forEach.call(arguments,function(e){for(var t in e){if(!e.hasOwnProperty(t))return;n[t]=e[t]}}),n}(m,t||{}),s.setup(),s.detect(),c.addEventListener("scroll",u,!1),a.reflow&&c.addEventListener("resize",l,!1),s}}),$(document).ready(function(){$("#main").fitVids();function e(){(0===$(".author__urls-wrapper button").length?1024<$(window).width():!$(".author__urls-wrapper button").is(":visible"))?$(".sidebar").addClass("sticky"):$(".sidebar").removeClass("sticky")}e(),$(window).resize(function(){e()}),$(".author__urls-wrapper button").on("click",function(){$(".author__urls").toggleClass("is--visible"),$(".author__urls-wrapper button").toggleClass("open")}),$(document).keyup(function(e){27===e.keyCode&&$(".initial-content").hasClass("is--hidden")&&($(".search-content").toggleClass("is--visible"),$(".initial-content").toggleClass("is--hidden"))}),$(".search__toggle").on("click",function(){$(".search-content").toggleClass("is--visible"),$(".initial-content").toggleClass("is--hidden"),setTimeout(function(){$(".search-content input").focus()},400)});new SmoothScroll('a[href*="#"]',{offset:20,speed:400,speedAsDuration:!0,durationMax:500});if(0<$("nav.toc").length)new Gumshoe("nav.toc a",{navClass:"active",contentClass:"active",nested:!1,nestedClass:"active",offset:20,reflow:!0,events:!0});$("a[href$='.jpg'],a[href$='.jpeg'],a[href$='.JPG'],a[href$='.png'],a[href$='.gif']").addClass("image-popup"),$(".image-popup").magnificPopup({type:"image",tLoading:"Loading image #%curr%...",gallery:{enabled:!0,navigateByImgClick:!0,preload:[0,1]},image:{tError:'<a href="%url%">Image #%curr%</a> could not be loaded.'},removalDelay:500,mainClass:"mfp-zoom-in",callbacks:{beforeOpen:function(){this.st.image.markup=this.st.image.markup.replace("mfp-figure","mfp-figure mfp-with-anim")}},closeOnContentClick:!0,midClick:!0}),$(".page__content").find("h1, h2, h3, h4, h5, h6").each(function(){var e=$(this).attr("id");if(e){var t=document.createElement("a");t.className="header-link",t.href="#"+e,t.innerHTML='<span class="sr-only">Permalink</span><i class="fas fa-link"></i>',t.title="Permalink",$(this).append(t)}})});
user_mail.go
// Copyright 2016 The Gogs Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package db

import (
	"fmt"
	"strings"

	"gogs.io/gogs/internal/db/errors"
	"gogs.io/gogs/internal/errutil"
)

// EmailAddress is an email address of a user. A user's list of email
// addresses can contain the primary address, but does not have to.
type EmailAddress struct {
	ID          int64
	UID         int64  `xorm:"INDEX NOT NULL" gorm:"INDEX"`
	Email       string `xorm:"UNIQUE NOT NULL" gorm:"UNIQUE"`
	IsActivated bool   `gorm:"NOT NULL;DEFAULT:FALSE"`
	IsPrimary   bool   `xorm:"-" gorm:"-" json:"-"`
}

// GetEmailAddresses returns all email addresses belonging to the given user.
func GetEmailAddresses(uid int64) ([]*EmailAddress, error) {
	emails := make([]*EmailAddress, 0, 5)
	if err := x.Where("uid=?", uid).Find(&emails); err != nil {
		return nil, err
	}

	u, err := GetUserByID(uid)
	if err != nil {
		return nil, err
	}

	isPrimaryFound := false
	for _, email := range emails {
		if email.Email == u.Email {
			isPrimaryFound = true
			email.IsPrimary = true
		} else {
			email.IsPrimary = false
		}
	}

	// We always want the primary email address displayed, even if it's not in
	// the email address table (yet).
	if !isPrimaryFound {
		emails = append(emails, &EmailAddress{
			Email:       u.Email,
			IsActivated: true,
			IsPrimary:   true,
		})
	}
	return emails, nil
}

func isEmailUsed(e Engine, email string) (bool, error) {
	if len(email) == 0 {
		return true, nil
	}

	has, err := e.Get(&EmailAddress{Email: email})
	if err != nil {
		return false, err
	} else if has {
		return true, nil
	}

	// We need to check primary email of users as well.
	return e.Where("type=?", UserIndividual).And("email=?", email).Get(new(User))
}

// IsEmailUsed returns true if the email has been used.
func IsEmailUsed(email string) (bool, error) {
	return isEmailUsed(x, email)
}

func addEmailAddress(e Engine, email *EmailAddress) error {
	email.Email = strings.ToLower(strings.TrimSpace(email.Email))
	used, err := isEmailUsed(e, email.Email)
	if err != nil {
		return err
	} else if used {
		return ErrEmailAlreadyUsed{args: errutil.Args{"email": email.Email}}
	}

	_, err = e.Insert(email)
	return err
}

func AddEmailAddress(email *EmailAddress) error {
	return addEmailAddress(x, email)
}

func AddEmailAddresses(emails []*EmailAddress) error {
	if len(emails) == 0 {
		return nil
	}

	// Check if any of them has been used
	for i := range emails {
		emails[i].Email = strings.ToLower(strings.TrimSpace(emails[i].Email))
		used, err := IsEmailUsed(emails[i].Email)
		if err != nil {
			return err
		} else if used {
			return ErrEmailAlreadyUsed{args: errutil.Args{"email": emails[i].Email}}
		}
	}

	if _, err := x.Insert(emails); err != nil {
		return fmt.Errorf("Insert: %v", err)
	}

	return nil
}

func (email *EmailAddress) Activate() error {
	user, err := GetUserByID(email.UID)
	if err != nil {
		return err
	}

	if user.Rands, err = GetUserSalt(); err != nil {
		return err
	}

	sess := x.NewSession()
	defer sess.Close()
	if err = sess.Begin(); err != nil {
		return err
	}

	email.IsActivated = true
	if _, err := sess.ID(email.ID).AllCols().Update(email); err != nil {
		return err
	} else if err = updateUser(sess, user); err != nil {
		return err
	}

	return sess.Commit()
}

func DeleteEmailAddress(email *EmailAddress) (err error) {
	if email.ID > 0 {
		_, err = x.Id(email.ID).Delete(new(EmailAddress))
	} else {
		_, err = x.Where("email=?", email.Email).Delete(new(EmailAddress))
	}
	return err
}

func DeleteEmailAddresses(emails []*EmailAddress) (err error) {
	for i := range emails {
		if err = DeleteEmailAddress(emails[i]); err != nil {
			return err
		}
	}
	return nil
}

func MakeEmailPrimary(userID int64, email *EmailAddress) error {
	has, err := x.Get(email)
	if err != nil {
		return err
	} else if !has {
		return errors.EmailNotFound{Email: email.Email}
	}

	if email.UID != userID {
		return errors.New("not the owner of the email")
	}

	if !email.IsActivated {
		return errors.EmailNotVerified{Email: email.Email}
	}

	user := &User{ID: email.UID}
	has, err = x.Get(user)
	if err != nil {
		return err
	} else if !has {
		return ErrUserNotExist{args: map[string]interface{}{"userID": email.UID}}
	}

	// Make sure the former primary email doesn't disappear.
	formerPrimaryEmail := &EmailAddress{Email: user.Email}
	has, err = x.Get(formerPrimaryEmail)
	if err != nil {
		return err
	}

	sess := x.NewSession()
	defer sess.Close()
	if err = sess.Begin(); err != nil {
		return err
	}

	if !has {
		formerPrimaryEmail.UID = user.ID
		formerPrimaryEmail.IsActivated = user.IsActive
		if _, err = sess.Insert(formerPrimaryEmail); err != nil {
			return err
		}
	}

	user.Email = email.Email
	if _, err = sess.ID(user.ID).AllCols().Update(user); err != nil {
		return err
	}

	return sess.Commit()
}
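// Editor's sketch (hypothetical helper, not part of Gogs): the intended flow
// for promoting a verified secondary address. MakeEmailPrimary rejects
// addresses that are unactivated or owned by another user, so Activate must
// succeed first.
func examplePromoteEmail(userID int64, addr *EmailAddress) error {
	if err := addr.Activate(); err != nil {
		return err
	}
	return MakeEmailPrimary(userID, addr)
}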
topLevel.js
const Discord = require('discord.js');
const db = require('quick.db');

module.exports.run = async (client, message, args) => {
  db.startsWith(`level_${message.guild.id}`, { sort: '.data' }).then(resp => {
    resp.length = 10;
    let rsp = '';
    for (var i in resp) {
      rsp += `[${parseInt(i)+1}] ${client.users.get(resp[i].ID.split('_')[2]).tag} = **${resp[i].data}**\n`;
    }
    if (rsp === '') rsp = 'Rank\n\nLooks like everyone is level 0 :/';
    let embed = new Discord.RichEmbed()
      .setAuthor(`Top members of ${message.guild.name}`)
      .setColor('BLUE')
      .setDescription(`Check the list below:\n\n${rsp}`)
      .setFooter(`Command executed by ${message.author.tag}`, message.author.avatarURL)
      .setTimestamp();
    message.channel.send(embed);
  });
}

exports.conf = {
  enabled: true,
  guildOnly: false,
  aliases: ['toplevel'],
  permLevel: 0,
  manu: false
};

exports.help = {
  name: 'TopLevel',
  category: '💈 Utilities',
  description: 'See the top members of the server.',
  usage: 'd!toplevel'
};
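// Editor's note: `resp.length = 10` truncates the array returned by
// db.startsWith in place, so at most the ten highest "level_" entries are
// rendered into the embed.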
model_binding_and_validation.go
package main

import (
	"github.com/gin-gonic/gin"
	"net/http"
)

// Binding from JSON
type Login struct {
	User     string `form:"user" json:"user" xml:"user" binding:"required"`
	Password string `form:"password" json:"password" xml:"password" binding:"-"`
}

func main() {
	router := gin.Default()

	// Example for binding JSON ({"user": "manu", "password": "123"})
	router.POST("/loginJSON", func(c *gin.Context) {
		var json Login
		if err := c.ShouldBindJSON(&json); err != nil {
			c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
			return
		}

		if json.User != "manu" || json.Password != "123" {
			c.JSON(http.StatusUnauthorized, gin.H{"status": "unauthorized"})
			return
		}

		c.JSON(http.StatusOK, gin.H{"status": "you are logged in"})
	})

	// Example for binding XML (
	//	<?xml version="1.0" encoding="UTF-8"?>
	//	<root>
	//		<user>user</user>
	//		<password>123</password>
	//	</root>)
	router.POST("/loginXML", func(c *gin.Context) {
		var xml Login
		if err := c.ShouldBindXML(&xml); err != nil {
			c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
			return
		}

		if xml.User != "manu" || xml.Password != "123" {
			c.JSON(http.StatusUnauthorized, gin.H{"status": "unauthorized"})
			return
		}

		c.JSON(http.StatusOK, gin.H{"status": "you are logged in"})
	})

	// Example for binding an HTML form (user=manu&password=123)
	router.POST("/loginForm", func(c *gin.Context) {
		var form Login
		// This will infer what binder to use depending on the content-type header.
		if err := c.ShouldBind(&form); err != nil {
			c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
			return
		}

		if form.User != "manu" || form.Password != "123" {
			c.JSON(http.StatusUnauthorized, gin.H{"status": "unauthorized"})
			return
		}

		c.JSON(http.StatusOK, gin.H{"status": "you are logged in"})
	})

	// Listen and serve on 0.0.0.0:8080
	router.Run(":8080")
}

// go run examples/model_binding_and_validation.go
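// Editor's usage sketch (not part of the original example): exercising the
// form endpoint with curl. ShouldBind picks the binder from the request's
// Content-Type header, so the same handler also accepts a JSON body sent
// with Content-Type: application/json.
//
//	curl -X POST http://localhost:8080/loginForm -d "user=manu" -d "password=123"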
Sum_of_numbers_Recursion.py
print("To print the sum of numbers using recursion") def calculatatesum(num):
n=int(input("Enter the Number value:")) print("The Sum of numbers is,",calculatatesum(n))
if(num): a=num+calculatatesum(num-1) return a else: return 0
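# Editor's trace: calculatatesum(3) evaluates as
# 3 + calculatatesum(2) = 3 + 2 + calculatatesum(1) = 3 + 2 + 1 + 0 = 6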
rules.py
import string


def z_array(s):
    # NOTE:
    # Use Z algorithm (Gusfield theorem 1.4.1) to preprocess s.
    assert len(s) > 1
    z = [len(s)] + [0] * (len(s) - 1)
    # Initial comparison for s[1:] with prefix
    for i in range(1, len(s)):
        if s[i] == s[i - 1]:
            z[1] += 1
        else:
            break
    r, l = 0, 0
    if z[1] > 0:
        r, l = z[1], 1
    for k in range(2, len(s)):
        assert z[k] == 0
        if k > r:
            # Case 1
            for i in range(k, len(s)):
                if s[i] == s[i - k]:
                    z[k] += 1
                else:
                    break
            r, l = k + z[k] - 1, k
        else:
            # Case 2
            # Calculate length of Beta
            nbeta = r - k + 1
            zkp = z[k - l]
            if nbeta > zkp:
                # Case 2a: Zkp wins
                z[k] = zkp
            else:
                # Case 2b: Compare characters just past r
                nmatch = 0
                for i in range(r + 1, len(s)):
                    if s[i] == s[i - k]:
                        nmatch += 1
                    else:
                        break
                l, r = k, r + nmatch
                z[k] = r - k + 1
    return z


def n_array(s):
    # NOTE:
    # Compile the N array (Gusfield theorem 2.2.2) from the z array.
    return z_array(s[::-1])[::-1]


def big_l_prime_array(p, n):
    # NOTE:
    # Compile the L' array (Gusfield theorem 2.2.2) using p and N array.
    # L'[i] = largest index j less than n such that N[j] = |P[i:]|
    lp = [0] * len(p)
    for j in range(len(p) - 1):
        i = len(n) - n[j]
        if i < len(p):
            lp[i] = j + 1
    return lp


def big_l_array(p, lp):
    # NOTE:
    # Compile L array (Gusfield theorem 2.2.2) using p and L' array.
    # L[i] = largest index j less than n such that N[j] >= |P[i:]|
    l = [0] * len(p)
    l[1] = lp[1]
    for i in range(2, len(p)):
        l[i] = max(l[i - 1], lp[i])
    return l


def small_l_prime_array(n):
    # NOTE:
    # Compile lp' array (Gusfield theorem 2.2.4) using N array.
    small_lp = [0] * len(n)
    for i in range(len(n)):
        if n[i] == i + 1:
            # Prefix matching a suffix
            small_lp[len(n) - i - 1] = i + 1
    for i in range(len(n) - 2, -1, -1):
        # "Smear" them out to the left
        if small_lp[i] == 0:
            small_lp[i] = small_lp[i + 1]
    return small_lp


def good_suffix_table(p):
    # NOTE:
    # Return table needed to apply good suffix rule.
    n = n_array(p)
    lp = big_l_prime_array(p, n)
    return lp, big_l_array(p, lp), small_l_prime_array(n)


def good_suffix_mismatch(i, big_l_prime, small_l_prime):
    # NOTE:
    # Given a mismatch at offset i, and given L/L' and l' arrays,
    # return amount to shift as determined by good suffix rule.
    length = len(big_l_prime)
    assert i < length
    if i == length - 1:
        return 0
    i += 1  # Points to leftmost matching position of P
    if big_l_prime[i] > 0:
        return length - big_l_prime[i]
    return length - small_l_prime[i]


def good_suffix_match(small_l_prime):
    # NOTE:
    # Given a full match of P to T, return amount to shift as determined by
    # good suffix rule.
    return len(small_l_prime) - small_l_prime[1]


def dense_bad_char_tab(p, amap):
    # NOTE:
    # Given pattern string and list with ordered alphabet characters, create
    # and return a dense bad character table. Table is indexed by offset then
    # by character.
    tab = []
    nxt = [0] * len(amap)
    for i in range(0, len(p)):
        c = p[i]
        assert c in amap
        tab.append(nxt[:])
        nxt[amap[c]] = i + 1
    return tab


class BoyerMoore(object):
    # NOTE:
    # Encapsulates pattern and associated Boyer-Moore preprocessing.

    def __init__(self, p, alphabet="ACGT"):
        self.p = p
        self.alphabet = alphabet
        # Create map from alphabet characters to integers
        self.amap = {}
        for i in range(len(self.alphabet)):
            self.amap[self.alphabet[i]] = i
        # Make bad character rule table
        self.bad_char = dense_bad_char_tab(p, self.amap)
        # Create good suffix rule table
        _, self.big_l, self.small_l_prime = good_suffix_table(p)

    def bad_character_rule(self, i, c):
        # NOTE:
        # Return number of skips given by bad character rule at offset i.
        assert c in self.amap
        ci = self.amap[c]
        assert i > (self.bad_char[i][ci] - 1)
        return i - (self.bad_char[i][ci] - 1)

    def good_suffix_rule(self, i):
        # NOTE:
        # Given a mismatch at offset i, return amount to shift as determined
        # by (weak) good suffix rule.
        length = len(self.big_l)
        assert i < length
        if i == length - 1:
            return 0
        i += 1  # i points to leftmost matching position of P
        if self.big_l[i] > 0:
            return length - self.big_l[i]
        return length - self.small_l_prime[i]

    def match_skip(self):
        # NOTE:
        # Return amount to shift in case where P matches T
        return len(self.small_l_prime) - self.small_l_prime[1]
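# Editor's usage sketch (not part of the original module): a full Boyer-Moore
# matcher built on the class above, taking the larger of the bad-character
# and good-suffix shifts on each mismatch.
def boyer_moore(p, p_bm, t):
    i, occurrences = 0, []
    while i < len(t) - len(p) + 1:
        shift, mismatched = 1, False
        for j in range(len(p) - 1, -1, -1):
            if p[j] != t[i + j]:
                skip_bc = p_bm.bad_character_rule(j, t[i + j])
                skip_gs = p_bm.good_suffix_rule(j)
                shift = max(shift, skip_bc, skip_gs)
                mismatched = True
                break
        if not mismatched:
            occurrences.append(i)
            shift = max(shift, p_bm.match_skip())
        i += shift
    return occurrences

# e.g. boyer_moore("GGTAG", BoyerMoore("GGTAG"), "GGTAGCGGTAG") returns the
# start offsets of both occurrences.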
controller.go
package deploy

import (
	"bufio"
	"bytes"
	"context"
	"crypto/sha256"
	"encoding/hex"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"
	"time"

	errors2 "github.com/pkg/errors"
	v12 "github.com/rancher/k3s/pkg/apis/k3s.cattle.io/v1"
	v1 "github.com/rancher/k3s/pkg/generated/controllers/k3s.cattle.io/v1"
	"github.com/rancher/wrangler/pkg/apply"
	"github.com/rancher/wrangler/pkg/merr"
	"github.com/rancher/wrangler/pkg/objectset"
	"github.com/sirupsen/logrus"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	yamlDecoder "k8s.io/apimachinery/pkg/util/yaml"
)

const (
	ns       = "kube-system"
	startKey = "_start_"
)

func WatchFiles(ctx context.Context, apply apply.Apply, addons v1.AddonController, bases ...string) error {
	w := &watcher{
		apply:      apply,
		addonCache: addons.Cache(),
		addons:     addons,
		bases:      bases,
	}

	addons.Enqueue("", startKey)
	addons.OnChange(ctx, "addon-start", func(key string, _ *v12.Addon) (*v12.Addon, error) {
		if key == startKey {
			go w.start(ctx)
		}
		return nil, nil
	})

	return nil
}

type watcher struct {
	apply      apply.Apply
	addonCache v1.AddonCache
	addons     v1.AddonClient
	bases      []string
}

func (w *watcher) start(ctx context.Context) {
	force := true
	for {
		if err := w.listFiles(force); err == nil {
			force = false
		} else {
			logrus.Errorf("failed to process config: %v", err)
		}
		select {
		case <-ctx.Done():
			return
		case <-time.After(15 * time.Second):
		}
	}
}

func (w *watcher) listFiles(force bool) error {
	var errs []error
	for _, base := range w.bases {
		if err := w.listFilesIn(base, force); err != nil {
			errs = append(errs, err)
		}
	}
	return merr.NewErrors(errs...)
}

func (w *watcher) listFilesIn(base string, force bool) error {
	files := map[string]os.FileInfo{}
	if err := filepath.Walk(base, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		files[path] = info
		return nil
	}); err != nil {
		return err
	}

	skips := map[string]bool{}
	for _, file := range files {
		if strings.HasSuffix(file.Name(), ".skip") {
			skips[strings.TrimSuffix(file.Name(), ".skip")] = true
		}
	}

	var errs []error
	for path, file := range files {
		if skipFile(file.Name(), skips) {
			continue
		}
		if err := w.deploy(path, !force); err != nil {
			errs = append(errs, errors2.Wrapf(err, "failed to process %s", path))
		}
	}

	return merr.NewErrors(errs...)
}

func (w *watcher) deploy(path string, compareChecksum bool) error {
	content, err := ioutil.ReadFile(path)
	if err != nil {
		return err
	}

	name := name(path)
	addon, err := w.addon(name)
	if err != nil {
		return err
	}

	checksum := checksum(content)
	if compareChecksum && checksum == addon.Spec.Checksum {
		logrus.Debugf("Skipping existing deployment of %s, check=%v, checksum %s=%s", path, compareChecksum, checksum, addon.Spec.Checksum)
		return nil
	}

	objectSet, err := objectSet(content)
	if err != nil {
		return err
	}

	if err := w.apply.WithOwner(&addon).Apply(objectSet); err != nil {
		return err
	}

	addon.Spec.Source = path
	addon.Spec.Checksum = checksum
	addon.Status.GVKs = nil

	if addon.UID == "" {
		_, err := w.addons.Create(&addon)
		return err
	}

	_, err = w.addons.Update(&addon)
	return err
}

func (w *watcher) addon(name string) (v12.Addon, error) {
	addon, err := w.addonCache.Get(ns, name)
	if errors.IsNotFound(err) {
		addon = v12.NewAddon(ns, name, v12.Addon{})
	} else if err != nil {
		return v12.Addon{}, err
	}
	return *addon, nil
}

func objectSet(content []byte) (*objectset.ObjectSet, error) {
	objs, err := yamlToObjects(bytes.NewBuffer(content))
	if err != nil {
		return nil, err
	}

	os := objectset.NewObjectSet()
	os.Add(objs...)
	return os, nil
}

func name(path string) string {
	name := filepath.Base(path)
	return strings.SplitN(name, ".", 2)[0]
}

func checksum(bytes []byte) string {
	d := sha256.Sum256(bytes)
	return hex.EncodeToString(d[:])
}

func isEmptyYaml(yaml []byte) bool {
	isEmpty := true
	lines := bytes.Split(yaml, []byte("\n"))
	for _, l := range lines {
		s := bytes.TrimSpace(l)
		if string(s) != "---" && !bytes.HasPrefix(s, []byte("#")) && string(s) != "" {
			isEmpty = false
		}
	}
	return isEmpty
}

func yamlToObjects(in io.Reader) ([]runtime.Object, error) {
	var result []runtime.Object
	reader := yamlDecoder.NewYAMLReader(bufio.NewReaderSize(in, 4096))
	for {
		raw, err := reader.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			return nil, err
		}
		if !isEmptyYaml(raw) {
			obj, err := toObjects(raw)
			if err != nil {
				return nil, err
			}
			result = append(result, obj...)
		}
	}

	return result, nil
}

func toObjects(bytes []byte) ([]runtime.Object, error) {
	bytes, err := yamlDecoder.ToJSON(bytes)
	if err != nil {
		return nil, err
	}

	obj, _, err := unstructured.UnstructuredJSONScheme.Decode(bytes, nil, nil)
	if err != nil {
		return nil, err
	}

	if l, ok := obj.(*unstructured.UnstructuredList); ok {
		var result []runtime.Object
		for _, obj := range l.Items {
			copy := obj
			result = append(result, &copy)
		}
		return result, nil
	}

	return []runtime.Object{obj}, nil
}

func skipFile(fileName string, skips map[string]bool) bool {
	switch {
	case strings.HasPrefix(fileName, "."):
		return true
	case skips[fileName]:
		return true
	case strings.HasSuffix(fileName, ".json"):
		return false
	case strings.HasSuffix(fileName, ".yml"):
		return false
	case strings.HasSuffix(fileName, ".yaml"):
		return false
	default:
		return true
	}
}
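// Editor's sketch (hypothetical helper, not in k3s): demonstrates the ".skip"
// convention enforced by skipFile. A marker file "<name>.skip" suppresses
// deployment of "<name>", dotfiles are always ignored, and only
// .json/.yml/.yaml files are considered at all.
func exampleDeployable(names []string) []string {
	skips := map[string]bool{}
	for _, n := range names {
		if strings.HasSuffix(n, ".skip") {
			skips[strings.TrimSuffix(n, ".skip")] = true
		}
	}
	var deploy []string
	for _, n := range names {
		if !skipFile(n, skips) {
			deploy = append(deploy, n)
		}
	}
	// e.g. ["traefik.yaml", "traefik.yaml.skip", "coredns.yaml"] -> ["coredns.yaml"]
	return deploy
}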
input.rs
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT. use std::fmt::Write; /// See [`BatchAssociateScramSecretInput`](crate::input::BatchAssociateScramSecretInput) pub mod batch_associate_scram_secret_input { /// A builder for [`BatchAssociateScramSecretInput`](crate::input::BatchAssociateScramSecretInput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) cluster_arn: std::option::Option<std::string::String>, pub(crate) secret_arn_list: std::option::Option<std::vec::Vec<std::string::String>>, } impl Builder { /// <p>The Amazon Resource Name (ARN) of the cluster to be updated.</p> pub fn cluster_arn(mut self, input: impl Into<std::string::String>) -> Self { self.cluster_arn = Some(input.into()); self } /// <p>The Amazon Resource Name (ARN) of the cluster to be updated.</p> pub fn set_cluster_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.cluster_arn = input; self } /// Appends an item to `secret_arn_list`. /// /// To override the contents of this collection use [`set_secret_arn_list`](Self::set_secret_arn_list). /// /// <p>List of AWS Secrets Manager secret ARNs.</p> pub fn secret_arn_list(mut self, input: impl Into<std::string::String>) -> Self { let mut v = self.secret_arn_list.unwrap_or_default(); v.push(input.into()); self.secret_arn_list = Some(v); self } /// <p>List of AWS Secrets Manager secret ARNs.</p> pub fn set_secret_arn_list( mut self, input: std::option::Option<std::vec::Vec<std::string::String>>, ) -> Self { self.secret_arn_list = input; self } /// Consumes the builder and constructs a [`BatchAssociateScramSecretInput`](crate::input::BatchAssociateScramSecretInput) pub fn build( self, ) -> std::result::Result< crate::input::BatchAssociateScramSecretInput, aws_smithy_http::operation::BuildError, > { Ok(crate::input::BatchAssociateScramSecretInput { cluster_arn: self.cluster_arn, secret_arn_list: self.secret_arn_list, }) } } } #[doc(hidden)] pub type BatchAssociateScramSecretInputOperationOutputAlias = crate::operation::BatchAssociateScramSecret; #[doc(hidden)] pub type BatchAssociateScramSecretInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy; impl BatchAssociateScramSecretInput { /// Consumes the builder and constructs an Operation<[`BatchAssociateScramSecret`](crate::operation::BatchAssociateScramSecret)> #[allow(clippy::let_and_return)] #[allow(clippy::needless_borrow)] pub async fn make_operation( &self, _config: &crate::config::Config, ) -> std::result::Result< aws_smithy_http::operation::Operation< crate::operation::BatchAssociateScramSecret, aws_http::AwsErrorRetryPolicy, >, aws_smithy_http::operation::BuildError, > { fn uri_base( _input: &crate::input::BatchAssociateScramSecretInput, output: &mut String, ) -> Result<(), aws_smithy_http::operation::BuildError> { let input_1 = &_input.cluster_arn; let input_1 = input_1 .as_ref() .ok_or(aws_smithy_http::operation::BuildError::MissingField { field: "cluster_arn", details: "cannot be empty or unset", })?; let cluster_arn = aws_smithy_http::label::fmt_string(input_1, false); if cluster_arn.is_empty() { return Err(aws_smithy_http::operation::BuildError::MissingField { field: "cluster_arn", details: "cannot be empty or unset", }); } write!( output, "/v1/clusters/{ClusterArn}/scram-secrets", ClusterArn = cluster_arn ) .expect("formatting should succeed"); Ok(()) } #[allow(clippy::unnecessary_wraps)] fn update_http_builder( input: &crate::input::BatchAssociateScramSecretInput, 
builder: http::request::Builder, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { let mut uri = String::new(); uri_base(input, &mut uri)?; Ok(builder.method("POST").uri(uri)) } #[allow(clippy::unnecessary_wraps)] fn request_builder_base( input: &crate::input::BatchAssociateScramSecretInput, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { #[allow(unused_mut)] let mut builder = update_http_builder(input, http::request::Builder::new())?; builder = aws_smithy_http::header::set_header_if_absent( builder, http::header::HeaderName::from_static("content-type"), "application/json", ); Ok(builder) } let properties = aws_smithy_http::property_bag::SharedPropertyBag::new(); let request = request_builder_base(&self)?; let body = crate::operation_ser::serialize_operation_crate_operation_batch_associate_scram_secret( &self, )?; let request = Self::assemble(request, body); #[allow(unused_mut)] let mut request = aws_smithy_http::operation::Request::from_parts( request.map(aws_smithy_http::body::SdkBody::from), properties, ); let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment( aws_types::os_shim_internal::Env::real(), crate::API_METADATA.clone(), ); if let Some(app_name) = _config.app_name() { user_agent = user_agent.with_app_name(app_name.clone()); } request.properties_mut().insert(user_agent); #[allow(unused_mut)] let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config(); request.properties_mut().insert(signing_config); request .properties_mut() .insert(aws_types::SigningService::from_static( _config.signing_service(), )); aws_endpoint::set_endpoint_resolver( &mut request.properties_mut(), _config.endpoint_resolver.clone(), ); if let Some(region) = &_config.region { request.properties_mut().insert(region.clone()); } aws_http::auth::set_provider( &mut request.properties_mut(), _config.credentials_provider.clone(), ); let op = aws_smithy_http::operation::Operation::new( request, crate::operation::BatchAssociateScramSecret::new(), ) .with_metadata(aws_smithy_http::operation::Metadata::new( "BatchAssociateScramSecret", "kafka", )); let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new()); Ok(op) } fn assemble( builder: http::request::Builder, body: aws_smithy_http::body::SdkBody, ) -> http::request::Request<aws_smithy_http::body::SdkBody> { let mut builder = builder; if let Some(content_length) = body.content_length() { builder = aws_smithy_http::header::set_header_if_absent( builder, http::header::CONTENT_LENGTH, content_length, ); } builder.body(body).expect("should be valid request") } /// Creates a new builder-style object to manufacture [`BatchAssociateScramSecretInput`](crate::input::BatchAssociateScramSecretInput) pub fn builder() -> crate::input::batch_associate_scram_secret_input::Builder { crate::input::batch_associate_scram_secret_input::Builder::default() } } /// See [`BatchDisassociateScramSecretInput`](crate::input::BatchDisassociateScramSecretInput) pub mod batch_disassociate_scram_secret_input { /// A builder for [`BatchDisassociateScramSecretInput`](crate::input::BatchDisassociateScramSecretInput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) cluster_arn: std::option::Option<std::string::String>, pub(crate) secret_arn_list: std::option::Option<std::vec::Vec<std::string::String>>, } impl Builder { /// <p>The Amazon Resource Name (ARN) of the cluster 
to be updated.</p> pub fn cluster_arn(mut self, input: impl Into<std::string::String>) -> Self { self.cluster_arn = Some(input.into()); self } /// <p>The Amazon Resource Name (ARN) of the cluster to be updated.</p> pub fn set_cluster_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.cluster_arn = input; self } /// Appends an item to `secret_arn_list`. /// /// To override the contents of this collection use [`set_secret_arn_list`](Self::set_secret_arn_list). /// /// <p>List of AWS Secrets Manager secret ARNs.</p> pub fn secret_arn_list(mut self, input: impl Into<std::string::String>) -> Self { let mut v = self.secret_arn_list.unwrap_or_default(); v.push(input.into()); self.secret_arn_list = Some(v); self } /// <p>List of AWS Secrets Manager secret ARNs.</p> pub fn set_secret_arn_list( mut self, input: std::option::Option<std::vec::Vec<std::string::String>>, ) -> Self { self.secret_arn_list = input; self } /// Consumes the builder and constructs a [`BatchDisassociateScramSecretInput`](crate::input::BatchDisassociateScramSecretInput) pub fn build( self, ) -> std::result::Result< crate::input::BatchDisassociateScramSecretInput, aws_smithy_http::operation::BuildError, > { Ok(crate::input::BatchDisassociateScramSecretInput { cluster_arn: self.cluster_arn, secret_arn_list: self.secret_arn_list, }) } } } #[doc(hidden)] pub type BatchDisassociateScramSecretInputOperationOutputAlias = crate::operation::BatchDisassociateScramSecret; #[doc(hidden)] pub type BatchDisassociateScramSecretInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy; impl BatchDisassociateScramSecretInput { /// Consumes the builder and constructs an Operation<[`BatchDisassociateScramSecret`](crate::operation::BatchDisassociateScramSecret)> #[allow(clippy::let_and_return)] #[allow(clippy::needless_borrow)] pub async fn make_operation( &self, _config: &crate::config::Config, ) -> std::result::Result< aws_smithy_http::operation::Operation< crate::operation::BatchDisassociateScramSecret, aws_http::AwsErrorRetryPolicy, >, aws_smithy_http::operation::BuildError, > { fn uri_base( _input: &crate::input::BatchDisassociateScramSecretInput, output: &mut String, ) -> Result<(), aws_smithy_http::operation::BuildError> { let input_2 = &_input.cluster_arn; let input_2 = input_2 .as_ref() .ok_or(aws_smithy_http::operation::BuildError::MissingField { field: "cluster_arn", details: "cannot be empty or unset", })?; let cluster_arn = aws_smithy_http::label::fmt_string(input_2, false); if cluster_arn.is_empty() { return Err(aws_smithy_http::operation::BuildError::MissingField { field: "cluster_arn", details: "cannot be empty or unset", }); } write!( output, "/v1/clusters/{ClusterArn}/scram-secrets", ClusterArn = cluster_arn ) .expect("formatting should succeed"); Ok(()) } #[allow(clippy::unnecessary_wraps)] fn update_http_builder( input: &crate::input::BatchDisassociateScramSecretInput, builder: http::request::Builder, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { let mut uri = String::new(); uri_base(input, &mut uri)?; Ok(builder.method("PATCH").uri(uri)) } #[allow(clippy::unnecessary_wraps)] fn request_builder_base( input: &crate::input::BatchDisassociateScramSecretInput, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { #[allow(unused_mut)] let mut builder = update_http_builder(input, http::request::Builder::new())?; builder = aws_smithy_http::header::set_header_if_absent( builder, 
http::header::HeaderName::from_static("content-type"), "application/json", ); Ok(builder) } let properties = aws_smithy_http::property_bag::SharedPropertyBag::new(); let request = request_builder_base(&self)?; let body = crate::operation_ser::serialize_operation_crate_operation_batch_disassociate_scram_secret(&self)? ; let request = Self::assemble(request, body); #[allow(unused_mut)] let mut request = aws_smithy_http::operation::Request::from_parts( request.map(aws_smithy_http::body::SdkBody::from), properties, ); let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment( aws_types::os_shim_internal::Env::real(), crate::API_METADATA.clone(), ); if let Some(app_name) = _config.app_name() { user_agent = user_agent.with_app_name(app_name.clone()); } request.properties_mut().insert(user_agent); #[allow(unused_mut)] let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config(); request.properties_mut().insert(signing_config); request .properties_mut() .insert(aws_types::SigningService::from_static( _config.signing_service(), )); aws_endpoint::set_endpoint_resolver( &mut request.properties_mut(), _config.endpoint_resolver.clone(), ); if let Some(region) = &_config.region { request.properties_mut().insert(region.clone()); } aws_http::auth::set_provider( &mut request.properties_mut(), _config.credentials_provider.clone(), ); let op = aws_smithy_http::operation::Operation::new( request, crate::operation::BatchDisassociateScramSecret::new(), ) .with_metadata(aws_smithy_http::operation::Metadata::new( "BatchDisassociateScramSecret", "kafka", )); let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new()); Ok(op) } fn assemble( builder: http::request::Builder, body: aws_smithy_http::body::SdkBody, ) -> http::request::Request<aws_smithy_http::body::SdkBody> { let mut builder = builder; if let Some(content_length) = body.content_length() { builder = aws_smithy_http::header::set_header_if_absent( builder, http::header::CONTENT_LENGTH, content_length, ); } builder.body(body).expect("should be valid request") } /// Creates a new builder-style object to manufacture [`BatchDisassociateScramSecretInput`](crate::input::BatchDisassociateScramSecretInput) pub fn builder() -> crate::input::batch_disassociate_scram_secret_input::Builder { crate::input::batch_disassociate_scram_secret_input::Builder::default() } } /// See [`CreateClusterInput`](crate::input::CreateClusterInput) pub mod create_cluster_input { /// A builder for [`CreateClusterInput`](crate::input::CreateClusterInput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) broker_node_group_info: std::option::Option<crate::model::BrokerNodeGroupInfo>, pub(crate) client_authentication: std::option::Option<crate::model::ClientAuthentication>, pub(crate) cluster_name: std::option::Option<std::string::String>, pub(crate) configuration_info: std::option::Option<crate::model::ConfigurationInfo>, pub(crate) encryption_info: std::option::Option<crate::model::EncryptionInfo>, pub(crate) enhanced_monitoring: std::option::Option<crate::model::EnhancedMonitoring>, pub(crate) open_monitoring: std::option::Option<crate::model::OpenMonitoringInfo>, pub(crate) kafka_version: std::option::Option<std::string::String>, pub(crate) logging_info: std::option::Option<crate::model::LoggingInfo>, pub(crate) number_of_broker_nodes: std::option::Option<i32>, pub(crate) tags: std::option::Option< std::collections::HashMap<std::string::String, 
std::string::String>, >, } impl Builder { /// <p>Information about the broker nodes in the cluster.</p> pub fn broker_node_group_info(mut self, input: crate::model::BrokerNodeGroupInfo) -> Self { self.broker_node_group_info = Some(input); self } /// <p>Information about the broker nodes in the cluster.</p> pub fn set_broker_node_group_info( mut self, input: std::option::Option<crate::model::BrokerNodeGroupInfo>, ) -> Self { self.broker_node_group_info = input; self } /// <p>Includes all client authentication related information.</p> pub fn client_authentication(mut self, input: crate::model::ClientAuthentication) -> Self { self.client_authentication = Some(input); self } /// <p>Includes all client authentication related information.</p> pub fn set_client_authentication( mut self, input: std::option::Option<crate::model::ClientAuthentication>, ) -> Self { self.client_authentication = input; self } /// <p>The name of the cluster.</p> pub fn cluster_name(mut self, input: impl Into<std::string::String>) -> Self { self.cluster_name = Some(input.into()); self } /// <p>The name of the cluster.</p> pub fn set_cluster_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.cluster_name = input; self } /// <p>Represents the configuration that you want MSK to use for the brokers in a cluster.</p> pub fn configuration_info(mut self, input: crate::model::ConfigurationInfo) -> Self { self.configuration_info = Some(input); self } /// <p>Represents the configuration that you want MSK to use for the brokers in a cluster.</p> pub fn set_configuration_info( mut self, input: std::option::Option<crate::model::ConfigurationInfo>, ) -> Self { self.configuration_info = input; self } /// <p>Includes all encryption-related information.</p> pub fn encryption_info(mut self, input: crate::model::EncryptionInfo) -> Self { self.encryption_info = Some(input); self } /// <p>Includes all encryption-related information.</p> pub fn set_encryption_info( mut self, input: std::option::Option<crate::model::EncryptionInfo>, ) -> Self { self.encryption_info = input; self } /// <p>Specifies the level of monitoring for the MSK cluster. The possible values are DEFAULT, PER_BROKER, PER_TOPIC_PER_BROKER, and PER_TOPIC_PER_PARTITION.</p> pub fn enhanced_monitoring(mut self, input: crate::model::EnhancedMonitoring) -> Self { self.enhanced_monitoring = Some(input); self } /// <p>Specifies the level of monitoring for the MSK cluster. 
The possible values are DEFAULT, PER_BROKER, PER_TOPIC_PER_BROKER, and PER_TOPIC_PER_PARTITION.</p> pub fn set_enhanced_monitoring( mut self, input: std::option::Option<crate::model::EnhancedMonitoring>, ) -> Self { self.enhanced_monitoring = input; self } /// <p>The settings for open monitoring.</p> pub fn open_monitoring(mut self, input: crate::model::OpenMonitoringInfo) -> Self { self.open_monitoring = Some(input); self } /// <p>The settings for open monitoring.</p> pub fn set_open_monitoring( mut self, input: std::option::Option<crate::model::OpenMonitoringInfo>, ) -> Self { self.open_monitoring = input; self } /// <p>The version of Apache Kafka.</p> pub fn kafka_version(mut self, input: impl Into<std::string::String>) -> Self { self.kafka_version = Some(input.into()); self } /// <p>The version of Apache Kafka.</p> pub fn set_kafka_version( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.kafka_version = input; self } #[allow(missing_docs)] // documentation missing in model pub fn logging_info(mut self, input: crate::model::LoggingInfo) -> Self { self.logging_info = Some(input); self } #[allow(missing_docs)] // documentation missing in model pub fn set_logging_info( mut self, input: std::option::Option<crate::model::LoggingInfo>, ) -> Self { self.logging_info = input; self } /// <p>The number of broker nodes in the cluster.</p> pub fn number_of_broker_nodes(mut self, input: i32) -> Self { self.number_of_broker_nodes = Some(input); self } /// <p>The number of broker nodes in the cluster.</p> pub fn set_number_of_broker_nodes(mut self, input: std::option::Option<i32>) -> Self { self.number_of_broker_nodes = input; self } /// Adds a key-value pair to `tags`. /// /// To override the contents of this collection use [`set_tags`](Self::set_tags). 
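        ///
        /// A minimal usage sketch of accumulating entries through this builder (the
        /// key/value strings below are hypothetical, not required values):
        /// ```ignore
        /// let builder = crate::input::CreateClusterInput::builder()
        ///     .tags("team", "data-platform")
        ///     .tags("env", "prod");
        /// ```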
/// /// <p>Create tags when creating the cluster.</p> pub fn tags( mut self, k: impl Into<std::string::String>, v: impl Into<std::string::String>, ) -> Self { let mut hash_map = self.tags.unwrap_or_default(); hash_map.insert(k.into(), v.into()); self.tags = Some(hash_map); self } /// <p>Create tags when creating the cluster.</p> pub fn set_tags( mut self, input: std::option::Option< std::collections::HashMap<std::string::String, std::string::String>, >, ) -> Self { self.tags = input; self } /// Consumes the builder and constructs a [`CreateClusterInput`](crate::input::CreateClusterInput) pub fn build( self, ) -> std::result::Result< crate::input::CreateClusterInput, aws_smithy_http::operation::BuildError, > { Ok(crate::input::CreateClusterInput { broker_node_group_info: self.broker_node_group_info, client_authentication: self.client_authentication, cluster_name: self.cluster_name, configuration_info: self.configuration_info, encryption_info: self.encryption_info, enhanced_monitoring: self.enhanced_monitoring, open_monitoring: self.open_monitoring, kafka_version: self.kafka_version, logging_info: self.logging_info, number_of_broker_nodes: self.number_of_broker_nodes.unwrap_or_default(), tags: self.tags, }) } } } #[doc(hidden)] pub type CreateClusterInputOperationOutputAlias = crate::operation::CreateCluster; #[doc(hidden)] pub type CreateClusterInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy; impl CreateClusterInput { /// Consumes the builder and constructs an Operation<[`CreateCluster`](crate::operation::CreateCluster)> #[allow(clippy::let_and_return)] #[allow(clippy::needless_borrow)] pub async fn make_operation( &self, _config: &crate::config::Config, ) -> std::result::Result< aws_smithy_http::operation::Operation< crate::operation::CreateCluster, aws_http::AwsErrorRetryPolicy, >, aws_smithy_http::operation::BuildError, > { fn uri_base( _input: &crate::input::CreateClusterInput, output: &mut String, ) -> Result<(), aws_smithy_http::operation::BuildError> { write!(output, "/v1/clusters").expect("formatting should succeed"); Ok(()) } #[allow(clippy::unnecessary_wraps)] fn update_http_builder( input: &crate::input::CreateClusterInput, builder: http::request::Builder, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { let mut uri = String::new(); uri_base(input, &mut uri)?; Ok(builder.method("POST").uri(uri)) } #[allow(clippy::unnecessary_wraps)] fn request_builder_base( input: &crate::input::CreateClusterInput, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { #[allow(unused_mut)] let mut builder = update_http_builder(input, http::request::Builder::new())?; builder = aws_smithy_http::header::set_header_if_absent( builder, http::header::HeaderName::from_static("content-type"), "application/json", ); Ok(builder) } let properties = aws_smithy_http::property_bag::SharedPropertyBag::new(); let request = request_builder_base(&self)?; let body = crate::operation_ser::serialize_operation_crate_operation_create_cluster(&self)?; let request = Self::assemble(request, body); #[allow(unused_mut)] let mut request = aws_smithy_http::operation::Request::from_parts( request.map(aws_smithy_http::body::SdkBody::from), properties, ); let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment( aws_types::os_shim_internal::Env::real(), crate::API_METADATA.clone(), ); if let Some(app_name) = _config.app_name() { user_agent = user_agent.with_app_name(app_name.clone()); } 
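        // Descriptive note: the statements below stash the user agent, SigV4
        // signing config, signing service name, endpoint resolver, region, and
        // credentials provider in the request property bag, where the dispatch
        // and signing layers read them when the operation is sent.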
request.properties_mut().insert(user_agent); #[allow(unused_mut)] let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config(); request.properties_mut().insert(signing_config); request .properties_mut() .insert(aws_types::SigningService::from_static( _config.signing_service(), )); aws_endpoint::set_endpoint_resolver( &mut request.properties_mut(), _config.endpoint_resolver.clone(), ); if let Some(region) = &_config.region { request.properties_mut().insert(region.clone()); } aws_http::auth::set_provider( &mut request.properties_mut(), _config.credentials_provider.clone(), ); let op = aws_smithy_http::operation::Operation::new( request, crate::operation::CreateCluster::new(), ) .with_metadata(aws_smithy_http::operation::Metadata::new( "CreateCluster", "kafka", )); let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new()); Ok(op) } fn assemble( builder: http::request::Builder, body: aws_smithy_http::body::SdkBody, ) -> http::request::Request<aws_smithy_http::body::SdkBody> { let mut builder = builder; if let Some(content_length) = body.content_length() { builder = aws_smithy_http::header::set_header_if_absent( builder, http::header::CONTENT_LENGTH, content_length, ); } builder.body(body).expect("should be valid request") } /// Creates a new builder-style object to manufacture [`CreateClusterInput`](crate::input::CreateClusterInput) pub fn builder() -> crate::input::create_cluster_input::Builder { crate::input::create_cluster_input::Builder::default() } } /// See [`CreateConfigurationInput`](crate::input::CreateConfigurationInput) pub mod create_configuration_input { /// A builder for [`CreateConfigurationInput`](crate::input::CreateConfigurationInput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) description: std::option::Option<std::string::String>, pub(crate) kafka_versions: std::option::Option<std::vec::Vec<std::string::String>>, pub(crate) name: std::option::Option<std::string::String>, pub(crate) server_properties: std::option::Option<aws_smithy_types::Blob>, } impl Builder { /// <p>The description of the configuration.</p> pub fn description(mut self, input: impl Into<std::string::String>) -> Self { self.description = Some(input.into()); self } /// <p>The description of the configuration.</p> pub fn set_description(mut self, input: std::option::Option<std::string::String>) -> Self { self.description = input; self } /// Appends an item to `kafka_versions`. /// /// To override the contents of this collection use [`set_kafka_versions`](Self::set_kafka_versions). /// /// <p>The versions of Apache Kafka with which you can use this MSK configuration.</p> pub fn kafka_versions(mut self, input: impl Into<std::string::String>) -> Self { let mut v = self.kafka_versions.unwrap_or_default(); v.push(input.into()); self.kafka_versions = Some(v); self } /// <p>The versions of Apache Kafka with which you can use this MSK configuration.</p> pub fn set_kafka_versions( mut self, input: std::option::Option<std::vec::Vec<std::string::String>>, ) -> Self { self.kafka_versions = input; self } /// <p>The name of the configuration.</p> pub fn name(mut self, input: impl Into<std::string::String>) -> Self { self.name = Some(input.into()); self } /// <p>The name of the configuration.</p> pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self { self.name = input; self } /// <p>Contents of the <filename>server.properties</filename> file. 
When using the API, you must ensure that the contents of the file are base64 encoded. /// When using the AWS Management Console, the SDK, or the AWS CLI, the contents of <filename>server.properties</filename> can be in plaintext.</p> pub fn server_properties(mut self, input: aws_smithy_types::Blob) -> Self { self.server_properties = Some(input); self } /// <p>Contents of the <filename>server.properties</filename> file. When using the API, you must ensure that the contents of the file are base64 encoded. /// When using the AWS Management Console, the SDK, or the AWS CLI, the contents of <filename>server.properties</filename> can be in plaintext.</p> pub fn set_server_properties( mut self, input: std::option::Option<aws_smithy_types::Blob>, ) -> Self { self.server_properties = input; self } /// Consumes the builder and constructs a [`CreateConfigurationInput`](crate::input::CreateConfigurationInput) pub fn build( self, ) -> std::result::Result< crate::input::CreateConfigurationInput, aws_smithy_http::operation::BuildError, > { Ok(crate::input::CreateConfigurationInput { description: self.description, kafka_versions: self.kafka_versions, name: self.name, server_properties: self.server_properties, }) } } } #[doc(hidden)] pub type CreateConfigurationInputOperationOutputAlias = crate::operation::CreateConfiguration; #[doc(hidden)] pub type CreateConfigurationInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy; impl CreateConfigurationInput { /// Consumes the builder and constructs an Operation<[`CreateConfiguration`](crate::operation::CreateConfiguration)> #[allow(clippy::let_and_return)] #[allow(clippy::needless_borrow)] pub async fn make_operation( &self, _config: &crate::config::Config, ) -> std::result::Result< aws_smithy_http::operation::Operation< crate::operation::CreateConfiguration, aws_http::AwsErrorRetryPolicy, >, aws_smithy_http::operation::BuildError, > { fn uri_base( _input: &crate::input::CreateConfigurationInput, output: &mut String, ) -> Result<(), aws_smithy_http::operation::BuildError> { write!(output, "/v1/configurations").expect("formatting should succeed"); Ok(()) } #[allow(clippy::unnecessary_wraps)] fn update_http_builder( input: &crate::input::CreateConfigurationInput, builder: http::request::Builder, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { let mut uri = String::new(); uri_base(input, &mut uri)?; Ok(builder.method("POST").uri(uri)) } #[allow(clippy::unnecessary_wraps)] fn request_builder_base( input: &crate::input::CreateConfigurationInput, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { #[allow(unused_mut)] let mut builder = update_http_builder(input, http::request::Builder::new())?; builder = aws_smithy_http::header::set_header_if_absent( builder, http::header::HeaderName::from_static("content-type"), "application/json", ); Ok(builder) } let properties = aws_smithy_http::property_bag::SharedPropertyBag::new(); let request = request_builder_base(&self)?; let body = crate::operation_ser::serialize_operation_crate_operation_create_configuration(&self)?; let request = Self::assemble(request, body); #[allow(unused_mut)] let mut request = aws_smithy_http::operation::Request::from_parts( request.map(aws_smithy_http::body::SdkBody::from), properties, ); let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment( aws_types::os_shim_internal::Env::real(), crate::API_METADATA.clone(), ); if let Some(app_name) = _config.app_name() { user_agent = 
user_agent.with_app_name(app_name.clone()); } request.properties_mut().insert(user_agent); #[allow(unused_mut)] let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config(); request.properties_mut().insert(signing_config); request .properties_mut() .insert(aws_types::SigningService::from_static( _config.signing_service(), )); aws_endpoint::set_endpoint_resolver( &mut request.properties_mut(), _config.endpoint_resolver.clone(), ); if let Some(region) = &_config.region { request.properties_mut().insert(region.clone()); } aws_http::auth::set_provider( &mut request.properties_mut(), _config.credentials_provider.clone(), ); let op = aws_smithy_http::operation::Operation::new( request, crate::operation::CreateConfiguration::new(), ) .with_metadata(aws_smithy_http::operation::Metadata::new( "CreateConfiguration", "kafka", )); let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new()); Ok(op) } fn assemble( builder: http::request::Builder, body: aws_smithy_http::body::SdkBody, ) -> http::request::Request<aws_smithy_http::body::SdkBody> { let mut builder = builder; if let Some(content_length) = body.content_length() { builder = aws_smithy_http::header::set_header_if_absent( builder, http::header::CONTENT_LENGTH, content_length, ); } builder.body(body).expect("should be valid request") } /// Creates a new builder-style object to manufacture [`CreateConfigurationInput`](crate::input::CreateConfigurationInput) pub fn builder() -> crate::input::create_configuration_input::Builder { crate::input::create_configuration_input::Builder::default() } } /// See [`DeleteClusterInput`](crate::input::DeleteClusterInput) pub mod delete_cluster_input { /// A builder for [`DeleteClusterInput`](crate::input::DeleteClusterInput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) cluster_arn: std::option::Option<std::string::String>, pub(crate) current_version: std::option::Option<std::string::String>, } impl Builder { /// <p>The Amazon Resource Name (ARN) that uniquely identifies the cluster.</p> pub fn cluster_arn(mut self, input: impl Into<std::string::String>) -> Self { self.cluster_arn = Some(input.into()); self } /// <p>The Amazon Resource Name (ARN) that uniquely identifies the cluster.</p> pub fn set_cluster_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.cluster_arn = input; self } /// <p>The current version of the MSK cluster.</p> pub fn current_version(mut self, input: impl Into<std::string::String>) -> Self { self.current_version = Some(input.into()); self } /// <p>The current version of the MSK cluster.</p> pub fn set_current_version( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.current_version = input; self } /// Consumes the builder and constructs a [`DeleteClusterInput`](crate::input::DeleteClusterInput) pub fn build( self, ) -> std::result::Result< crate::input::DeleteClusterInput, aws_smithy_http::operation::BuildError, > { Ok(crate::input::DeleteClusterInput { cluster_arn: self.cluster_arn, current_version: self.current_version, }) } } } #[doc(hidden)] pub type DeleteClusterInputOperationOutputAlias = crate::operation::DeleteCluster; #[doc(hidden)] pub type DeleteClusterInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy; impl DeleteClusterInput { /// Consumes the builder and constructs an Operation<[`DeleteCluster`](crate::operation::DeleteCluster)> #[allow(clippy::let_and_return)] 
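    // Note: unlike the POST operations above, DeleteCluster sends an empty body;
    // its optional currentVersion parameter travels in the query string built by
    // uri_query below.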
#[allow(clippy::needless_borrow)] pub async fn make_operation( &self, _config: &crate::config::Config, ) -> std::result::Result< aws_smithy_http::operation::Operation< crate::operation::DeleteCluster, aws_http::AwsErrorRetryPolicy, >, aws_smithy_http::operation::BuildError, > { fn uri_base( _input: &crate::input::DeleteClusterInput, output: &mut String, ) -> Result<(), aws_smithy_http::operation::BuildError> { let input_3 = &_input.cluster_arn; let input_3 = input_3 .as_ref() .ok_or(aws_smithy_http::operation::BuildError::MissingField { field: "cluster_arn", details: "cannot be empty or unset", })?; let cluster_arn = aws_smithy_http::label::fmt_string(input_3, false); if cluster_arn.is_empty() { return Err(aws_smithy_http::operation::BuildError::MissingField { field: "cluster_arn", details: "cannot be empty or unset", }); } write!( output, "/v1/clusters/{ClusterArn}", ClusterArn = cluster_arn ) .expect("formatting should succeed"); Ok(()) } fn uri_query( _input: &crate::input::DeleteClusterInput, mut output: &mut String, ) -> Result<(), aws_smithy_http::operation::BuildError> { let mut query = aws_smithy_http::query::Writer::new(&mut output); if let Some(inner_4) = &_input.current_version { query.push_kv( "currentVersion", &aws_smithy_http::query::fmt_string(&inner_4), ); } Ok(()) } #[allow(clippy::unnecessary_wraps)] fn update_http_builder( input: &crate::input::DeleteClusterInput, builder: http::request::Builder, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { let mut uri = String::new(); uri_base(input, &mut uri)?; uri_query(input, &mut uri)?; Ok(builder.method("DELETE").uri(uri)) } #[allow(clippy::unnecessary_wraps)] fn request_builder_base( input: &crate::input::DeleteClusterInput, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { #[allow(unused_mut)] let mut builder = update_http_builder(input, http::request::Builder::new())?; Ok(builder) } let properties = aws_smithy_http::property_bag::SharedPropertyBag::new(); let request = request_builder_base(&self)?; let body = aws_smithy_http::body::SdkBody::from(""); let request = Self::assemble(request, body); #[allow(unused_mut)] let mut request = aws_smithy_http::operation::Request::from_parts( request.map(aws_smithy_http::body::SdkBody::from), properties, ); let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment( aws_types::os_shim_internal::Env::real(), crate::API_METADATA.clone(), ); if let Some(app_name) = _config.app_name() { user_agent = user_agent.with_app_name(app_name.clone()); } request.properties_mut().insert(user_agent); #[allow(unused_mut)] let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config(); request.properties_mut().insert(signing_config); request .properties_mut() .insert(aws_types::SigningService::from_static( _config.signing_service(), )); aws_endpoint::set_endpoint_resolver( &mut request.properties_mut(), _config.endpoint_resolver.clone(), ); if let Some(region) = &_config.region { request.properties_mut().insert(region.clone()); } aws_http::auth::set_provider( &mut request.properties_mut(), _config.credentials_provider.clone(), ); let op = aws_smithy_http::operation::Operation::new( request, crate::operation::DeleteCluster::new(), ) .with_metadata(aws_smithy_http::operation::Metadata::new( "DeleteCluster", "kafka", )); let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new()); Ok(op) } fn assemble( builder: http::request::Builder, body: aws_smithy_http::body::SdkBody, ) 
-> http::request::Request<aws_smithy_http::body::SdkBody> { builder.body(body).expect("should be valid request") } /// Creates a new builder-style object to manufacture [`DeleteClusterInput`](crate::input::DeleteClusterInput) pub fn builder() -> crate::input::delete_cluster_input::Builder { crate::input::delete_cluster_input::Builder::default() } } /// See [`DeleteConfigurationInput`](crate::input::DeleteConfigurationInput) pub mod delete_configuration_input { /// A builder for [`DeleteConfigurationInput`](crate::input::DeleteConfigurationInput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) arn: std::option::Option<std::string::String>, } impl Builder { /// <p>The Amazon Resource Name (ARN) that uniquely identifies an MSK configuration.</p> pub fn arn(mut self, input: impl Into<std::string::String>) -> Self { self.arn = Some(input.into()); self } /// <p>The Amazon Resource Name (ARN) that uniquely identifies an MSK configuration.</p> pub fn set_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.arn = input; self } /// Consumes the builder and constructs a [`DeleteConfigurationInput`](crate::input::DeleteConfigurationInput) pub fn build( self, ) -> std::result::Result< crate::input::DeleteConfigurationInput, aws_smithy_http::operation::BuildError, > { Ok(crate::input::DeleteConfigurationInput { arn: self.arn }) } } } #[doc(hidden)] pub type DeleteConfigurationInputOperationOutputAlias = crate::operation::DeleteConfiguration; #[doc(hidden)] pub type DeleteConfigurationInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy; impl DeleteConfigurationInput { /// Consumes the builder and constructs an Operation<[`DeleteConfiguration`](crate::operation::DeleteConfiguration)> #[allow(clippy::let_and_return)] #[allow(clippy::needless_borrow)] pub async fn make_operation( &self, _config: &crate::config::Config, ) -> std::result::Result< aws_smithy_http::operation::Operation< crate::operation::DeleteConfiguration, aws_http::AwsErrorRetryPolicy, >, aws_smithy_http::operation::BuildError, > { fn uri_base( _input: &crate::input::DeleteConfigurationInput, output: &mut String, ) -> Result<(), aws_smithy_http::operation::BuildError> { let input_5 = &_input.arn; let input_5 = input_5 .as_ref() .ok_or(aws_smithy_http::operation::BuildError::MissingField { field: "arn", details: "cannot be empty or unset", })?; let arn = aws_smithy_http::label::fmt_string(input_5, false); if arn.is_empty() { return Err(aws_smithy_http::operation::BuildError::MissingField { field: "arn", details: "cannot be empty or unset", }); } write!(output, "/v1/configurations/{Arn}", Arn = arn) .expect("formatting should succeed"); Ok(()) } #[allow(clippy::unnecessary_wraps)] fn update_http_builder( input: &crate::input::DeleteConfigurationInput, builder: http::request::Builder, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { let mut uri = String::new(); uri_base(input, &mut uri)?; Ok(builder.method("DELETE").uri(uri)) } #[allow(clippy::unnecessary_wraps)] fn request_builder_base( input: &crate::input::DeleteConfigurationInput, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { #[allow(unused_mut)] let mut builder = update_http_builder(input, http::request::Builder::new())?; Ok(builder) } let properties = aws_smithy_http::property_bag::SharedPropertyBag::new(); let request = request_builder_base(&self)?; let body = 
aws_smithy_http::body::SdkBody::from(""); let request = Self::assemble(request, body); #[allow(unused_mut)] let mut request = aws_smithy_http::operation::Request::from_parts( request.map(aws_smithy_http::body::SdkBody::from), properties, ); let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment( aws_types::os_shim_internal::Env::real(), crate::API_METADATA.clone(), ); if let Some(app_name) = _config.app_name() { user_agent = user_agent.with_app_name(app_name.clone()); } request.properties_mut().insert(user_agent); #[allow(unused_mut)] let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config(); request.properties_mut().insert(signing_config); request .properties_mut() .insert(aws_types::SigningService::from_static( _config.signing_service(), )); aws_endpoint::set_endpoint_resolver( &mut request.properties_mut(), _config.endpoint_resolver.clone(), ); if let Some(region) = &_config.region { request.properties_mut().insert(region.clone()); } aws_http::auth::set_provider( &mut request.properties_mut(), _config.credentials_provider.clone(), ); let op = aws_smithy_http::operation::Operation::new( request, crate::operation::DeleteConfiguration::new(), ) .with_metadata(aws_smithy_http::operation::Metadata::new( "DeleteConfiguration", "kafka", )); let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new()); Ok(op) } fn assemble( builder: http::request::Builder, body: aws_smithy_http::body::SdkBody, ) -> http::request::Request<aws_smithy_http::body::SdkBody> { builder.body(body).expect("should be valid request") } /// Creates a new builder-style object to manufacture [`DeleteConfigurationInput`](crate::input::DeleteConfigurationInput) pub fn builder() -> crate::input::delete_configuration_input::Builder { crate::input::delete_configuration_input::Builder::default() } } /// See [`DescribeClusterInput`](crate::input::DescribeClusterInput) pub mod describe_cluster_input { /// A builder for [`DescribeClusterInput`](crate::input::DescribeClusterInput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) cluster_arn: std::option::Option<std::string::String>, } impl Builder { /// <p>The Amazon Resource Name (ARN) that uniquely identifies the cluster.</p> pub fn cluster_arn(mut self, input: impl Into<std::string::String>) -> Self { self.cluster_arn = Some(input.into()); self } /// <p>The Amazon Resource Name (ARN) that uniquely identifies the cluster.</p> pub fn set_cluster_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.cluster_arn = input; self } /// Consumes the builder and constructs a [`DescribeClusterInput`](crate::input::DescribeClusterInput) pub fn build( self, ) -> std::result::Result< crate::input::DescribeClusterInput, aws_smithy_http::operation::BuildError, > { Ok(crate::input::DescribeClusterInput { cluster_arn: self.cluster_arn, }) } } } #[doc(hidden)] pub type DescribeClusterInputOperationOutputAlias = crate::operation::DescribeCluster; #[doc(hidden)] pub type DescribeClusterInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy; impl DescribeClusterInput { /// Consumes the builder and constructs an Operation<[`DescribeCluster`](crate::operation::DescribeCluster)> #[allow(clippy::let_and_return)] #[allow(clippy::needless_borrow)] pub async fn make_operation( &self, _config: &crate::config::Config, ) -> std::result::Result< aws_smithy_http::operation::Operation< crate::operation::DescribeCluster, 
aws_http::AwsErrorRetryPolicy, >, aws_smithy_http::operation::BuildError, > { fn uri_base( _input: &crate::input::DescribeClusterInput, output: &mut String, ) -> Result<(), aws_smithy_http::operation::BuildError> { let input_6 = &_input.cluster_arn; let input_6 = input_6 .as_ref() .ok_or(aws_smithy_http::operation::BuildError::MissingField { field: "cluster_arn", details: "cannot be empty or unset", })?; let cluster_arn = aws_smithy_http::label::fmt_string(input_6, false); if cluster_arn.is_empty() { return Err(aws_smithy_http::operation::BuildError::MissingField { field: "cluster_arn", details: "cannot be empty or unset", }); } write!( output, "/v1/clusters/{ClusterArn}", ClusterArn = cluster_arn ) .expect("formatting should succeed"); Ok(()) } #[allow(clippy::unnecessary_wraps)] fn update_http_builder( input: &crate::input::DescribeClusterInput, builder: http::request::Builder, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { let mut uri = String::new(); uri_base(input, &mut uri)?; Ok(builder.method("GET").uri(uri)) } #[allow(clippy::unnecessary_wraps)] fn request_builder_base( input: &crate::input::DescribeClusterInput, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { #[allow(unused_mut)] let mut builder = update_http_builder(input, http::request::Builder::new())?; Ok(builder) } let properties = aws_smithy_http::property_bag::SharedPropertyBag::new(); let request = request_builder_base(&self)?; let body = aws_smithy_http::body::SdkBody::from(""); let request = Self::assemble(request, body); #[allow(unused_mut)] let mut request = aws_smithy_http::operation::Request::from_parts( request.map(aws_smithy_http::body::SdkBody::from), properties, ); let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment( aws_types::os_shim_internal::Env::real(), crate::API_METADATA.clone(), ); if let Some(app_name) = _config.app_name() { user_agent = user_agent.with_app_name(app_name.clone()); } request.properties_mut().insert(user_agent); #[allow(unused_mut)] let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config(); request.properties_mut().insert(signing_config); request .properties_mut() .insert(aws_types::SigningService::from_static( _config.signing_service(), )); aws_endpoint::set_endpoint_resolver( &mut request.properties_mut(), _config.endpoint_resolver.clone(), ); if let Some(region) = &_config.region { request.properties_mut().insert(region.clone()); } aws_http::auth::set_provider( &mut request.properties_mut(), _config.credentials_provider.clone(), ); let op = aws_smithy_http::operation::Operation::new( request, crate::operation::DescribeCluster::new(), ) .with_metadata(aws_smithy_http::operation::Metadata::new( "DescribeCluster", "kafka", )); let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new()); Ok(op) } fn assemble( builder: http::request::Builder, body: aws_smithy_http::body::SdkBody, ) -> http::request::Request<aws_smithy_http::body::SdkBody> { builder.body(body).expect("should be valid request") } /// Creates a new builder-style object to manufacture [`DescribeClusterInput`](crate::input::DescribeClusterInput) pub fn builder() -> crate::input::describe_cluster_input::Builder { crate::input::describe_cluster_input::Builder::default() } } /// See [`DescribeClusterOperationInput`](crate::input::DescribeClusterOperationInput) pub mod describe_cluster_operation_input { /// A builder for 
[`DescribeClusterOperationInput`](crate::input::DescribeClusterOperationInput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) cluster_operation_arn: std::option::Option<std::string::String>, } impl Builder { /// <p>The Amazon Resource Name (ARN) that uniquely identifies the MSK cluster operation.</p> pub fn cluster_operation_arn(mut self, input: impl Into<std::string::String>) -> Self { self.cluster_operation_arn = Some(input.into()); self } /// <p>The Amazon Resource Name (ARN) that uniquely identifies the MSK cluster operation.</p> pub fn set_cluster_operation_arn( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.cluster_operation_arn = input; self } /// Consumes the builder and constructs a [`DescribeClusterOperationInput`](crate::input::DescribeClusterOperationInput) pub fn build( self, ) -> std::result::Result< crate::input::DescribeClusterOperationInput, aws_smithy_http::operation::BuildError, > { Ok(crate::input::DescribeClusterOperationInput { cluster_operation_arn: self.cluster_operation_arn, }) } } } #[doc(hidden)] pub type DescribeClusterOperationInputOperationOutputAlias = crate::operation::DescribeClusterOperation; #[doc(hidden)] pub type DescribeClusterOperationInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy; impl DescribeClusterOperationInput { /// Consumes the builder and constructs an Operation<[`DescribeClusterOperation`](crate::operation::DescribeClusterOperation)> #[allow(clippy::let_and_return)] #[allow(clippy::needless_borrow)] pub async fn make_operation( &self, _config: &crate::config::Config, ) -> std::result::Result< aws_smithy_http::operation::Operation< crate::operation::DescribeClusterOperation, aws_http::AwsErrorRetryPolicy, >, aws_smithy_http::operation::BuildError, > { fn uri_base( _input: &crate::input::DescribeClusterOperationInput, output: &mut String, ) -> Result<(), aws_smithy_http::operation::BuildError> { let input_7 = &_input.cluster_operation_arn; let input_7 = input_7 .as_ref() .ok_or(aws_smithy_http::operation::BuildError::MissingField { field: "cluster_operation_arn", details: "cannot be empty or unset", })?; let cluster_operation_arn = aws_smithy_http::label::fmt_string(input_7, false); if cluster_operation_arn.is_empty() { return Err(aws_smithy_http::operation::BuildError::MissingField { field: "cluster_operation_arn", details: "cannot be empty or unset", }); } write!( output, "/v1/operations/{ClusterOperationArn}", ClusterOperationArn = cluster_operation_arn ) .expect("formatting should succeed"); Ok(()) } #[allow(clippy::unnecessary_wraps)] fn update_http_builder( input: &crate::input::DescribeClusterOperationInput, builder: http::request::Builder, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { let mut uri = String::new(); uri_base(input, &mut uri)?; Ok(builder.method("GET").uri(uri)) } #[allow(clippy::unnecessary_wraps)] fn request_builder_base( input: &crate::input::DescribeClusterOperationInput, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { #[allow(unused_mut)] let mut builder = update_http_builder(input, http::request::Builder::new())?; Ok(builder) } let properties = aws_smithy_http::property_bag::SharedPropertyBag::new(); let request = request_builder_base(&self)?; let body = aws_smithy_http::body::SdkBody::from(""); let request = Self::assemble(request, body); #[allow(unused_mut)] let mut request = 
aws_smithy_http::operation::Request::from_parts( request.map(aws_smithy_http::body::SdkBody::from), properties, ); let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment( aws_types::os_shim_internal::Env::real(), crate::API_METADATA.clone(), ); if let Some(app_name) = _config.app_name() { user_agent = user_agent.with_app_name(app_name.clone()); } request.properties_mut().insert(user_agent); #[allow(unused_mut)] let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config(); request.properties_mut().insert(signing_config); request .properties_mut() .insert(aws_types::SigningService::from_static( _config.signing_service(), )); aws_endpoint::set_endpoint_resolver( &mut request.properties_mut(), _config.endpoint_resolver.clone(), ); if let Some(region) = &_config.region { request.properties_mut().insert(region.clone()); } aws_http::auth::set_provider( &mut request.properties_mut(), _config.credentials_provider.clone(), ); let op = aws_smithy_http::operation::Operation::new( request, crate::operation::DescribeClusterOperation::new(), ) .with_metadata(aws_smithy_http::operation::Metadata::new( "DescribeClusterOperation", "kafka", )); let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new()); Ok(op) } fn assemble( builder: http::request::Builder, body: aws_smithy_http::body::SdkBody, ) -> http::request::Request<aws_smithy_http::body::SdkBody> { builder.body(body).expect("should be valid request") } /// Creates a new builder-style object to manufacture [`DescribeClusterOperationInput`](crate::input::DescribeClusterOperationInput) pub fn builder() -> crate::input::describe_cluster_operation_input::Builder { crate::input::describe_cluster_operation_input::Builder::default() } } /// See [`DescribeConfigurationInput`](crate::input::DescribeConfigurationInput) pub mod describe_configuration_input { /// A builder for [`DescribeConfigurationInput`](crate::input::DescribeConfigurationInput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) arn: std::option::Option<std::string::String>, } impl Builder { /// <p>The Amazon Resource Name (ARN) that uniquely identifies an MSK configuration and all of its revisions.</p> pub fn arn(mut self, input: impl Into<std::string::String>) -> Self { self.arn = Some(input.into()); self } /// <p>The Amazon Resource Name (ARN) that uniquely identifies an MSK configuration and all of its revisions.</p> pub fn set_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.arn = input; self } /// Consumes the builder and constructs a [`DescribeConfigurationInput`](crate::input::DescribeConfigurationInput) pub fn build( self, ) -> std::result::Result< crate::input::DescribeConfigurationInput, aws_smithy_http::operation::BuildError, > { Ok(crate::input::DescribeConfigurationInput { arn: self.arn }) } } } #[doc(hidden)] pub type DescribeConfigurationInputOperationOutputAlias = crate::operation::DescribeConfiguration; #[doc(hidden)] pub type DescribeConfigurationInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy; impl DescribeConfigurationInput { /// Consumes the builder and constructs an Operation<[`DescribeConfiguration`](crate::operation::DescribeConfiguration)> #[allow(clippy::let_and_return)] #[allow(clippy::needless_borrow)] pub async fn make_operation( &self, _config: &crate::config::Config, ) -> std::result::Result< aws_smithy_http::operation::Operation< crate::operation::DescribeConfiguration, 
aws_http::AwsErrorRetryPolicy, >, aws_smithy_http::operation::BuildError, > { fn uri_base( _input: &crate::input::DescribeConfigurationInput, output: &mut String, ) -> Result<(), aws_smithy_http::operation::BuildError> { let input_8 = &_input.arn; let input_8 = input_8 .as_ref() .ok_or(aws_smithy_http::operation::BuildError::MissingField { field: "arn", details: "cannot be empty or unset", })?; let arn = aws_smithy_http::label::fmt_string(input_8, false); if arn.is_empty() { return Err(aws_smithy_http::operation::BuildError::MissingField { field: "arn", details: "cannot be empty or unset", }); } write!(output, "/v1/configurations/{Arn}", Arn = arn) .expect("formatting should succeed"); Ok(()) } #[allow(clippy::unnecessary_wraps)] fn update_http_builder( input: &crate::input::DescribeConfigurationInput, builder: http::request::Builder, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { let mut uri = String::new(); uri_base(input, &mut uri)?; Ok(builder.method("GET").uri(uri)) } #[allow(clippy::unnecessary_wraps)] fn request_builder_base( input: &crate::input::DescribeConfigurationInput, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { #[allow(unused_mut)] let mut builder = update_http_builder(input, http::request::Builder::new())?; Ok(builder) } let properties = aws_smithy_http::property_bag::SharedPropertyBag::new(); let request = request_builder_base(&self)?; let body = aws_smithy_http::body::SdkBody::from(""); let request = Self::assemble(request, body); #[allow(unused_mut)] let mut request = aws_smithy_http::operation::Request::from_parts( request.map(aws_smithy_http::body::SdkBody::from), properties, ); let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment( aws_types::os_shim_internal::Env::real(), crate::API_METADATA.clone(), ); if let Some(app_name) = _config.app_name() { user_agent = user_agent.with_app_name(app_name.clone()); } request.properties_mut().insert(user_agent); #[allow(unused_mut)] let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config(); request.properties_mut().insert(signing_config); request .properties_mut() .insert(aws_types::SigningService::from_static( _config.signing_service(), )); aws_endpoint::set_endpoint_resolver( &mut request.properties_mut(), _config.endpoint_resolver.clone(), ); if let Some(region) = &_config.region { request.properties_mut().insert(region.clone()); } aws_http::auth::set_provider( &mut request.properties_mut(), _config.credentials_provider.clone(), ); let op = aws_smithy_http::operation::Operation::new( request, crate::operation::DescribeConfiguration::new(), ) .with_metadata(aws_smithy_http::operation::Metadata::new( "DescribeConfiguration", "kafka", )); let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new()); Ok(op) } fn assemble( builder: http::request::Builder, body: aws_smithy_http::body::SdkBody, ) -> http::request::Request<aws_smithy_http::body::SdkBody> { builder.body(body).expect("should be valid request") } /// Creates a new builder-style object to manufacture [`DescribeConfigurationInput`](crate::input::DescribeConfigurationInput) pub fn builder() -> crate::input::describe_configuration_input::Builder { crate::input::describe_configuration_input::Builder::default() } } /// See [`DescribeConfigurationRevisionInput`](crate::input::DescribeConfigurationRevisionInput) pub mod describe_configuration_revision_input { /// A builder for 
[`DescribeConfigurationRevisionInput`](crate::input::DescribeConfigurationRevisionInput)
    #[non_exhaustive]
    #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
        pub(crate) arn: std::option::Option<std::string::String>,
        pub(crate) revision: std::option::Option<i64>,
    }
    impl Builder {
        /// <p>The Amazon Resource Name (ARN) that uniquely identifies an MSK configuration and all of its revisions.</p>
        pub fn arn(mut self, input: impl Into<std::string::String>) -> Self {
            self.arn = Some(input.into());
            self
        }
        /// <p>The Amazon Resource Name (ARN) that uniquely identifies an MSK configuration and all of its revisions.</p>
        pub fn set_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
            self.arn = input;
            self
        }
        /// <p>A number that uniquely identifies a revision of an MSK configuration.</p>
        pub fn revision(mut self, input: i64) -> Self {
            self.revision = Some(input);
            self
        }
        /// <p>A number that uniquely identifies a revision of an MSK configuration.</p>
        pub fn set_revision(mut self, input: std::option::Option<i64>) -> Self {
            self.revision = input;
            self
        }
        /// Consumes the builder and constructs a [`DescribeConfigurationRevisionInput`](crate::input::DescribeConfigurationRevisionInput)
        pub fn build(
            self,
        ) -> std::result::Result<
            crate::input::DescribeConfigurationRevisionInput,
            aws_smithy_http::operation::BuildError,
        > {
            Ok(crate::input::DescribeConfigurationRevisionInput {
                arn: self.arn,
                revision: self.revision.unwrap_or_default(),
            })
        }
    }
}
#[doc(hidden)]
pub type DescribeConfigurationRevisionInputOperationOutputAlias =
    crate::operation::DescribeConfigurationRevision;
#[doc(hidden)]
pub type DescribeConfigurationRevisionInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy;
impl DescribeConfigurationRevisionInput {
    /// Consumes the builder and constructs an Operation<[`DescribeConfigurationRevision`](crate::operation::DescribeConfigurationRevision)>
    #[allow(clippy::let_and_return)]
    #[allow(clippy::needless_borrow)]
    pub async fn make_operation(
        &self,
        _config: &crate::config::Config,
    ) -> std::result::Result<
        aws_smithy_http::operation::Operation<
            crate::operation::DescribeConfigurationRevision,
            aws_http::AwsErrorRetryPolicy,
        >,
        aws_smithy_http::operation::BuildError,
    > {
        fn uri_base(
            _input: &crate::input::DescribeConfigurationRevisionInput,
            output: &mut String,
        ) -> Result<(), aws_smithy_http::operation::BuildError> {
            let input_9 = &_input.arn;
            let input_9 = input_9
                .as_ref()
                .ok_or(aws_smithy_http::operation::BuildError::MissingField {
                    field: "arn",
                    details: "cannot be empty or unset",
                })?;
            let arn = aws_smithy_http::label::fmt_string(input_9, false);
            if arn.is_empty() {
                return Err(aws_smithy_http::operation::BuildError::MissingField {
                    field: "arn",
                    details: "cannot be empty or unset",
                });
            }
            let input_10 = &_input.revision;
            let mut revision_encoder = aws_smithy_types::primitive::Encoder::from(*input_10);
            let revision = revision_encoder.encode();
            if revision.is_empty() {
                return Err(aws_smithy_http::operation::BuildError::MissingField {
                    field: "revision",
                    details: "cannot be empty or unset",
                });
            }
            write!(
                output,
                "/v1/configurations/{Arn}/revisions/{Revision}",
                Arn = arn,
                Revision = revision
            )
            .expect("formatting should succeed");
            Ok(())
        }
        #[allow(clippy::unnecessary_wraps)]
        fn update_http_builder(
            input: &crate::input::DescribeConfigurationRevisionInput,
            builder: http::request::Builder,
        ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> {
            let mut uri = String::new();
            uri_base(input,
&mut uri)?; Ok(builder.method("GET").uri(uri)) } #[allow(clippy::unnecessary_wraps)] fn request_builder_base( input: &crate::input::DescribeConfigurationRevisionInput, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { #[allow(unused_mut)] let mut builder = update_http_builder(input, http::request::Builder::new())?; Ok(builder) } let properties = aws_smithy_http::property_bag::SharedPropertyBag::new(); let request = request_builder_base(&self)?; let body = aws_smithy_http::body::SdkBody::from(""); let request = Self::assemble(request, body); #[allow(unused_mut)] let mut request = aws_smithy_http::operation::Request::from_parts( request.map(aws_smithy_http::body::SdkBody::from), properties, ); let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment( aws_types::os_shim_internal::Env::real(), crate::API_METADATA.clone(), ); if let Some(app_name) = _config.app_name() { user_agent = user_agent.with_app_name(app_name.clone()); } request.properties_mut().insert(user_agent); #[allow(unused_mut)] let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config(); request.properties_mut().insert(signing_config); request .properties_mut() .insert(aws_types::SigningService::from_static( _config.signing_service(), )); aws_endpoint::set_endpoint_resolver( &mut request.properties_mut(), _config.endpoint_resolver.clone(), ); if let Some(region) = &_config.region { request.properties_mut().insert(region.clone()); } aws_http::auth::set_provider( &mut request.properties_mut(), _config.credentials_provider.clone(), ); let op = aws_smithy_http::operation::Operation::new( request, crate::operation::DescribeConfigurationRevision::new(), ) .with_metadata(aws_smithy_http::operation::Metadata::new( "DescribeConfigurationRevision", "kafka", )); let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new()); Ok(op) } fn assemble( builder: http::request::Builder, body: aws_smithy_http::body::SdkBody, ) -> http::request::Request<aws_smithy_http::body::SdkBody> { builder.body(body).expect("should be valid request") } /// Creates a new builder-style object to manufacture [`DescribeConfigurationRevisionInput`](crate::input::DescribeConfigurationRevisionInput) pub fn builder() -> crate::input::describe_configuration_revision_input::Builder { crate::input::describe_configuration_revision_input::Builder::default() } } /// See [`GetBootstrapBrokersInput`](crate::input::GetBootstrapBrokersInput) pub mod get_bootstrap_brokers_input { /// A builder for [`GetBootstrapBrokersInput`](crate::input::GetBootstrapBrokersInput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) cluster_arn: std::option::Option<std::string::String>, } impl Builder { /// <p>The Amazon Resource Name (ARN) that uniquely identifies the cluster.</p> pub fn cluster_arn(mut self, input: impl Into<std::string::String>) -> Self { self.cluster_arn = Some(input.into()); self } /// <p>The Amazon Resource Name (ARN) that uniquely identifies the cluster.</p> pub fn set_cluster_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.cluster_arn = input; self } /// Consumes the builder and constructs a [`GetBootstrapBrokersInput`](crate::input::GetBootstrapBrokersInput) pub fn build( self, ) -> std::result::Result< crate::input::GetBootstrapBrokersInput, aws_smithy_http::operation::BuildError, > { Ok(crate::input::GetBootstrapBrokersInput { cluster_arn: self.cluster_arn, }) 
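            // GetBootstrapBrokers needs only the cluster ARN; the
            // "cannot be empty or unset" check is deferred to uri_base in
            // make_operation rather than enforced here in build().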
} } } #[doc(hidden)] pub type GetBootstrapBrokersInputOperationOutputAlias = crate::operation::GetBootstrapBrokers; #[doc(hidden)] pub type GetBootstrapBrokersInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy; impl GetBootstrapBrokersInput { /// Consumes the builder and constructs an Operation<[`GetBootstrapBrokers`](crate::operation::GetBootstrapBrokers)> #[allow(clippy::let_and_return)] #[allow(clippy::needless_borrow)] pub async fn make_operation( &self, _config: &crate::config::Config, ) -> std::result::Result< aws_smithy_http::operation::Operation< crate::operation::GetBootstrapBrokers, aws_http::AwsErrorRetryPolicy, >, aws_smithy_http::operation::BuildError, > { fn uri_base( _input: &crate::input::GetBootstrapBrokersInput, output: &mut String, ) -> Result<(), aws_smithy_http::operation::BuildError> { let input_11 = &_input.cluster_arn; let input_11 = input_11 .as_ref() .ok_or(aws_smithy_http::operation::BuildError::MissingField { field: "cluster_arn", details: "cannot be empty or unset", })?; let cluster_arn = aws_smithy_http::label::fmt_string(input_11, false); if cluster_arn.is_empty() { return Err(aws_smithy_http::operation::BuildError::MissingField { field: "cluster_arn", details: "cannot be empty or unset", }); } write!( output, "/v1/clusters/{ClusterArn}/bootstrap-brokers", ClusterArn = cluster_arn ) .expect("formatting should succeed"); Ok(()) } #[allow(clippy::unnecessary_wraps)] fn update_http_builder( input: &crate::input::GetBootstrapBrokersInput, builder: http::request::Builder, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { let mut uri = String::new(); uri_base(input, &mut uri)?; Ok(builder.method("GET").uri(uri)) } #[allow(clippy::unnecessary_wraps)] fn request_builder_base( input: &crate::input::GetBootstrapBrokersInput, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { #[allow(unused_mut)] let mut builder = update_http_builder(input, http::request::Builder::new())?; Ok(builder) } let properties = aws_smithy_http::property_bag::SharedPropertyBag::new(); let request = request_builder_base(&self)?; let body = aws_smithy_http::body::SdkBody::from(""); let request = Self::assemble(request, body); #[allow(unused_mut)] let mut request = aws_smithy_http::operation::Request::from_parts( request.map(aws_smithy_http::body::SdkBody::from), properties, ); let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment( aws_types::os_shim_internal::Env::real(), crate::API_METADATA.clone(), ); if let Some(app_name) = _config.app_name() { user_agent = user_agent.with_app_name(app_name.clone()); } request.properties_mut().insert(user_agent); #[allow(unused_mut)] let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config(); request.properties_mut().insert(signing_config); request .properties_mut() .insert(aws_types::SigningService::from_static( _config.signing_service(), )); aws_endpoint::set_endpoint_resolver( &mut request.properties_mut(), _config.endpoint_resolver.clone(), ); if let Some(region) = &_config.region { request.properties_mut().insert(region.clone()); } aws_http::auth::set_provider( &mut request.properties_mut(), _config.credentials_provider.clone(), ); let op = aws_smithy_http::operation::Operation::new( request, crate::operation::GetBootstrapBrokers::new(), ) .with_metadata(aws_smithy_http::operation::Metadata::new( "GetBootstrapBrokers", "kafka", )); let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new()); Ok(op) } fn 
assemble( builder: http::request::Builder, body: aws_smithy_http::body::SdkBody, ) -> http::request::Request<aws_smithy_http::body::SdkBody> { builder.body(body).expect("should be valid request") } /// Creates a new builder-style object to manufacture [`GetBootstrapBrokersInput`](crate::input::GetBootstrapBrokersInput) pub fn builder() -> crate::input::get_bootstrap_brokers_input::Builder { crate::input::get_bootstrap_brokers_input::Builder::default() } } /// See [`GetCompatibleKafkaVersionsInput`](crate::input::GetCompatibleKafkaVersionsInput) pub mod get_compatible_kafka_versions_input { /// A builder for [`GetCompatibleKafkaVersionsInput`](crate::input::GetCompatibleKafkaVersionsInput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) cluster_arn: std::option::Option<std::string::String>, } impl Builder { /// <p>The Amazon Resource Name (ARN) of the cluster to check.</p> pub fn cluster_arn(mut self, input: impl Into<std::string::String>) -> Self { self.cluster_arn = Some(input.into()); self } /// <p>The Amazon Resource Name (ARN) of the cluster to check.</p> pub fn set_cluster_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.cluster_arn = input; self } /// Consumes the builder and constructs a [`GetCompatibleKafkaVersionsInput`](crate::input::GetCompatibleKafkaVersionsInput) pub fn build( self, ) -> std::result::Result< crate::input::GetCompatibleKafkaVersionsInput, aws_smithy_http::operation::BuildError, > { Ok(crate::input::GetCompatibleKafkaVersionsInput { cluster_arn: self.cluster_arn, }) } } } #[doc(hidden)] pub type GetCompatibleKafkaVersionsInputOperationOutputAlias = crate::operation::GetCompatibleKafkaVersions; #[doc(hidden)] pub type GetCompatibleKafkaVersionsInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy; impl GetCompatibleKafkaVersionsInput { /// Consumes the builder and constructs an Operation<[`GetCompatibleKafkaVersions`](crate::operation::GetCompatibleKafkaVersions)> #[allow(clippy::let_and_return)] #[allow(clippy::needless_borrow)] pub async fn make_operation( &self, _config: &crate::config::Config, ) -> std::result::Result< aws_smithy_http::operation::Operation< crate::operation::GetCompatibleKafkaVersions, aws_http::AwsErrorRetryPolicy, >, aws_smithy_http::operation::BuildError, > { fn uri_base( _input: &crate::input::GetCompatibleKafkaVersionsInput, output: &mut String, ) -> Result<(), aws_smithy_http::operation::BuildError> { write!(output, "/v1/compatible-kafka-versions").expect("formatting should succeed"); Ok(()) } fn uri_query( _input: &crate::input::GetCompatibleKafkaVersionsInput, mut output: &mut String, ) -> Result<(), aws_smithy_http::operation::BuildError> { let mut query = aws_smithy_http::query::Writer::new(&mut output); if let Some(inner_12) = &_input.cluster_arn { query.push_kv("clusterArn", &aws_smithy_http::query::fmt_string(&inner_12)); } Ok(()) } #[allow(clippy::unnecessary_wraps)] fn update_http_builder( input: &crate::input::GetCompatibleKafkaVersionsInput, builder: http::request::Builder, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { let mut uri = String::new(); uri_base(input, &mut uri)?; uri_query(input, &mut uri)?; Ok(builder.method("GET").uri(uri)) } #[allow(clippy::unnecessary_wraps)] fn request_builder_base( input: &crate::input::GetCompatibleKafkaVersionsInput, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { #[allow(unused_mut)]
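            // Presumably the `unused_mut`/`unnecessary_wraps` allows exist because this
            // generated shape is shared with operations that set headers or can fail;
            // for a header-less GET like this one the builder passes through unchanged.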
let mut builder = update_http_builder(input, http::request::Builder::new())?; Ok(builder) } let properties = aws_smithy_http::property_bag::SharedPropertyBag::new(); let request = request_builder_base(&self)?; let body = aws_smithy_http::body::SdkBody::from(""); let request = Self::assemble(request, body); #[allow(unused_mut)] let mut request = aws_smithy_http::operation::Request::from_parts( request.map(aws_smithy_http::body::SdkBody::from), properties, ); let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment( aws_types::os_shim_internal::Env::real(), crate::API_METADATA.clone(), ); if let Some(app_name) = _config.app_name() { user_agent = user_agent.with_app_name(app_name.clone()); } request.properties_mut().insert(user_agent); #[allow(unused_mut)] let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config(); request.properties_mut().insert(signing_config); request .properties_mut() .insert(aws_types::SigningService::from_static( _config.signing_service(), )); aws_endpoint::set_endpoint_resolver( &mut request.properties_mut(), _config.endpoint_resolver.clone(), ); if let Some(region) = &_config.region { request.properties_mut().insert(region.clone()); } aws_http::auth::set_provider( &mut request.properties_mut(), _config.credentials_provider.clone(), ); let op = aws_smithy_http::operation::Operation::new( request, crate::operation::GetCompatibleKafkaVersions::new(), ) .with_metadata(aws_smithy_http::operation::Metadata::new( "GetCompatibleKafkaVersions", "kafka", )); let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new()); Ok(op) } fn assemble( builder: http::request::Builder, body: aws_smithy_http::body::SdkBody, ) -> http::request::Request<aws_smithy_http::body::SdkBody> { builder.body(body).expect("should be valid request") } /// Creates a new builder-style object to manufacture [`GetCompatibleKafkaVersionsInput`](crate::input::GetCompatibleKafkaVersionsInput) pub fn builder() -> crate::input::get_compatible_kafka_versions_input::Builder { crate::input::get_compatible_kafka_versions_input::Builder::default() } } /// See [`ListClusterOperationsInput`](crate::input::ListClusterOperationsInput) pub mod list_cluster_operations_input { /// A builder for [`ListClusterOperationsInput`](crate::input::ListClusterOperationsInput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) cluster_arn: std::option::Option<std::string::String>, pub(crate) max_results: std::option::Option<i32>, pub(crate) next_token: std::option::Option<std::string::String>, } impl Builder { /// <p>The Amazon Resource Name (ARN) that uniquely identifies the cluster.</p> pub fn cluster_arn(mut self, input: impl Into<std::string::String>) -> Self { self.cluster_arn = Some(input.into()); self } /// <p>The Amazon Resource Name (ARN) that uniquely identifies the cluster.</p> pub fn set_cluster_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.cluster_arn = input; self } /// <p>The maximum number of results to return in the response. If there are more results, the response includes a NextToken parameter.</p> pub fn max_results(mut self, input: i32) -> Self { self.max_results = Some(input); self } /// <p>The maximum number of results to return in the response. 
/// If there are more results, the response includes a NextToken parameter.</p> pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self { self.max_results = input; self } /// <p>The paginated results marker. When the result of the operation is truncated, the call returns NextToken in the response. /// To get the next batch, provide this token in your next request.</p> pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self { self.next_token = Some(input.into()); self } /// <p>The paginated results marker. When the result of the operation is truncated, the call returns NextToken in the response. /// To get the next batch, provide this token in your next request.</p> pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.next_token = input; self } /// Consumes the builder and constructs a [`ListClusterOperationsInput`](crate::input::ListClusterOperationsInput) pub fn build( self, ) -> std::result::Result< crate::input::ListClusterOperationsInput, aws_smithy_http::operation::BuildError, > { Ok(crate::input::ListClusterOperationsInput { cluster_arn: self.cluster_arn, max_results: self.max_results.unwrap_or_default(), next_token: self.next_token, }) } } } #[doc(hidden)] pub type ListClusterOperationsInputOperationOutputAlias = crate::operation::ListClusterOperations; #[doc(hidden)] pub type ListClusterOperationsInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy; impl ListClusterOperationsInput { /// Consumes the builder and constructs an Operation<[`ListClusterOperations`](crate::operation::ListClusterOperations)> #[allow(clippy::let_and_return)] #[allow(clippy::needless_borrow)] pub async fn make_operation( &self, _config: &crate::config::Config, ) -> std::result::Result< aws_smithy_http::operation::Operation< crate::operation::ListClusterOperations, aws_http::AwsErrorRetryPolicy, >, aws_smithy_http::operation::BuildError, > { fn uri_base( _input: &crate::input::ListClusterOperationsInput, output: &mut String, ) -> Result<(), aws_smithy_http::operation::BuildError> { let input_13 = &_input.cluster_arn; let input_13 = input_13 .as_ref() .ok_or(aws_smithy_http::operation::BuildError::MissingField { field: "cluster_arn", details: "cannot be empty or unset", })?; let cluster_arn = aws_smithy_http::label::fmt_string(input_13, false); if cluster_arn.is_empty() { return Err(aws_smithy_http::operation::BuildError::MissingField { field: "cluster_arn", details: "cannot be empty or unset", }); } write!( output, "/v1/clusters/{ClusterArn}/operations", ClusterArn = cluster_arn ) .expect("formatting should succeed"); Ok(()) } fn uri_query( _input: &crate::input::ListClusterOperationsInput, mut output: &mut String, ) -> Result<(), aws_smithy_http::operation::BuildError> { let mut query = aws_smithy_http::query::Writer::new(&mut output); if _input.max_results != 0 { query.push_kv( "maxResults", aws_smithy_types::primitive::Encoder::from(_input.max_results).encode(), ); } if let Some(inner_14) = &_input.next_token { query.push_kv("nextToken", &aws_smithy_http::query::fmt_string(&inner_14)); } Ok(()) } #[allow(clippy::unnecessary_wraps)] fn update_http_builder( input: &crate::input::ListClusterOperationsInput, builder: http::request::Builder, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { let mut uri = String::new(); uri_base(input, &mut uri)?; uri_query(input, &mut uri)?; Ok(builder.method("GET").uri(uri)) } #[allow(clippy::unnecessary_wraps)] fn request_builder_base( input:
&crate::input::ListClusterOperationsInput, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { #[allow(unused_mut)] let mut builder = update_http_builder(input, http::request::Builder::new())?; Ok(builder) } let properties = aws_smithy_http::property_bag::SharedPropertyBag::new(); let request = request_builder_base(&self)?; let body = aws_smithy_http::body::SdkBody::from(""); let request = Self::assemble(request, body); #[allow(unused_mut)] let mut request = aws_smithy_http::operation::Request::from_parts( request.map(aws_smithy_http::body::SdkBody::from), properties, ); let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment( aws_types::os_shim_internal::Env::real(), crate::API_METADATA.clone(), ); if let Some(app_name) = _config.app_name() { user_agent = user_agent.with_app_name(app_name.clone()); } request.properties_mut().insert(user_agent); #[allow(unused_mut)] let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config(); request.properties_mut().insert(signing_config); request .properties_mut() .insert(aws_types::SigningService::from_static( _config.signing_service(), )); aws_endpoint::set_endpoint_resolver( &mut request.properties_mut(), _config.endpoint_resolver.clone(), ); if let Some(region) = &_config.region { request.properties_mut().insert(region.clone()); } aws_http::auth::set_provider( &mut request.properties_mut(), _config.credentials_provider.clone(), ); let op = aws_smithy_http::operation::Operation::new( request, crate::operation::ListClusterOperations::new(), ) .with_metadata(aws_smithy_http::operation::Metadata::new( "ListClusterOperations", "kafka", )); let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new()); Ok(op) } fn assemble( builder: http::request::Builder, body: aws_smithy_http::body::SdkBody, ) -> http::request::Request<aws_smithy_http::body::SdkBody> { builder.body(body).expect("should be valid request") } /// Creates a new builder-style object to manufacture [`ListClusterOperationsInput`](crate::input::ListClusterOperationsInput) pub fn builder() -> crate::input::list_cluster_operations_input::Builder { crate::input::list_cluster_operations_input::Builder::default() } } /// See [`ListClustersInput`](crate::input::ListClustersInput) pub mod list_clusters_input { /// A builder for [`ListClustersInput`](crate::input::ListClustersInput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) cluster_name_filter: std::option::Option<std::string::String>, pub(crate) max_results: std::option::Option<i32>, pub(crate) next_token: std::option::Option<std::string::String>, } impl Builder { /// <p>Specify a prefix of the name of the clusters that you want to list. The service lists all the clusters whose names start with this prefix.</p> pub fn cluster_name_filter(mut self, input: impl Into<std::string::String>) -> Self { self.cluster_name_filter = Some(input.into()); self } /// <p>Specify a prefix of the name of the clusters that you want to list. The service lists all the clusters whose names start with this prefix.</p> pub fn set_cluster_name_filter( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.cluster_name_filter = input; self } /// <p>The maximum number of results to return in the response. 
/// If there are more results, the response includes a NextToken parameter.</p> pub fn max_results(mut self, input: i32) -> Self { self.max_results = Some(input); self } /// <p>The maximum number of results to return in the response. If there are more results, the response includes a NextToken parameter.</p> pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self { self.max_results = input; self } /// <p>The paginated results marker. When the result of the operation is truncated, the call returns NextToken in the response. /// To get the next batch, provide this token in your next request.</p> pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self { self.next_token = Some(input.into()); self } /// <p>The paginated results marker. When the result of the operation is truncated, the call returns NextToken in the response. /// To get the next batch, provide this token in your next request.</p> pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.next_token = input; self } /// Consumes the builder and constructs a [`ListClustersInput`](crate::input::ListClustersInput) pub fn build( self, ) -> std::result::Result< crate::input::ListClustersInput, aws_smithy_http::operation::BuildError, > { Ok(crate::input::ListClustersInput { cluster_name_filter: self.cluster_name_filter, max_results: self.max_results.unwrap_or_default(), next_token: self.next_token, }) } } } #[doc(hidden)] pub type ListClustersInputOperationOutputAlias = crate::operation::ListClusters; #[doc(hidden)] pub type ListClustersInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy; impl ListClustersInput { /// Consumes the builder and constructs an Operation<[`ListClusters`](crate::operation::ListClusters)> #[allow(clippy::let_and_return)] #[allow(clippy::needless_borrow)] pub async fn make_operation( &self, _config: &crate::config::Config, ) -> std::result::Result< aws_smithy_http::operation::Operation< crate::operation::ListClusters, aws_http::AwsErrorRetryPolicy, >, aws_smithy_http::operation::BuildError, > { fn uri_base( _input: &crate::input::ListClustersInput, output: &mut String, ) -> Result<(), aws_smithy_http::operation::BuildError> { write!(output, "/v1/clusters").expect("formatting should succeed"); Ok(()) } fn uri_query( _input: &crate::input::ListClustersInput, mut output: &mut String, ) -> Result<(), aws_smithy_http::operation::BuildError> { let mut query = aws_smithy_http::query::Writer::new(&mut output); if let Some(inner_15) = &_input.cluster_name_filter { query.push_kv( "clusterNameFilter", &aws_smithy_http::query::fmt_string(&inner_15), ); } if _input.max_results != 0 { query.push_kv( "maxResults", aws_smithy_types::primitive::Encoder::from(_input.max_results).encode(), ); } if let Some(inner_16) = &_input.next_token { query.push_kv("nextToken", &aws_smithy_http::query::fmt_string(&inner_16)); } Ok(()) } #[allow(clippy::unnecessary_wraps)] fn update_http_builder( input: &crate::input::ListClustersInput, builder: http::request::Builder, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { let mut uri = String::new(); uri_base(input, &mut uri)?; uri_query(input, &mut uri)?; Ok(builder.method("GET").uri(uri)) } #[allow(clippy::unnecessary_wraps)] fn request_builder_base( input: &crate::input::ListClustersInput, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { #[allow(unused_mut)] let mut builder = update_http_builder(input, http::request::Builder::new())?;
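            // Note: `uri_query` above omits `maxResults` when it is 0, the i32 default
            // produced by `unwrap_or_default()` in `build()`; an unset max_results (or,
            // by the same sentinel, an explicit 0) therefore never reaches the wire.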
Ok(builder) } let properties = aws_smithy_http::property_bag::SharedPropertyBag::new(); let request = request_builder_base(&self)?; let body = aws_smithy_http::body::SdkBody::from(""); let request = Self::assemble(request, body); #[allow(unused_mut)] let mut request = aws_smithy_http::operation::Request::from_parts( request.map(aws_smithy_http::body::SdkBody::from), properties, ); let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment( aws_types::os_shim_internal::Env::real(), crate::API_METADATA.clone(), ); if let Some(app_name) = _config.app_name() { user_agent = user_agent.with_app_name(app_name.clone()); } request.properties_mut().insert(user_agent); #[allow(unused_mut)] let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config(); request.properties_mut().insert(signing_config); request .properties_mut() .insert(aws_types::SigningService::from_static( _config.signing_service(), )); aws_endpoint::set_endpoint_resolver( &mut request.properties_mut(), _config.endpoint_resolver.clone(), ); if let Some(region) = &_config.region { request.properties_mut().insert(region.clone()); } aws_http::auth::set_provider( &mut request.properties_mut(), _config.credentials_provider.clone(), ); let op = aws_smithy_http::operation::Operation::new( request, crate::operation::ListClusters::new(), ) .with_metadata(aws_smithy_http::operation::Metadata::new( "ListClusters", "kafka", )); let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new()); Ok(op) } fn assemble( builder: http::request::Builder, body: aws_smithy_http::body::SdkBody, ) -> http::request::Request<aws_smithy_http::body::SdkBody> { builder.body(body).expect("should be valid request") } /// Creates a new builder-style object to manufacture [`ListClustersInput`](crate::input::ListClustersInput) pub fn builder() -> crate::input::list_clusters_input::Builder { crate::input::list_clusters_input::Builder::default() } } /// See [`ListConfigurationRevisionsInput`](crate::input::ListConfigurationRevisionsInput) pub mod list_configuration_revisions_input { /// A builder for [`ListConfigurationRevisionsInput`](crate::input::ListConfigurationRevisionsInput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) arn: std::option::Option<std::string::String>, pub(crate) max_results: std::option::Option<i32>, pub(crate) next_token: std::option::Option<std::string::String>, } impl Builder { /// <p>The Amazon Resource Name (ARN) that uniquely identifies an MSK configuration and all of its revisions.</p> pub fn arn(mut self, input: impl Into<std::string::String>) -> Self { self.arn = Some(input.into()); self } /// <p>The Amazon Resource Name (ARN) that uniquely identifies an MSK configuration and all of its revisions.</p> pub fn set_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.arn = input; self } /// <p>The maximum number of results to return in the response. If there are more results, the response includes a NextToken parameter.</p> pub fn max_results(mut self, input: i32) -> Self { self.max_results = Some(input); self } /// <p>The maximum number of results to return in the response. If there are more results, the response includes a NextToken parameter.</p> pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self { self.max_results = input; self } /// <p>The paginated results marker. 
/// When the result of the operation is truncated, the call returns NextToken in the response. /// To get the next batch, provide this token in your next request.</p> pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self { self.next_token = Some(input.into()); self } /// <p>The paginated results marker. When the result of the operation is truncated, the call returns NextToken in the response. /// To get the next batch, provide this token in your next request.</p> pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.next_token = input; self } /// Consumes the builder and constructs a [`ListConfigurationRevisionsInput`](crate::input::ListConfigurationRevisionsInput) pub fn build( self, ) -> std::result::Result< crate::input::ListConfigurationRevisionsInput, aws_smithy_http::operation::BuildError, > { Ok(crate::input::ListConfigurationRevisionsInput { arn: self.arn, max_results: self.max_results.unwrap_or_default(), next_token: self.next_token, }) } } } #[doc(hidden)] pub type ListConfigurationRevisionsInputOperationOutputAlias = crate::operation::ListConfigurationRevisions; #[doc(hidden)] pub type ListConfigurationRevisionsInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy; impl ListConfigurationRevisionsInput { /// Consumes the builder and constructs an Operation<[`ListConfigurationRevisions`](crate::operation::ListConfigurationRevisions)> #[allow(clippy::let_and_return)] #[allow(clippy::needless_borrow)] pub async fn make_operation( &self, _config: &crate::config::Config, ) -> std::result::Result< aws_smithy_http::operation::Operation< crate::operation::ListConfigurationRevisions, aws_http::AwsErrorRetryPolicy, >, aws_smithy_http::operation::BuildError, > { fn uri_base( _input: &crate::input::ListConfigurationRevisionsInput, output: &mut String, ) -> Result<(), aws_smithy_http::operation::BuildError> { let input_17 = &_input.arn; let input_17 = input_17 .as_ref() .ok_or(aws_smithy_http::operation::BuildError::MissingField { field: "arn", details: "cannot be empty or unset", })?; let arn = aws_smithy_http::label::fmt_string(input_17, false); if arn.is_empty() { return Err(aws_smithy_http::operation::BuildError::MissingField { field: "arn", details: "cannot be empty or unset", }); } write!(output, "/v1/configurations/{Arn}/revisions", Arn = arn) .expect("formatting should succeed"); Ok(()) } fn uri_query( _input: &crate::input::ListConfigurationRevisionsInput, mut output: &mut String, ) -> Result<(), aws_smithy_http::operation::BuildError> { let mut query = aws_smithy_http::query::Writer::new(&mut output); if _input.max_results != 0 { query.push_kv( "maxResults", aws_smithy_types::primitive::Encoder::from(_input.max_results).encode(), ); } if let Some(inner_18) = &_input.next_token { query.push_kv("nextToken", &aws_smithy_http::query::fmt_string(&inner_18)); } Ok(()) } #[allow(clippy::unnecessary_wraps)] fn update_http_builder( input: &crate::input::ListConfigurationRevisionsInput, builder: http::request::Builder, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { let mut uri = String::new(); uri_base(input, &mut uri)?; uri_query(input, &mut uri)?; Ok(builder.method("GET").uri(uri)) } #[allow(clippy::unnecessary_wraps)] fn request_builder_base( input: &crate::input::ListConfigurationRevisionsInput, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { #[allow(unused_mut)] let mut builder = update_http_builder(input, http::request::Builder::new())?;
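            // `aws_smithy_http::label::fmt_string(input_17, false)` above percent-encodes
            // the ARN as a single path segment (`false` presumably selects the
            // non-greedy, slash-encoding mode), and the follow-up emptiness check maps a
            // present-but-empty ARN to the same `MissingField` error as an unset one.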
Ok(builder) } let properties = aws_smithy_http::property_bag::SharedPropertyBag::new(); let request = request_builder_base(&self)?; let body = aws_smithy_http::body::SdkBody::from(""); let request = Self::assemble(request, body); #[allow(unused_mut)] let mut request = aws_smithy_http::operation::Request::from_parts( request.map(aws_smithy_http::body::SdkBody::from), properties, ); let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment( aws_types::os_shim_internal::Env::real(), crate::API_METADATA.clone(), ); if let Some(app_name) = _config.app_name() { user_agent = user_agent.with_app_name(app_name.clone()); } request.properties_mut().insert(user_agent); #[allow(unused_mut)] let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config(); request.properties_mut().insert(signing_config); request .properties_mut() .insert(aws_types::SigningService::from_static( _config.signing_service(), )); aws_endpoint::set_endpoint_resolver( &mut request.properties_mut(), _config.endpoint_resolver.clone(), ); if let Some(region) = &_config.region { request.properties_mut().insert(region.clone()); } aws_http::auth::set_provider( &mut request.properties_mut(), _config.credentials_provider.clone(), ); let op = aws_smithy_http::operation::Operation::new( request, crate::operation::ListConfigurationRevisions::new(), ) .with_metadata(aws_smithy_http::operation::Metadata::new( "ListConfigurationRevisions", "kafka", )); let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new()); Ok(op) } fn assemble( builder: http::request::Builder, body: aws_smithy_http::body::SdkBody, ) -> http::request::Request<aws_smithy_http::body::SdkBody> { builder.body(body).expect("should be valid request") } /// Creates a new builder-style object to manufacture [`ListConfigurationRevisionsInput`](crate::input::ListConfigurationRevisionsInput) pub fn builder() -> crate::input::list_configuration_revisions_input::Builder { crate::input::list_configuration_revisions_input::Builder::default() } } /// See [`ListConfigurationsInput`](crate::input::ListConfigurationsInput) pub mod list_configurations_input { /// A builder for [`ListConfigurationsInput`](crate::input::ListConfigurationsInput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) max_results: std::option::Option<i32>, pub(crate) next_token: std::option::Option<std::string::String>, } impl Builder { /// <p>The maximum number of results to return in the response. If there are more results, the response includes a NextToken parameter.</p> pub fn max_results(mut self, input: i32) -> Self { self.max_results = Some(input); self } /// <p>The maximum number of results to return in the response. If there are more results, the response includes a NextToken parameter.</p> pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self { self.max_results = input; self } /// <p>The paginated results marker. When the result of the operation is truncated, the call returns NextToken in the response. /// To get the next batch, provide this token in your next request.</p> pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self { self.next_token = Some(input.into()); self } /// <p>The paginated results marker. When the result of the operation is truncated, the call returns NextToken in the response. 
/// To get the next batch, provide this token in your next request.</p> pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.next_token = input; self } /// Consumes the builder and constructs a [`ListConfigurationsInput`](crate::input::ListConfigurationsInput) pub fn build( self, ) -> std::result::Result< crate::input::ListConfigurationsInput, aws_smithy_http::operation::BuildError, > { Ok(crate::input::ListConfigurationsInput { max_results: self.max_results.unwrap_or_default(), next_token: self.next_token, }) } } } #[doc(hidden)] pub type ListConfigurationsInputOperationOutputAlias = crate::operation::ListConfigurations; #[doc(hidden)] pub type ListConfigurationsInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy; impl ListConfigurationsInput { /// Consumes the builder and constructs an Operation<[`ListConfigurations`](crate::operation::ListConfigurations)> #[allow(clippy::let_and_return)] #[allow(clippy::needless_borrow)] pub async fn make_operation( &self, _config: &crate::config::Config, ) -> std::result::Result< aws_smithy_http::operation::Operation< crate::operation::ListConfigurations, aws_http::AwsErrorRetryPolicy, >, aws_smithy_http::operation::BuildError, > { fn uri_base( _input: &crate::input::ListConfigurationsInput, output: &mut String, ) -> Result<(), aws_smithy_http::operation::BuildError> { write!(output, "/v1/configurations").expect("formatting should succeed"); Ok(()) } fn uri_query( _input: &crate::input::ListConfigurationsInput, mut output: &mut String, ) -> Result<(), aws_smithy_http::operation::BuildError> { let mut query = aws_smithy_http::query::Writer::new(&mut output); if _input.max_results != 0 { query.push_kv( "maxResults", aws_smithy_types::primitive::Encoder::from(_input.max_results).encode(), ); } if let Some(inner_19) = &_input.next_token { query.push_kv("nextToken", &aws_smithy_http::query::fmt_string(&inner_19)); } Ok(()) } #[allow(clippy::unnecessary_wraps)] fn update_http_builder( input: &crate::input::ListConfigurationsInput, builder: http::request::Builder, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { let mut uri = String::new(); uri_base(input, &mut uri)?; uri_query(input, &mut uri)?; Ok(builder.method("GET").uri(uri)) } #[allow(clippy::unnecessary_wraps)] fn request_builder_base( input: &crate::input::ListConfigurationsInput, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { #[allow(unused_mut)] let mut builder = update_http_builder(input, http::request::Builder::new())?; Ok(builder) } let properties = aws_smithy_http::property_bag::SharedPropertyBag::new(); let request = request_builder_base(&self)?; let body = aws_smithy_http::body::SdkBody::from(""); let request = Self::assemble(request, body); #[allow(unused_mut)] let mut request = aws_smithy_http::operation::Request::from_parts( request.map(aws_smithy_http::body::SdkBody::from), properties, ); let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment( aws_types::os_shim_internal::Env::real(), crate::API_METADATA.clone(), ); if let Some(app_name) = _config.app_name() { user_agent = user_agent.with_app_name(app_name.clone()); } request.properties_mut().insert(user_agent); #[allow(unused_mut)] let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config(); request.properties_mut().insert(signing_config); request .properties_mut() .insert(aws_types::SigningService::from_static( _config.signing_service(), )); 
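        // Nothing is signed or resolved yet at this point: the user agent, signing
        // config, service name, and (below) endpoint resolver, region, and credentials
        // provider are parked in the operation's property bag for the dispatch-time
        // middleware to consume.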
aws_endpoint::set_endpoint_resolver( &mut request.properties_mut(), _config.endpoint_resolver.clone(), ); if let Some(region) = &_config.region { request.properties_mut().insert(region.clone()); } aws_http::auth::set_provider( &mut request.properties_mut(), _config.credentials_provider.clone(), ); let op = aws_smithy_http::operation::Operation::new( request, crate::operation::ListConfigurations::new(), ) .with_metadata(aws_smithy_http::operation::Metadata::new( "ListConfigurations", "kafka", )); let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new()); Ok(op) } fn assemble( builder: http::request::Builder, body: aws_smithy_http::body::SdkBody, ) -> http::request::Request<aws_smithy_http::body::SdkBody> { builder.body(body).expect("should be valid request") } /// Creates a new builder-style object to manufacture [`ListConfigurationsInput`](crate::input::ListConfigurationsInput) pub fn builder() -> crate::input::list_configurations_input::Builder { crate::input::list_configurations_input::Builder::default() } } /// See [`ListKafkaVersionsInput`](crate::input::ListKafkaVersionsInput) pub mod list_kafka_versions_input { /// A builder for [`ListKafkaVersionsInput`](crate::input::ListKafkaVersionsInput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) max_results: std::option::Option<i32>, pub(crate) next_token: std::option::Option<std::string::String>, } impl Builder { /// <p>The maximum number of results to return in the response. If there are more results, the response includes a NextToken parameter.</p> pub fn max_results(mut self, input: i32) -> Self { self.max_results = Some(input); self } /// <p>The maximum number of results to return in the response. If there are more results, the response includes a NextToken parameter.</p> pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self { self.max_results = input; self } /// <p>The paginated results marker. When the result of the operation is truncated, the call returns NextToken in the response. To get the next batch, provide this token in your next request.</p> pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self { self.next_token = Some(input.into()); self } /// <p>The paginated results marker. When the result of the operation is truncated, the call returns NextToken in the response. 
/// To get the next batch, provide this token in your next request.</p> pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.next_token = input; self } /// Consumes the builder and constructs a [`ListKafkaVersionsInput`](crate::input::ListKafkaVersionsInput) pub fn build( self, ) -> std::result::Result< crate::input::ListKafkaVersionsInput, aws_smithy_http::operation::BuildError, > { Ok(crate::input::ListKafkaVersionsInput { max_results: self.max_results.unwrap_or_default(), next_token: self.next_token, }) } } } #[doc(hidden)] pub type ListKafkaVersionsInputOperationOutputAlias = crate::operation::ListKafkaVersions; #[doc(hidden)] pub type ListKafkaVersionsInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy; impl ListKafkaVersionsInput { /// Consumes the builder and constructs an Operation<[`ListKafkaVersions`](crate::operation::ListKafkaVersions)> #[allow(clippy::let_and_return)] #[allow(clippy::needless_borrow)] pub async fn make_operation( &self, _config: &crate::config::Config, ) -> std::result::Result< aws_smithy_http::operation::Operation< crate::operation::ListKafkaVersions, aws_http::AwsErrorRetryPolicy, >, aws_smithy_http::operation::BuildError, > { fn uri_base( _input: &crate::input::ListKafkaVersionsInput, output: &mut String, ) -> Result<(), aws_smithy_http::operation::BuildError> { write!(output, "/v1/kafka-versions").expect("formatting should succeed"); Ok(()) } fn uri_query( _input: &crate::input::ListKafkaVersionsInput, mut output: &mut String, ) -> Result<(), aws_smithy_http::operation::BuildError> { let mut query = aws_smithy_http::query::Writer::new(&mut output); if _input.max_results != 0 { query.push_kv( "maxResults", aws_smithy_types::primitive::Encoder::from(_input.max_results).encode(), ); } if let Some(inner_20) = &_input.next_token { query.push_kv("nextToken", &aws_smithy_http::query::fmt_string(&inner_20)); } Ok(()) } #[allow(clippy::unnecessary_wraps)] fn update_http_builder( input: &crate::input::ListKafkaVersionsInput, builder: http::request::Builder, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { let mut uri = String::new(); uri_base(input, &mut uri)?; uri_query(input, &mut uri)?; Ok(builder.method("GET").uri(uri)) } #[allow(clippy::unnecessary_wraps)] fn request_builder_base( input: &crate::input::ListKafkaVersionsInput, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { #[allow(unused_mut)] let mut builder = update_http_builder(input, http::request::Builder::new())?; Ok(builder) } let properties = aws_smithy_http::property_bag::SharedPropertyBag::new(); let request = request_builder_base(&self)?; let body = aws_smithy_http::body::SdkBody::from(""); let request = Self::assemble(request, body); #[allow(unused_mut)] let mut request = aws_smithy_http::operation::Request::from_parts( request.map(aws_smithy_http::body::SdkBody::from), properties, ); let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment( aws_types::os_shim_internal::Env::real(), crate::API_METADATA.clone(), ); if let Some(app_name) = _config.app_name() { user_agent = user_agent.with_app_name(app_name.clone()); } request.properties_mut().insert(user_agent); #[allow(unused_mut)] let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config(); request.properties_mut().insert(signing_config); request .properties_mut() .insert(aws_types::SigningService::from_static( _config.signing_service(), ));
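        // The endpoint resolver and region inserted below determine the request's
        // hostname at dispatch time; the same region value is presumably also what
        // SigV4 uses as its signing scope.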
aws_endpoint::set_endpoint_resolver( &mut request.properties_mut(), _config.endpoint_resolver.clone(), ); if let Some(region) = &_config.region { request.properties_mut().insert(region.clone()); } aws_http::auth::set_provider( &mut request.properties_mut(), _config.credentials_provider.clone(), ); let op = aws_smithy_http::operation::Operation::new( request, crate::operation::ListKafkaVersions::new(), ) .with_metadata(aws_smithy_http::operation::Metadata::new( "ListKafkaVersions", "kafka", )); let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new()); Ok(op) } fn assemble( builder: http::request::Builder, body: aws_smithy_http::body::SdkBody, ) -> http::request::Request<aws_smithy_http::body::SdkBody> { builder.body(body).expect("should be valid request") } /// Creates a new builder-style object to manufacture [`ListKafkaVersionsInput`](crate::input::ListKafkaVersionsInput) pub fn builder() -> crate::input::list_kafka_versions_input::Builder { crate::input::list_kafka_versions_input::Builder::default() } } /// See [`ListNodesInput`](crate::input::ListNodesInput) pub mod list_nodes_input { /// A builder for [`ListNodesInput`](crate::input::ListNodesInput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) cluster_arn: std::option::Option<std::string::String>, pub(crate) max_results: std::option::Option<i32>, pub(crate) next_token: std::option::Option<std::string::String>, } impl Builder { /// <p>The Amazon Resource Name (ARN) that uniquely identifies the cluster.</p> pub fn cluster_arn(mut self, input: impl Into<std::string::String>) -> Self { self.cluster_arn = Some(input.into()); self } /// <p>The Amazon Resource Name (ARN) that uniquely identifies the cluster.</p> pub fn set_cluster_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.cluster_arn = input; self } /// <p>The maximum number of results to return in the response. If there are more results, the response includes a NextToken parameter.</p> pub fn max_results(mut self, input: i32) -> Self { self.max_results = Some(input); self } /// <p>The maximum number of results to return in the response. If there are more results, the response includes a NextToken parameter.</p> pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self { self.max_results = input; self } /// <p>The paginated results marker. When the result of the operation is truncated, the call returns NextToken in the response. /// To get the next batch, provide this token in your next request.</p> pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self { self.next_token = Some(input.into()); self } /// <p>The paginated results marker. When the result of the operation is truncated, the call returns NextToken in the response. 
/// To get the next batch, provide this token in your next request.</p> pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.next_token = input; self } /// Consumes the builder and constructs a [`ListNodesInput`](crate::input::ListNodesInput) pub fn build( self, ) -> std::result::Result<crate::input::ListNodesInput, aws_smithy_http::operation::BuildError> { Ok(crate::input::ListNodesInput { cluster_arn: self.cluster_arn, max_results: self.max_results.unwrap_or_default(), next_token: self.next_token, }) } } } #[doc(hidden)] pub type ListNodesInputOperationOutputAlias = crate::operation::ListNodes; #[doc(hidden)] pub type ListNodesInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy; impl ListNodesInput { /// Consumes the builder and constructs an Operation<[`ListNodes`](crate::operation::ListNodes)> #[allow(clippy::let_and_return)] #[allow(clippy::needless_borrow)] pub async fn make_operation( &self, _config: &crate::config::Config, ) -> std::result::Result< aws_smithy_http::operation::Operation< crate::operation::ListNodes, aws_http::AwsErrorRetryPolicy, >, aws_smithy_http::operation::BuildError, > { fn uri_base( _input: &crate::input::ListNodesInput, output: &mut String, ) -> Result<(), aws_smithy_http::operation::BuildError> { let input_21 = &_input.cluster_arn; let input_21 = input_21 .as_ref() .ok_or(aws_smithy_http::operation::BuildError::MissingField { field: "cluster_arn", details: "cannot be empty or unset", })?; let cluster_arn = aws_smithy_http::label::fmt_string(input_21, false); if cluster_arn.is_empty() { return Err(aws_smithy_http::operation::BuildError::MissingField { field: "cluster_arn", details: "cannot be empty or unset", }); } write!( output, "/v1/clusters/{ClusterArn}/nodes", ClusterArn = cluster_arn ) .expect("formatting should succeed"); Ok(()) } fn uri_query( _input: &crate::input::ListNodesInput, mut output: &mut String, ) -> Result<(), aws_smithy_http::operation::BuildError> { let mut query = aws_smithy_http::query::Writer::new(&mut output); if _input.max_results != 0 { query.push_kv( "maxResults", aws_smithy_types::primitive::Encoder::from(_input.max_results).encode(), ); } if let Some(inner_22) = &_input.next_token { query.push_kv("nextToken", &aws_smithy_http::query::fmt_string(&inner_22)); } Ok(()) } #[allow(clippy::unnecessary_wraps)] fn update_http_builder( input: &crate::input::ListNodesInput, builder: http::request::Builder, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { let mut uri = String::new(); uri_base(input, &mut uri)?; uri_query(input, &mut uri)?; Ok(builder.method("GET").uri(uri)) } #[allow(clippy::unnecessary_wraps)] fn request_builder_base( input: &crate::input::ListNodesInput, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { #[allow(unused_mut)] let mut builder = update_http_builder(input, http::request::Builder::new())?; Ok(builder) } let properties = aws_smithy_http::property_bag::SharedPropertyBag::new(); let request = request_builder_base(&self)?; let body = aws_smithy_http::body::SdkBody::from(""); let request = Self::assemble(request, body); #[allow(unused_mut)] let mut request = aws_smithy_http::operation::Request::from_parts( request.map(aws_smithy_http::body::SdkBody::from), properties, ); let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment( aws_types::os_shim_internal::Env::real(), crate::API_METADATA.clone(), ); if let Some(app_name) = _config.app_name() { user_agent = 
user_agent.with_app_name(app_name.clone()); } request.properties_mut().insert(user_agent); #[allow(unused_mut)] let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config(); request.properties_mut().insert(signing_config); request .properties_mut() .insert(aws_types::SigningService::from_static( _config.signing_service(), )); aws_endpoint::set_endpoint_resolver( &mut request.properties_mut(), _config.endpoint_resolver.clone(), ); if let Some(region) = &_config.region { request.properties_mut().insert(region.clone()); } aws_http::auth::set_provider( &mut request.properties_mut(), _config.credentials_provider.clone(), ); let op = aws_smithy_http::operation::Operation::new(request, crate::operation::ListNodes::new()) .with_metadata(aws_smithy_http::operation::Metadata::new( "ListNodes", "kafka", )); let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new()); Ok(op) } fn assemble( builder: http::request::Builder, body: aws_smithy_http::body::SdkBody, ) -> http::request::Request<aws_smithy_http::body::SdkBody> { builder.body(body).expect("should be valid request") } /// Creates a new builder-style object to manufacture [`ListNodesInput`](crate::input::ListNodesInput) pub fn builder() -> crate::input::list_nodes_input::Builder { crate::input::list_nodes_input::Builder::default() } } /// See [`ListScramSecretsInput`](crate::input::ListScramSecretsInput) pub mod list_scram_secrets_input { /// A builder for [`ListScramSecretsInput`](crate::input::ListScramSecretsInput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) cluster_arn: std::option::Option<std::string::String>, pub(crate) max_results: std::option::Option<i32>, pub(crate) next_token: std::option::Option<std::string::String>, } impl Builder { /// <p>The arn of the cluster.</p> pub fn cluster_arn(mut self, input: impl Into<std::string::String>) -> Self { self.cluster_arn = Some(input.into()); self } /// <p>The arn of the cluster.</p> pub fn set_cluster_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.cluster_arn = input; self } /// <p>The maxResults of the query.</p> pub fn max_results(mut self, input: i32) -> Self { self.max_results = Some(input); self } /// <p>The maxResults of the query.</p> pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self { self.max_results = input; self } /// <p>The nextToken of the query.</p> pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self { self.next_token = Some(input.into()); self } /// <p>The nextToken of the query.</p> pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self { self.next_token = input; self } /// Consumes the builder and constructs a [`ListScramSecretsInput`](crate::input::ListScramSecretsInput) pub fn build( self, ) -> std::result::Result< crate::input::ListScramSecretsInput, aws_smithy_http::operation::BuildError, > { Ok(crate::input::ListScramSecretsInput { cluster_arn: self.cluster_arn, max_results: self.max_results.unwrap_or_default(), next_token: self.next_token, }) } } } #[doc(hidden)] pub type ListScramSecretsInputOperationOutputAlias = crate::operation::ListScramSecrets; #[doc(hidden)] pub type ListScramSecretsInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy; impl ListScramSecretsInput { /// Consumes the builder and constructs an Operation<[`ListScramSecrets`](crate::operation::ListScramSecrets)> #[allow(clippy::let_and_return)] 
#[allow(clippy::needless_borrow)] pub async fn make_operation( &self, _config: &crate::config::Config, ) -> std::result::Result< aws_smithy_http::operation::Operation< crate::operation::ListScramSecrets, aws_http::AwsErrorRetryPolicy, >, aws_smithy_http::operation::BuildError, > { fn uri_base
( _input: &crate::input::ListScramSecretsInput, output: &mut String, ) -> Result<(), aws_smithy_http::operation::BuildError> { let input_23 = &_input.cluster_arn; let input_23 = input_23 .as_ref() .ok_or(aws_smithy_http::operation::BuildError::MissingField { field: "cluster_arn", details: "cannot be empty or unset", })?; let cluster_arn = aws_smithy_http::label::fmt_string(input_23, false); if cluster_arn.is_empty() { return Err(aws_smithy_http::operation::BuildError::MissingField { field: "cluster_arn", details: "cannot be empty or unset", }); } write!( output, "/v1/clusters/{ClusterArn}/scram-secrets", ClusterArn = cluster_arn ) .expect("formatting should succeed"); Ok(()) } fn uri_query( _input: &crate::input::ListScramSecretsInput, mut output: &mut String, ) -> Result<(), aws_smithy_http::operation::BuildError> { let mut query = aws_smithy_http::query::Writer::new(&mut output); if _input.max_results != 0 { query.push_kv( "maxResults", aws_smithy_types::primitive::Encoder::from(_input.max_results).encode(), ); } if let Some(inner_24) = &_input.next_token { query.push_kv("nextToken", &aws_smithy_http::query::fmt_string(&inner_24)); } Ok(()) } #[allow(clippy::unnecessary_wraps)] fn update_http_builder( input: &crate::input::ListScramSecretsInput, builder: http::request::Builder, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { let mut uri = String::new(); uri_base(input, &mut uri)?; uri_query(input, &mut uri)?; Ok(builder.method("GET").uri(uri)) } #[allow(clippy::unnecessary_wraps)] fn request_builder_base( input: &crate::input::ListScramSecretsInput, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { #[allow(unused_mut)] let mut builder = update_http_builder(input, http::request::Builder::new())?; Ok(builder) } let properties = aws_smithy_http::property_bag::SharedPropertyBag::new(); let request = request_builder_base(&self)?; let body = aws_smithy_http::body::SdkBody::from(""); let request = Self::assemble(request, body); #[allow(unused_mut)] let mut request = aws_smithy_http::operation::Request::from_parts( request.map(aws_smithy_http::body::SdkBody::from), properties, ); let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment( aws_types::os_shim_internal::Env::real(), crate::API_METADATA.clone(), ); if let Some(app_name) = _config.app_name() { user_agent = user_agent.with_app_name(app_name.clone()); } request.properties_mut().insert(user_agent); #[allow(unused_mut)] let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config(); request.properties_mut().insert(signing_config); request .properties_mut() .insert(aws_types::SigningService::from_static( _config.signing_service(), )); aws_endpoint::set_endpoint_resolver( &mut request.properties_mut(), _config.endpoint_resolver.clone(), ); if let Some(region) = &_config.region { request.properties_mut().insert(region.clone()); } aws_http::auth::set_provider( &mut request.properties_mut(), _config.credentials_provider.clone(), ); let op = aws_smithy_http::operation::Operation::new( request, crate::operation::ListScramSecrets::new(), ) .with_metadata(aws_smithy_http::operation::Metadata::new( "ListScramSecrets", "kafka", )); let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new()); Ok(op) } fn assemble( builder: http::request::Builder, body: aws_smithy_http::body::SdkBody, ) -> http::request::Request<aws_smithy_http::body::SdkBody> { builder.body(body).expect("should be valid request") } /// Creates a new 
/// builder-style object to manufacture [`ListScramSecretsInput`](crate::input::ListScramSecretsInput) pub fn builder() -> crate::input::list_scram_secrets_input::Builder { crate::input::list_scram_secrets_input::Builder::default() } } /// See [`ListTagsForResourceInput`](crate::input::ListTagsForResourceInput) pub mod list_tags_for_resource_input { /// A builder for [`ListTagsForResourceInput`](crate::input::ListTagsForResourceInput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) resource_arn: std::option::Option<std::string::String>, } impl Builder { /// <p>The Amazon Resource Name (ARN) that uniquely identifies the resource that's associated with the tags.</p> pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self { self.resource_arn = Some(input.into()); self } /// <p>The Amazon Resource Name (ARN) that uniquely identifies the resource that's associated with the tags.</p> pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.resource_arn = input; self } /// Consumes the builder and constructs a [`ListTagsForResourceInput`](crate::input::ListTagsForResourceInput) pub fn build( self, ) -> std::result::Result< crate::input::ListTagsForResourceInput, aws_smithy_http::operation::BuildError, > { Ok(crate::input::ListTagsForResourceInput { resource_arn: self.resource_arn, }) } } } #[doc(hidden)] pub type ListTagsForResourceInputOperationOutputAlias = crate::operation::ListTagsForResource; #[doc(hidden)] pub type ListTagsForResourceInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy; impl ListTagsForResourceInput { /// Consumes the builder and constructs an Operation<[`ListTagsForResource`](crate::operation::ListTagsForResource)> #[allow(clippy::let_and_return)] #[allow(clippy::needless_borrow)] pub async fn make_operation( &self, _config: &crate::config::Config, ) -> std::result::Result< aws_smithy_http::operation::Operation< crate::operation::ListTagsForResource, aws_http::AwsErrorRetryPolicy, >, aws_smithy_http::operation::BuildError, > { fn uri_base( _input: &crate::input::ListTagsForResourceInput, output: &mut String, ) -> Result<(), aws_smithy_http::operation::BuildError> { let input_25 = &_input.resource_arn; let input_25 = input_25 .as_ref() .ok_or(aws_smithy_http::operation::BuildError::MissingField { field: "resource_arn", details: "cannot be empty or unset", })?; let resource_arn = aws_smithy_http::label::fmt_string(input_25, false); if resource_arn.is_empty() { return Err(aws_smithy_http::operation::BuildError::MissingField { field: "resource_arn", details: "cannot be empty or unset", }); } write!(output, "/v1/tags/{ResourceArn}", ResourceArn = resource_arn) .expect("formatting should succeed"); Ok(()) } #[allow(clippy::unnecessary_wraps)] fn update_http_builder( input: &crate::input::ListTagsForResourceInput, builder: http::request::Builder, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { let mut uri = String::new(); uri_base(input, &mut uri)?; Ok(builder.method("GET").uri(uri)) } #[allow(clippy::unnecessary_wraps)] fn request_builder_base( input: &crate::input::ListTagsForResourceInput, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { #[allow(unused_mut)] let mut builder = update_http_builder(input, http::request::Builder::new())?; Ok(builder) } let properties = aws_smithy_http::property_bag::SharedPropertyBag::new(); let request =
request_builder_base(&self)?; let body = aws_smithy_http::body::SdkBody::from(""); let request = Self::assemble(request, body); #[allow(unused_mut)] let mut request = aws_smithy_http::operation::Request::from_parts( request.map(aws_smithy_http::body::SdkBody::from), properties, ); let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment( aws_types::os_shim_internal::Env::real(), crate::API_METADATA.clone(), ); if let Some(app_name) = _config.app_name() { user_agent = user_agent.with_app_name(app_name.clone()); } request.properties_mut().insert(user_agent); #[allow(unused_mut)] let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config(); request.properties_mut().insert(signing_config); request .properties_mut() .insert(aws_types::SigningService::from_static( _config.signing_service(), )); aws_endpoint::set_endpoint_resolver( &mut request.properties_mut(), _config.endpoint_resolver.clone(), ); if let Some(region) = &_config.region { request.properties_mut().insert(region.clone()); } aws_http::auth::set_provider( &mut request.properties_mut(), _config.credentials_provider.clone(), ); let op = aws_smithy_http::operation::Operation::new( request, crate::operation::ListTagsForResource::new(), ) .with_metadata(aws_smithy_http::operation::Metadata::new( "ListTagsForResource", "kafka", )); let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new()); Ok(op) } fn assemble( builder: http::request::Builder, body: aws_smithy_http::body::SdkBody, ) -> http::request::Request<aws_smithy_http::body::SdkBody> { builder.body(body).expect("should be valid request") } /// Creates a new builder-style object to manufacture [`ListTagsForResourceInput`](crate::input::ListTagsForResourceInput) pub fn builder() -> crate::input::list_tags_for_resource_input::Builder { crate::input::list_tags_for_resource_input::Builder::default() } } /// See [`RebootBrokerInput`](crate::input::RebootBrokerInput) pub mod reboot_broker_input { /// A builder for [`RebootBrokerInput`](crate::input::RebootBrokerInput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) broker_ids: std::option::Option<std::vec::Vec<std::string::String>>, pub(crate) cluster_arn: std::option::Option<std::string::String>, } impl Builder { /// Appends an item to `broker_ids`. /// /// To override the contents of this collection use [`set_broker_ids`](Self::set_broker_ids). /// /// <p>The list of broker IDs to be rebooted. The reboot-broker operation supports rebooting one broker at a time.</p> pub fn broker_ids(mut self, input: impl Into<std::string::String>) -> Self { let mut v = self.broker_ids.unwrap_or_default(); v.push(input.into()); self.broker_ids = Some(v); self } /// <p>The list of broker IDs to be rebooted. 
The reboot-broker operation supports rebooting one broker at a time.</p> pub fn set_broker_ids( mut self, input: std::option::Option<std::vec::Vec<std::string::String>>, ) -> Self { self.broker_ids = input; self } /// <p>The Amazon Resource Name (ARN) of the cluster to be updated.</p> pub fn cluster_arn(mut self, input: impl Into<std::string::String>) -> Self { self.cluster_arn = Some(input.into()); self } /// <p>The Amazon Resource Name (ARN) of the cluster to be updated.</p> pub fn set_cluster_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.cluster_arn = input; self } /// Consumes the builder and constructs a [`RebootBrokerInput`](crate::input::RebootBrokerInput) pub fn build( self, ) -> std::result::Result< crate::input::RebootBrokerInput, aws_smithy_http::operation::BuildError, > { Ok(crate::input::RebootBrokerInput { broker_ids: self.broker_ids, cluster_arn: self.cluster_arn, }) } } } #[doc(hidden)] pub type RebootBrokerInputOperationOutputAlias = crate::operation::RebootBroker; #[doc(hidden)] pub type RebootBrokerInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy; impl RebootBrokerInput { /// Consumes the builder and constructs an Operation<[`RebootBroker`](crate::operation::RebootBroker)> #[allow(clippy::let_and_return)] #[allow(clippy::needless_borrow)] pub async fn make_operation( &self, _config: &crate::config::Config, ) -> std::result::Result< aws_smithy_http::operation::Operation< crate::operation::RebootBroker, aws_http::AwsErrorRetryPolicy, >, aws_smithy_http::operation::BuildError, > { fn uri_base( _input: &crate::input::RebootBrokerInput, output: &mut String, ) -> Result<(), aws_smithy_http::operation::BuildError> { let input_26 = &_input.cluster_arn; let input_26 = input_26 .as_ref() .ok_or(aws_smithy_http::operation::BuildError::MissingField { field: "cluster_arn", details: "cannot be empty or unset", })?; let cluster_arn = aws_smithy_http::label::fmt_string(input_26, false); if cluster_arn.is_empty() { return Err(aws_smithy_http::operation::BuildError::MissingField { field: "cluster_arn", details: "cannot be empty or unset", }); } write!( output, "/v1/clusters/{ClusterArn}/reboot-broker", ClusterArn = cluster_arn ) .expect("formatting should succeed"); Ok(()) } #[allow(clippy::unnecessary_wraps)] fn update_http_builder( input: &crate::input::RebootBrokerInput, builder: http::request::Builder, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { let mut uri = String::new(); uri_base(input, &mut uri)?; Ok(builder.method("PUT").uri(uri)) } #[allow(clippy::unnecessary_wraps)] fn request_builder_base( input: &crate::input::RebootBrokerInput, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { #[allow(unused_mut)] let mut builder = update_http_builder(input, http::request::Builder::new())?; builder = aws_smithy_http::header::set_header_if_absent( builder, http::header::HeaderName::from_static("content-type"), "application/json", ); Ok(builder) } let properties = aws_smithy_http::property_bag::SharedPropertyBag::new(); let request = request_builder_base(&self)?; let body = crate::operation_ser::serialize_operation_crate_operation_reboot_broker(&self)?; let request = Self::assemble(request, body); #[allow(unused_mut)] let mut request = aws_smithy_http::operation::Request::from_parts( request.map(aws_smithy_http::body::SdkBody::from), properties, ); let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment( aws_types::os_shim_internal::Env::real(), 
crate::API_METADATA.clone(), ); if let Some(app_name) = _config.app_name() { user_agent = user_agent.with_app_name(app_name.clone()); } request.properties_mut().insert(user_agent); #[allow(unused_mut)] let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config(); request.properties_mut().insert(signing_config); request .properties_mut() .insert(aws_types::SigningService::from_static( _config.signing_service(), )); aws_endpoint::set_endpoint_resolver( &mut request.properties_mut(), _config.endpoint_resolver.clone(), ); if let Some(region) = &_config.region { request.properties_mut().insert(region.clone()); } aws_http::auth::set_provider( &mut request.properties_mut(), _config.credentials_provider.clone(), ); let op = aws_smithy_http::operation::Operation::new( request, crate::operation::RebootBroker::new(), ) .with_metadata(aws_smithy_http::operation::Metadata::new( "RebootBroker", "kafka", )); let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new()); Ok(op) } fn assemble( builder: http::request::Builder, body: aws_smithy_http::body::SdkBody, ) -> http::request::Request<aws_smithy_http::body::SdkBody> { let mut builder = builder; if let Some(content_length) = body.content_length() { builder = aws_smithy_http::header::set_header_if_absent( builder, http::header::CONTENT_LENGTH, content_length, ); } builder.body(body).expect("should be valid request") } /// Creates a new builder-style object to manufacture [`RebootBrokerInput`](crate::input::RebootBrokerInput) pub fn builder() -> crate::input::reboot_broker_input::Builder { crate::input::reboot_broker_input::Builder::default() } } /// See [`TagResourceInput`](crate::input::TagResourceInput) pub mod tag_resource_input { /// A builder for [`TagResourceInput`](crate::input::TagResourceInput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) resource_arn: std::option::Option<std::string::String>, pub(crate) tags: std::option::Option< std::collections::HashMap<std::string::String, std::string::String>, >, } impl Builder { /// <p>The Amazon Resource Name (ARN) that uniquely identifies the resource that's associated with the tags.</p> pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self { self.resource_arn = Some(input.into()); self } /// <p>The Amazon Resource Name (ARN) that uniquely identifies the resource that's associated with the tags.</p> pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.resource_arn = input; self } /// Adds a key-value pair to `tags`. /// /// To override the contents of this collection use [`set_tags`](Self::set_tags). 
/// /// <p>The key-value pair for the resource tag.</p> pub fn tags( mut self, k: impl Into<std::string::String>, v: impl Into<std::string::String>, ) -> Self { let mut hash_map = self.tags.unwrap_or_default(); hash_map.insert(k.into(), v.into()); self.tags = Some(hash_map); self } /// <p>The key-value pair for the resource tag.</p> pub fn set_tags( mut self, input: std::option::Option< std::collections::HashMap<std::string::String, std::string::String>, >, ) -> Self { self.tags = input; self } /// Consumes the builder and constructs a [`TagResourceInput`](crate::input::TagResourceInput) pub fn build( self, ) -> std::result::Result< crate::input::TagResourceInput, aws_smithy_http::operation::BuildError, > { Ok(crate::input::TagResourceInput { resource_arn: self.resource_arn, tags: self.tags, }) } } } #[doc(hidden)] pub type TagResourceInputOperationOutputAlias = crate::operation::TagResource; #[doc(hidden)] pub type TagResourceInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy; impl TagResourceInput { /// Consumes the builder and constructs an Operation<[`TagResource`](crate::operation::TagResource)> #[allow(clippy::let_and_return)] #[allow(clippy::needless_borrow)] pub async fn make_operation( &self, _config: &crate::config::Config, ) -> std::result::Result< aws_smithy_http::operation::Operation< crate::operation::TagResource, aws_http::AwsErrorRetryPolicy, >, aws_smithy_http::operation::BuildError, > { fn uri_base( _input: &crate::input::TagResourceInput, output: &mut String, ) -> Result<(), aws_smithy_http::operation::BuildError> { let input_27 = &_input.resource_arn; let input_27 = input_27 .as_ref() .ok_or(aws_smithy_http::operation::BuildError::MissingField { field: "resource_arn", details: "cannot be empty or unset", })?; let resource_arn = aws_smithy_http::label::fmt_string(input_27, false); if resource_arn.is_empty() { return Err(aws_smithy_http::operation::BuildError::MissingField { field: "resource_arn", details: "cannot be empty or unset", }); } write!(output, "/v1/tags/{ResourceArn}", ResourceArn = resource_arn) .expect("formatting should succeed"); Ok(()) } #[allow(clippy::unnecessary_wraps)] fn update_http_builder( input: &crate::input::TagResourceInput, builder: http::request::Builder, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { let mut uri = String::new(); uri_base(input, &mut uri)?; Ok(builder.method("POST").uri(uri)) } #[allow(clippy::unnecessary_wraps)] fn request_builder_base( input: &crate::input::TagResourceInput, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { #[allow(unused_mut)] let mut builder = update_http_builder(input, http::request::Builder::new())?; builder = aws_smithy_http::header::set_header_if_absent( builder, http::header::HeaderName::from_static("content-type"), "application/json", ); Ok(builder) } let properties = aws_smithy_http::property_bag::SharedPropertyBag::new(); let request = request_builder_base(&self)?; let body = crate::operation_ser::serialize_operation_crate_operation_tag_resource(&self)?; let request = Self::assemble(request, body); #[allow(unused_mut)] let mut request = aws_smithy_http::operation::Request::from_parts( request.map(aws_smithy_http::body::SdkBody::from), properties, ); let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment( aws_types::os_shim_internal::Env::real(), crate::API_METADATA.clone(), ); if let Some(app_name) = _config.app_name() { user_agent = user_agent.with_app_name(app_name.clone()); } 
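// Everything inserted below goes into the operation's shared property bag:
// the user agent, signing configuration, signing service name, endpoint
// resolver, region, and credentials provider. Middleware stages later in the
// request pipeline read these entries back out to sign and dispatch the call.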
request.properties_mut().insert(user_agent); #[allow(unused_mut)] let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config(); request.properties_mut().insert(signing_config); request .properties_mut() .insert(aws_types::SigningService::from_static( _config.signing_service(), )); aws_endpoint::set_endpoint_resolver( &mut request.properties_mut(), _config.endpoint_resolver.clone(), ); if let Some(region) = &_config.region { request.properties_mut().insert(region.clone()); } aws_http::auth::set_provider( &mut request.properties_mut(), _config.credentials_provider.clone(), ); let op = aws_smithy_http::operation::Operation::new( request, crate::operation::TagResource::new(), ) .with_metadata(aws_smithy_http::operation::Metadata::new( "TagResource", "kafka", )); let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new()); Ok(op) } fn assemble( builder: http::request::Builder, body: aws_smithy_http::body::SdkBody, ) -> http::request::Request<aws_smithy_http::body::SdkBody> { let mut builder = builder; if let Some(content_length) = body.content_length() { builder = aws_smithy_http::header::set_header_if_absent( builder, http::header::CONTENT_LENGTH, content_length, ); } builder.body(body).expect("should be valid request") } /// Creates a new builder-style object to manufacture [`TagResourceInput`](crate::input::TagResourceInput) pub fn builder() -> crate::input::tag_resource_input::Builder { crate::input::tag_resource_input::Builder::default() } } /// See [`UntagResourceInput`](crate::input::UntagResourceInput) pub mod untag_resource_input { /// A builder for [`UntagResourceInput`](crate::input::UntagResourceInput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) resource_arn: std::option::Option<std::string::String>, pub(crate) tag_keys: std::option::Option<std::vec::Vec<std::string::String>>, } impl Builder { /// <p>The Amazon Resource Name (ARN) that uniquely identifies the resource that's associated with the tags.</p> pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self { self.resource_arn = Some(input.into()); self } /// <p>The Amazon Resource Name (ARN) that uniquely identifies the resource that's associated with the tags.</p> pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.resource_arn = input; self } /// Appends an item to `tag_keys`. /// /// To override the contents of this collection use [`set_tag_keys`](Self::set_tag_keys). /// /// <p>Tag keys must be unique for a given cluster. In addition, the following restrictions apply:</p> /// <ul> /// <li> /// <p>Each tag key must be unique. If you add a tag with a key that's already in /// use, your new tag overwrites the existing key-value pair. </p> /// </li> /// <li> /// <p>You can't start a tag key with aws: because this prefix is reserved for use /// by AWS. AWS creates tags that begin with this prefix on your behalf, but /// you can't edit or delete them.</p> /// </li> /// <li> /// <p>Tag keys must be between 1 and 128 Unicode characters in length.</p> /// </li> /// <li> /// <p>Tag keys must consist of the following characters: Unicode letters, digits, /// white space, and the following special characters: _ . 
/ = + - /// @.</p> /// </li> /// </ul> pub fn tag_keys(mut self, input: impl Into<std::string::String>) -> Self { let mut v = self.tag_keys.unwrap_or_default(); v.push(input.into()); self.tag_keys = Some(v); self } /// <p>Tag keys must be unique for a given cluster. In addition, the following restrictions apply:</p> /// <ul> /// <li> /// <p>Each tag key must be unique. If you add a tag with a key that's already in /// use, your new tag overwrites the existing key-value pair. </p> /// </li> /// <li> /// <p>You can't start a tag key with aws: because this prefix is reserved for use /// by AWS. AWS creates tags that begin with this prefix on your behalf, but /// you can't edit or delete them.</p> /// </li> /// <li> /// <p>Tag keys must be between 1 and 128 Unicode characters in length.</p> /// </li> /// <li> /// <p>Tag keys must consist of the following characters: Unicode letters, digits, /// white space, and the following special characters: _ . / = + - /// @.</p> /// </li> /// </ul> pub fn set_tag_keys( mut self, input: std::option::Option<std::vec::Vec<std::string::String>>, ) -> Self { self.tag_keys = input; self } /// Consumes the builder and constructs a [`UntagResourceInput`](crate::input::UntagResourceInput) pub fn build( self, ) -> std::result::Result< crate::input::UntagResourceInput, aws_smithy_http::operation::BuildError, > { Ok(crate::input::UntagResourceInput { resource_arn: self.resource_arn, tag_keys: self.tag_keys, }) } } } #[doc(hidden)] pub type UntagResourceInputOperationOutputAlias = crate::operation::UntagResource; #[doc(hidden)] pub type UntagResourceInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy; impl UntagResourceInput { /// Consumes the builder and constructs an Operation<[`UntagResource`](crate::operation::UntagResource)> #[allow(clippy::let_and_return)] #[allow(clippy::needless_borrow)] pub async fn make_operation( &self, _config: &crate::config::Config, ) -> std::result::Result< aws_smithy_http::operation::Operation< crate::operation::UntagResource, aws_http::AwsErrorRetryPolicy, >, aws_smithy_http::operation::BuildError, > { fn uri_base( _input: &crate::input::UntagResourceInput, output: &mut String, ) -> Result<(), aws_smithy_http::operation::BuildError> { let input_28 = &_input.resource_arn; let input_28 = input_28 .as_ref() .ok_or(aws_smithy_http::operation::BuildError::MissingField { field: "resource_arn", details: "cannot be empty or unset", })?; let resource_arn = aws_smithy_http::label::fmt_string(input_28, false); if resource_arn.is_empty() { return Err(aws_smithy_http::operation::BuildError::MissingField { field: "resource_arn", details: "cannot be empty or unset", }); } write!(output, "/v1/tags/{ResourceArn}", ResourceArn = resource_arn) .expect("formatting should succeed"); Ok(()) } fn uri_query( _input: &crate::input::UntagResourceInput, mut output: &mut String, ) -> Result<(), aws_smithy_http::operation::BuildError> { let mut query = aws_smithy_http::query::Writer::new(&mut output); if let Some(inner_29) = &_input.tag_keys { for inner_30 in inner_29 { query.push_kv("tagKeys", &aws_smithy_http::query::fmt_string(&inner_30)); } } Ok(()) } #[allow(clippy::unnecessary_wraps)] fn update_http_builder( input: &crate::input::UntagResourceInput, builder: http::request::Builder, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { let mut uri = String::new(); uri_base(input, &mut uri)?; uri_query(input, &mut uri)?; Ok(builder.method("DELETE").uri(uri)) } #[allow(clippy::unnecessary_wraps)] fn 
request_builder_base( input: &crate::input::UntagResourceInput, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { #[allow(unused_mut)] let mut builder = update_http_builder(input, http::request::Builder::new())?; Ok(builder) } let properties = aws_smithy_http::property_bag::SharedPropertyBag::new(); let request = request_builder_base(&self)?; let body = aws_smithy_http::body::SdkBody::from(""); let request = Self::assemble(request, body); #[allow(unused_mut)] let mut request = aws_smithy_http::operation::Request::from_parts( request.map(aws_smithy_http::body::SdkBody::from), properties, ); let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment( aws_types::os_shim_internal::Env::real(), crate::API_METADATA.clone(), ); if let Some(app_name) = _config.app_name() { user_agent = user_agent.with_app_name(app_name.clone()); } request.properties_mut().insert(user_agent); #[allow(unused_mut)] let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config(); request.properties_mut().insert(signing_config); request .properties_mut() .insert(aws_types::SigningService::from_static( _config.signing_service(), )); aws_endpoint::set_endpoint_resolver( &mut request.properties_mut(), _config.endpoint_resolver.clone(), ); if let Some(region) = &_config.region { request.properties_mut().insert(region.clone()); } aws_http::auth::set_provider( &mut request.properties_mut(), _config.credentials_provider.clone(), ); let op = aws_smithy_http::operation::Operation::new( request, crate::operation::UntagResource::new(), ) .with_metadata(aws_smithy_http::operation::Metadata::new( "UntagResource", "kafka", )); let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new()); Ok(op) } fn assemble( builder: http::request::Builder, body: aws_smithy_http::body::SdkBody, ) -> http::request::Request<aws_smithy_http::body::SdkBody> { builder.body(body).expect("should be valid request") } /// Creates a new builder-style object to manufacture [`UntagResourceInput`](crate::input::UntagResourceInput) pub fn builder() -> crate::input::untag_resource_input::Builder { crate::input::untag_resource_input::Builder::default() } } /// See [`UpdateBrokerCountInput`](crate::input::UpdateBrokerCountInput) pub mod update_broker_count_input { /// A builder for [`UpdateBrokerCountInput`](crate::input::UpdateBrokerCountInput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) cluster_arn: std::option::Option<std::string::String>, pub(crate) current_version: std::option::Option<std::string::String>, pub(crate) target_number_of_broker_nodes: std::option::Option<i32>, } impl Builder { /// <p>The Amazon Resource Name (ARN) that uniquely identifies the cluster.</p> pub fn cluster_arn(mut self, input: impl Into<std::string::String>) -> Self { self.cluster_arn = Some(input.into()); self } /// <p>The Amazon Resource Name (ARN) that uniquely identifies the cluster.</p> pub fn set_cluster_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.cluster_arn = input; self } /// <p>The version of cluster to update from. A successful operation will then generate a new version.</p> pub fn current_version(mut self, input: impl Into<std::string::String>) -> Self { self.current_version = Some(input.into()); self } /// <p>The version of cluster to update from. 
A successful operation will then generate a new version.</p> pub fn set_current_version( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.current_version = input; self } /// <p>The number of broker nodes that you want the cluster to have after this operation completes successfully.</p> pub fn target_number_of_broker_nodes(mut self, input: i32) -> Self { self.target_number_of_broker_nodes = Some(input); self } /// <p>The number of broker nodes that you want the cluster to have after this operation completes successfully.</p> pub fn set_target_number_of_broker_nodes( mut self, input: std::option::Option<i32>, ) -> Self { self.target_number_of_broker_nodes = input; self } /// Consumes the builder and constructs a [`UpdateBrokerCountInput`](crate::input::UpdateBrokerCountInput) pub fn build( self, ) -> std::result::Result< crate::input::UpdateBrokerCountInput, aws_smithy_http::operation::BuildError, > { Ok(crate::input::UpdateBrokerCountInput { cluster_arn: self.cluster_arn, current_version: self.current_version, target_number_of_broker_nodes: self .target_number_of_broker_nodes .unwrap_or_default(), }) } } } #[doc(hidden)] pub type UpdateBrokerCountInputOperationOutputAlias = crate::operation::UpdateBrokerCount; #[doc(hidden)] pub type UpdateBrokerCountInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy; impl UpdateBrokerCountInput { /// Consumes the builder and constructs an Operation<[`UpdateBrokerCount`](crate::operation::UpdateBrokerCount)> #[allow(clippy::let_and_return)] #[allow(clippy::needless_borrow)] pub async fn make_operation( &self, _config: &crate::config::Config, ) -> std::result::Result< aws_smithy_http::operation::Operation< crate::operation::UpdateBrokerCount, aws_http::AwsErrorRetryPolicy, >, aws_smithy_http::operation::BuildError, > { fn uri_base( _input: &crate::input::UpdateBrokerCountInput, output: &mut String, ) -> Result<(), aws_smithy_http::operation::BuildError> { let input_31 = &_input.cluster_arn; let input_31 = input_31 .as_ref() .ok_or(aws_smithy_http::operation::BuildError::MissingField { field: "cluster_arn", details: "cannot be empty or unset", })?; let cluster_arn = aws_smithy_http::label::fmt_string(input_31, false); if cluster_arn.is_empty() { return Err(aws_smithy_http::operation::BuildError::MissingField { field: "cluster_arn", details: "cannot be empty or unset", }); } write!( output, "/v1/clusters/{ClusterArn}/nodes/count", ClusterArn = cluster_arn ) .expect("formatting should succeed"); Ok(()) } #[allow(clippy::unnecessary_wraps)] fn update_http_builder( input: &crate::input::UpdateBrokerCountInput, builder: http::request::Builder, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { let mut uri = String::new(); uri_base(input, &mut uri)?; Ok(builder.method("PUT").uri(uri)) } #[allow(clippy::unnecessary_wraps)] fn request_builder_base( input: &crate::input::UpdateBrokerCountInput, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { #[allow(unused_mut)] let mut builder = update_http_builder(input, http::request::Builder::new())?; builder = aws_smithy_http::header::set_header_if_absent( builder, http::header::HeaderName::from_static("content-type"), "application/json", ); Ok(builder) } let properties = aws_smithy_http::property_bag::SharedPropertyBag::new(); let request = request_builder_base(&self)?; let body = crate::operation_ser::serialize_operation_crate_operation_update_broker_count(&self)?; let request = Self::assemble(request, body); 
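// At this point `assemble` has produced a finished `http::Request` (including
// a Content-Length header when the body size is known); `from_parts` pairs it
// with the shared property bag so both travel through the pipeline together.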
#[allow(unused_mut)] let mut request = aws_smithy_http::operation::Request::from_parts( request.map(aws_smithy_http::body::SdkBody::from), properties, ); let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment( aws_types::os_shim_internal::Env::real(), crate::API_METADATA.clone(), ); if let Some(app_name) = _config.app_name() { user_agent = user_agent.with_app_name(app_name.clone()); } request.properties_mut().insert(user_agent); #[allow(unused_mut)] let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config(); request.properties_mut().insert(signing_config); request .properties_mut() .insert(aws_types::SigningService::from_static( _config.signing_service(), )); aws_endpoint::set_endpoint_resolver( &mut request.properties_mut(), _config.endpoint_resolver.clone(), ); if let Some(region) = &_config.region { request.properties_mut().insert(region.clone()); } aws_http::auth::set_provider( &mut request.properties_mut(), _config.credentials_provider.clone(), ); let op = aws_smithy_http::operation::Operation::new( request, crate::operation::UpdateBrokerCount::new(), ) .with_metadata(aws_smithy_http::operation::Metadata::new( "UpdateBrokerCount", "kafka", )); let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new()); Ok(op) } fn assemble( builder: http::request::Builder, body: aws_smithy_http::body::SdkBody, ) -> http::request::Request<aws_smithy_http::body::SdkBody> { let mut builder = builder; if let Some(content_length) = body.content_length() { builder = aws_smithy_http::header::set_header_if_absent( builder, http::header::CONTENT_LENGTH, content_length, ); } builder.body(body).expect("should be valid request") } /// Creates a new builder-style object to manufacture [`UpdateBrokerCountInput`](crate::input::UpdateBrokerCountInput) pub fn builder() -> crate::input::update_broker_count_input::Builder { crate::input::update_broker_count_input::Builder::default() } } /// See [`UpdateBrokerStorageInput`](crate::input::UpdateBrokerStorageInput) pub mod update_broker_storage_input { /// A builder for [`UpdateBrokerStorageInput`](crate::input::UpdateBrokerStorageInput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) cluster_arn: std::option::Option<std::string::String>, pub(crate) current_version: std::option::Option<std::string::String>, pub(crate) target_broker_ebs_volume_info: std::option::Option<std::vec::Vec<crate::model::BrokerEbsVolumeInfo>>, } impl Builder { /// <p>The Amazon Resource Name (ARN) that uniquely identifies the cluster.</p> pub fn cluster_arn(mut self, input: impl Into<std::string::String>) -> Self { self.cluster_arn = Some(input.into()); self } /// <p>The Amazon Resource Name (ARN) that uniquely identifies the cluster.</p> pub fn set_cluster_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.cluster_arn = input; self } /// <p>The version of cluster to update from. A successful operation will then generate a new version.</p> pub fn current_version(mut self, input: impl Into<std::string::String>) -> Self { self.current_version = Some(input.into()); self } /// <p>The version of cluster to update from. A successful operation will then generate a new version.</p> pub fn set_current_version( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.current_version = input; self } /// Appends an item to `target_broker_ebs_volume_info`. 
/// /// To override the contents of this collection use [`set_target_broker_ebs_volume_info`](Self::set_target_broker_ebs_volume_info). /// /// <p>Describes the target volume size and the ID of the broker to apply the update to.</p> pub fn target_broker_ebs_volume_info( mut self, input: impl Into<crate::model::BrokerEbsVolumeInfo>, ) -> Self { let mut v = self.target_broker_ebs_volume_info.unwrap_or_default(); v.push(input.into()); self.target_broker_ebs_volume_info = Some(v); self } /// <p>Describes the target volume size and the ID of the broker to apply the update to.</p> pub fn set_target_broker_ebs_volume_info( mut self, input: std::option::Option<std::vec::Vec<crate::model::BrokerEbsVolumeInfo>>, ) -> Self { self.target_broker_ebs_volume_info = input; self } /// Consumes the builder and constructs a [`UpdateBrokerStorageInput`](crate::input::UpdateBrokerStorageInput) pub fn build( self, ) -> std::result::Result< crate::input::UpdateBrokerStorageInput, aws_smithy_http::operation::BuildError, > { Ok(crate::input::UpdateBrokerStorageInput { cluster_arn: self.cluster_arn, current_version: self.current_version, target_broker_ebs_volume_info: self.target_broker_ebs_volume_info, }) } } } #[doc(hidden)] pub type UpdateBrokerStorageInputOperationOutputAlias = crate::operation::UpdateBrokerStorage; #[doc(hidden)] pub type UpdateBrokerStorageInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy; impl UpdateBrokerStorageInput { /// Consumes the builder and constructs an Operation<[`UpdateBrokerStorage`](crate::operation::UpdateBrokerStorage)> #[allow(clippy::let_and_return)] #[allow(clippy::needless_borrow)] pub async fn make_operation( &self, _config: &crate::config::Config, ) -> std::result::Result< aws_smithy_http::operation::Operation< crate::operation::UpdateBrokerStorage, aws_http::AwsErrorRetryPolicy, >, aws_smithy_http::operation::BuildError, > { fn uri_base( _input: &crate::input::UpdateBrokerStorageInput, output: &mut String, ) -> Result<(), aws_smithy_http::operation::BuildError> { let input_32 = &_input.cluster_arn; let input_32 = input_32 .as_ref() .ok_or(aws_smithy_http::operation::BuildError::MissingField { field: "cluster_arn", details: "cannot be empty or unset", })?; let cluster_arn = aws_smithy_http::label::fmt_string(input_32, false); if cluster_arn.is_empty() { return Err(aws_smithy_http::operation::BuildError::MissingField { field: "cluster_arn", details: "cannot be empty or unset", }); } write!( output, "/v1/clusters/{ClusterArn}/nodes/storage", ClusterArn = cluster_arn ) .expect("formatting should succeed"); Ok(()) } #[allow(clippy::unnecessary_wraps)] fn update_http_builder( input: &crate::input::UpdateBrokerStorageInput, builder: http::request::Builder, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { let mut uri = String::new(); uri_base(input, &mut uri)?; Ok(builder.method("PUT").uri(uri)) } #[allow(clippy::unnecessary_wraps)] fn request_builder_base( input: &crate::input::UpdateBrokerStorageInput, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { #[allow(unused_mut)] let mut builder = update_http_builder(input, http::request::Builder::new())?; builder = aws_smithy_http::header::set_header_if_absent( builder, http::header::HeaderName::from_static("content-type"), "application/json", ); Ok(builder) } let properties = aws_smithy_http::property_bag::SharedPropertyBag::new(); let request = request_builder_base(&self)?; let body = 
crate::operation_ser::serialize_operation_crate_operation_update_broker_storage(&self)?; let request = Self::assemble(request, body); #[allow(unused_mut)] let mut request = aws_smithy_http::operation::Request::from_parts( request.map(aws_smithy_http::body::SdkBody::from), properties, ); let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment( aws_types::os_shim_internal::Env::real(), crate::API_METADATA.clone(), ); if let Some(app_name) = _config.app_name() { user_agent = user_agent.with_app_name(app_name.clone()); } request.properties_mut().insert(user_agent); #[allow(unused_mut)] let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config(); request.properties_mut().insert(signing_config); request .properties_mut() .insert(aws_types::SigningService::from_static( _config.signing_service(), )); aws_endpoint::set_endpoint_resolver( &mut request.properties_mut(), _config.endpoint_resolver.clone(), ); if let Some(region) = &_config.region { request.properties_mut().insert(region.clone()); } aws_http::auth::set_provider( &mut request.properties_mut(), _config.credentials_provider.clone(), ); let op = aws_smithy_http::operation::Operation::new( request, crate::operation::UpdateBrokerStorage::new(), ) .with_metadata(aws_smithy_http::operation::Metadata::new( "UpdateBrokerStorage", "kafka", )); let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new()); Ok(op) } fn assemble( builder: http::request::Builder, body: aws_smithy_http::body::SdkBody, ) -> http::request::Request<aws_smithy_http::body::SdkBody> { let mut builder = builder; if let Some(content_length) = body.content_length() { builder = aws_smithy_http::header::set_header_if_absent( builder, http::header::CONTENT_LENGTH, content_length, ); } builder.body(body).expect("should be valid request") } /// Creates a new builder-style object to manufacture [`UpdateBrokerStorageInput`](crate::input::UpdateBrokerStorageInput) pub fn builder() -> crate::input::update_broker_storage_input::Builder { crate::input::update_broker_storage_input::Builder::default() } } /// See [`UpdateBrokerTypeInput`](crate::input::UpdateBrokerTypeInput) pub mod update_broker_type_input { /// A builder for [`UpdateBrokerTypeInput`](crate::input::UpdateBrokerTypeInput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) cluster_arn: std::option::Option<std::string::String>, pub(crate) current_version: std::option::Option<std::string::String>, pub(crate) target_instance_type: std::option::Option<std::string::String>, } impl Builder { /// <p>The Amazon Resource Name (ARN) that uniquely identifies the cluster.</p> pub fn cluster_arn(mut self, input: impl Into<std::string::String>) -> Self { self.cluster_arn = Some(input.into()); self } /// <p>The Amazon Resource Name (ARN) that uniquely identifies the cluster.</p> pub fn set_cluster_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.cluster_arn = input; self } /// <p>The cluster version that you want to change. After this operation completes successfully, the cluster will have a new version.</p> pub fn current_version(mut self, input: impl Into<std::string::String>) -> Self { self.current_version = Some(input.into()); self } /// <p>The cluster version that you want to change. 
After this operation completes successfully, the cluster will have a new version.</p> pub fn set_current_version( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.current_version = input; self } /// <p>The Amazon MSK broker type that you want all of the brokers in this cluster to be.</p> pub fn target_instance_type(mut self, input: impl Into<std::string::String>) -> Self { self.target_instance_type = Some(input.into()); self } /// <p>The Amazon MSK broker type that you want all of the brokers in this cluster to be.</p> pub fn set_target_instance_type( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.target_instance_type = input; self } /// Consumes the builder and constructs a [`UpdateBrokerTypeInput`](crate::input::UpdateBrokerTypeInput) pub fn build( self, ) -> std::result::Result< crate::input::UpdateBrokerTypeInput, aws_smithy_http::operation::BuildError, > { Ok(crate::input::UpdateBrokerTypeInput { cluster_arn: self.cluster_arn, current_version: self.current_version, target_instance_type: self.target_instance_type, }) } } } #[doc(hidden)] pub type UpdateBrokerTypeInputOperationOutputAlias = crate::operation::UpdateBrokerType; #[doc(hidden)] pub type UpdateBrokerTypeInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy; impl UpdateBrokerTypeInput { /// Consumes the builder and constructs an Operation<[`UpdateBrokerType`](crate::operation::UpdateBrokerType)> #[allow(clippy::let_and_return)] #[allow(clippy::needless_borrow)] pub async fn make_operation( &self, _config: &crate::config::Config, ) -> std::result::Result< aws_smithy_http::operation::Operation< crate::operation::UpdateBrokerType, aws_http::AwsErrorRetryPolicy, >, aws_smithy_http::operation::BuildError, > { fn uri_base( _input: &crate::input::UpdateBrokerTypeInput, output: &mut String, ) -> Result<(), aws_smithy_http::operation::BuildError> { let input_33 = &_input.cluster_arn; let input_33 = input_33 .as_ref() .ok_or(aws_smithy_http::operation::BuildError::MissingField { field: "cluster_arn", details: "cannot be empty or unset", })?; let cluster_arn = aws_smithy_http::label::fmt_string(input_33, false); if cluster_arn.is_empty() { return Err(aws_smithy_http::operation::BuildError::MissingField { field: "cluster_arn", details: "cannot be empty or unset", }); } write!( output, "/v1/clusters/{ClusterArn}/nodes/type", ClusterArn = cluster_arn ) .expect("formatting should succeed"); Ok(()) } #[allow(clippy::unnecessary_wraps)] fn update_http_builder( input: &crate::input::UpdateBrokerTypeInput, builder: http::request::Builder, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { let mut uri = String::new(); uri_base(input, &mut uri)?; Ok(builder.method("PUT").uri(uri)) } #[allow(clippy::unnecessary_wraps)] fn request_builder_base( input: &crate::input::UpdateBrokerTypeInput, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { #[allow(unused_mut)] let mut builder = update_http_builder(input, http::request::Builder::new())?; builder = aws_smithy_http::header::set_header_if_absent( builder, http::header::HeaderName::from_static("content-type"), "application/json", ); Ok(builder) } let properties = aws_smithy_http::property_bag::SharedPropertyBag::new(); let request = request_builder_base(&self)?; let body = crate::operation_ser::serialize_operation_crate_operation_update_broker_type(&self)?; let request = Self::assemble(request, body); #[allow(unused_mut)] let mut request = 
aws_smithy_http::operation::Request::from_parts( request.map(aws_smithy_http::body::SdkBody::from), properties, ); let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment( aws_types::os_shim_internal::Env::real(), crate::API_METADATA.clone(), ); if let Some(app_name) = _config.app_name() { user_agent = user_agent.with_app_name(app_name.clone()); } request.properties_mut().insert(user_agent); #[allow(unused_mut)] let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config(); request.properties_mut().insert(signing_config); request .properties_mut() .insert(aws_types::SigningService::from_static( _config.signing_service(), )); aws_endpoint::set_endpoint_resolver( &mut request.properties_mut(), _config.endpoint_resolver.clone(), ); if let Some(region) = &_config.region { request.properties_mut().insert(region.clone()); } aws_http::auth::set_provider( &mut request.properties_mut(), _config.credentials_provider.clone(), ); let op = aws_smithy_http::operation::Operation::new( request, crate::operation::UpdateBrokerType::new(), ) .with_metadata(aws_smithy_http::operation::Metadata::new( "UpdateBrokerType", "kafka", )); let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new()); Ok(op) } fn assemble( builder: http::request::Builder, body: aws_smithy_http::body::SdkBody, ) -> http::request::Request<aws_smithy_http::body::SdkBody> { let mut builder = builder; if let Some(content_length) = body.content_length() { builder = aws_smithy_http::header::set_header_if_absent( builder, http::header::CONTENT_LENGTH, content_length, ); } builder.body(body).expect("should be valid request") } /// Creates a new builder-style object to manufacture [`UpdateBrokerTypeInput`](crate::input::UpdateBrokerTypeInput) pub fn builder() -> crate::input::update_broker_type_input::Builder { crate::input::update_broker_type_input::Builder::default() } } /// See [`UpdateClusterConfigurationInput`](crate::input::UpdateClusterConfigurationInput) pub mod update_cluster_configuration_input { /// A builder for [`UpdateClusterConfigurationInput`](crate::input::UpdateClusterConfigurationInput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) cluster_arn: std::option::Option<std::string::String>, pub(crate) configuration_info: std::option::Option<crate::model::ConfigurationInfo>, pub(crate) current_version: std::option::Option<std::string::String>, } impl Builder { /// <p>The Amazon Resource Name (ARN) that uniquely identifies the cluster.</p> pub fn cluster_arn(mut self, input: impl Into<std::string::String>) -> Self { self.cluster_arn = Some(input.into()); self } /// <p>The Amazon Resource Name (ARN) that uniquely identifies the cluster.</p> pub fn set_cluster_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.cluster_arn = input; self } /// <p>Represents the configuration that you want MSK to use for the brokers in a cluster.</p> pub fn configuration_info(mut self, input: crate::model::ConfigurationInfo) -> Self { self.configuration_info = Some(input); self } /// <p>Represents the configuration that you want MSK to use for the brokers in a cluster.</p> pub fn set_configuration_info( mut self, input: std::option::Option<crate::model::ConfigurationInfo>, ) -> Self { self.configuration_info = input; self } /// <p>The version of the cluster that needs to be updated.</p> pub fn current_version(mut self, input: impl Into<std::string::String>) -> Self { 
self.current_version = Some(input.into()); self } /// <p>The version of the cluster that needs to be updated.</p> pub fn set_current_version( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.current_version = input; self } /// Consumes the builder and constructs a [`UpdateClusterConfigurationInput`](crate::input::UpdateClusterConfigurationInput) pub fn build( self, ) -> std::result::Result< crate::input::UpdateClusterConfigurationInput, aws_smithy_http::operation::BuildError, > { Ok(crate::input::UpdateClusterConfigurationInput { cluster_arn: self.cluster_arn, configuration_info: self.configuration_info, current_version: self.current_version, }) } } } #[doc(hidden)] pub type UpdateClusterConfigurationInputOperationOutputAlias = crate::operation::UpdateClusterConfiguration; #[doc(hidden)] pub type UpdateClusterConfigurationInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy; impl UpdateClusterConfigurationInput { /// Consumes the builder and constructs an Operation<[`UpdateClusterConfiguration`](crate::operation::UpdateClusterConfiguration)> #[allow(clippy::let_and_return)] #[allow(clippy::needless_borrow)] pub async fn make_operation( &self, _config: &crate::config::Config, ) -> std::result::Result< aws_smithy_http::operation::Operation< crate::operation::UpdateClusterConfiguration, aws_http::AwsErrorRetryPolicy, >, aws_smithy_http::operation::BuildError, > { fn uri_base( _input: &crate::input::UpdateClusterConfigurationInput, output: &mut String, ) -> Result<(), aws_smithy_http::operation::BuildError> { let input_34 = &_input.cluster_arn; let input_34 = input_34 .as_ref() .ok_or(aws_smithy_http::operation::BuildError::MissingField { field: "cluster_arn", details: "cannot be empty or unset", })?; let cluster_arn = aws_smithy_http::label::fmt_string(input_34, false); if cluster_arn.is_empty() { return Err(aws_smithy_http::operation::BuildError::MissingField { field: "cluster_arn", details: "cannot be empty or unset", }); } write!( output, "/v1/clusters/{ClusterArn}/configuration", ClusterArn = cluster_arn ) .expect("formatting should succeed"); Ok(()) } #[allow(clippy::unnecessary_wraps)] fn update_http_builder( input: &crate::input::UpdateClusterConfigurationInput, builder: http::request::Builder, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { let mut uri = String::new(); uri_base(input, &mut uri)?; Ok(builder.method("PUT").uri(uri)) } #[allow(clippy::unnecessary_wraps)] fn request_builder_base( input: &crate::input::UpdateClusterConfigurationInput, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { #[allow(unused_mut)] let mut builder = update_http_builder(input, http::request::Builder::new())?; builder = aws_smithy_http::header::set_header_if_absent( builder, http::header::HeaderName::from_static("content-type"), "application/json", ); Ok(builder) } let properties = aws_smithy_http::property_bag::SharedPropertyBag::new(); let request = request_builder_base(&self)?; let body = crate::operation_ser::serialize_operation_crate_operation_update_cluster_configuration( &self, )?; let request = Self::assemble(request, body); #[allow(unused_mut)] let mut request = aws_smithy_http::operation::Request::from_parts( request.map(aws_smithy_http::body::SdkBody::from), properties, ); let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment( aws_types::os_shim_internal::Env::real(), crate::API_METADATA.clone(), ); if let Some(app_name) = _config.app_name() { user_agent 
= user_agent.with_app_name(app_name.clone()); } request.properties_mut().insert(user_agent); #[allow(unused_mut)] let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config(); request.properties_mut().insert(signing_config); request .properties_mut() .insert(aws_types::SigningService::from_static( _config.signing_service(), )); aws_endpoint::set_endpoint_resolver( &mut request.properties_mut(), _config.endpoint_resolver.clone(), ); if let Some(region) = &_config.region { request.properties_mut().insert(region.clone()); } aws_http::auth::set_provider( &mut request.properties_mut(), _config.credentials_provider.clone(), ); let op = aws_smithy_http::operation::Operation::new( request, crate::operation::UpdateClusterConfiguration::new(), ) .with_metadata(aws_smithy_http::operation::Metadata::new( "UpdateClusterConfiguration", "kafka", )); let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new()); Ok(op) } fn assemble( builder: http::request::Builder, body: aws_smithy_http::body::SdkBody, ) -> http::request::Request<aws_smithy_http::body::SdkBody> { let mut builder = builder; if let Some(content_length) = body.content_length() { builder = aws_smithy_http::header::set_header_if_absent( builder, http::header::CONTENT_LENGTH, content_length, ); } builder.body(body).expect("should be valid request") } /// Creates a new builder-style object to manufacture [`UpdateClusterConfigurationInput`](crate::input::UpdateClusterConfigurationInput) pub fn builder() -> crate::input::update_cluster_configuration_input::Builder { crate::input::update_cluster_configuration_input::Builder::default() } } /// See [`UpdateClusterKafkaVersionInput`](crate::input::UpdateClusterKafkaVersionInput) pub mod update_cluster_kafka_version_input { /// A builder for [`UpdateClusterKafkaVersionInput`](crate::input::UpdateClusterKafkaVersionInput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) cluster_arn: std::option::Option<std::string::String>, pub(crate) configuration_info: std::option::Option<crate::model::ConfigurationInfo>, pub(crate) current_version: std::option::Option<std::string::String>, pub(crate) target_kafka_version: std::option::Option<std::string::String>, } impl Builder { /// <p>The Amazon Resource Name (ARN) of the cluster to be updated.</p> pub fn cluster_arn(mut self, input: impl Into<std::string::String>) -> Self { self.cluster_arn = Some(input.into()); self } /// <p>The Amazon Resource Name (ARN) of the cluster to be updated.</p> pub fn set_cluster_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.cluster_arn = input; self } /// <p>The custom configuration that should be applied on the new version of cluster.</p> pub fn configuration_info(mut self, input: crate::model::ConfigurationInfo) -> Self { self.configuration_info = Some(input); self } /// <p>The custom configuration that should be applied on the new version of cluster.</p> pub fn set_configuration_info( mut self, input: std::option::Option<crate::model::ConfigurationInfo>, ) -> Self { self.configuration_info = input; self } /// <p>Current cluster version.</p> pub fn current_version(mut self, input: impl Into<std::string::String>) -> Self { self.current_version = Some(input.into()); self } /// <p>Current cluster version.</p> pub fn set_current_version( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.current_version = input; self } /// <p>Target Kafka version.</p> pub fn 
target_kafka_version(mut self, input: impl Into<std::string::String>) -> Self { self.target_kafka_version = Some(input.into()); self } /// <p>Target Kafka version.</p> pub fn set_target_kafka_version( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.target_kafka_version = input; self } /// Consumes the builder and constructs a [`UpdateClusterKafkaVersionInput`](crate::input::UpdateClusterKafkaVersionInput) pub fn build( self, ) -> std::result::Result< crate::input::UpdateClusterKafkaVersionInput, aws_smithy_http::operation::BuildError, > { Ok(crate::input::UpdateClusterKafkaVersionInput { cluster_arn: self.cluster_arn, configuration_info: self.configuration_info, current_version: self.current_version, target_kafka_version: self.target_kafka_version, }) } } } #[doc(hidden)] pub type UpdateClusterKafkaVersionInputOperationOutputAlias = crate::operation::UpdateClusterKafkaVersion; #[doc(hidden)] pub type UpdateClusterKafkaVersionInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy; impl UpdateClusterKafkaVersionInput { /// Consumes the builder and constructs an Operation<[`UpdateClusterKafkaVersion`](crate::operation::UpdateClusterKafkaVersion)> #[allow(clippy::let_and_return)] #[allow(clippy::needless_borrow)] pub async fn make_operation( &self, _config: &crate::config::Config, ) -> std::result::Result< aws_smithy_http::operation::Operation< crate::operation::UpdateClusterKafkaVersion, aws_http::AwsErrorRetryPolicy, >, aws_smithy_http::operation::BuildError, > { fn uri_base( _input: &crate::input::UpdateClusterKafkaVersionInput, output: &mut String, ) -> Result<(), aws_smithy_http::operation::BuildError> { let input_35 = &_input.cluster_arn; let input_35 = input_35 .as_ref() .ok_or(aws_smithy_http::operation::BuildError::MissingField { field: "cluster_arn", details: "cannot be empty or unset", })?; let cluster_arn = aws_smithy_http::label::fmt_string(input_35, false); if cluster_arn.is_empty() { return Err(aws_smithy_http::operation::BuildError::MissingField { field: "cluster_arn", details: "cannot be empty or unset", }); } write!( output, "/v1/clusters/{ClusterArn}/version", ClusterArn = cluster_arn ) .expect("formatting should succeed"); Ok(()) } #[allow(clippy::unnecessary_wraps)] fn update_http_builder( input: &crate::input::UpdateClusterKafkaVersionInput, builder: http::request::Builder, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { let mut uri = String::new(); uri_base(input, &mut uri)?; Ok(builder.method("PUT").uri(uri)) } #[allow(clippy::unnecessary_wraps)] fn request_builder_base( input: &crate::input::UpdateClusterKafkaVersionInput, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { #[allow(unused_mut)] let mut builder = update_http_builder(input, http::request::Builder::new())?; builder = aws_smithy_http::header::set_header_if_absent( builder, http::header::HeaderName::from_static("content-type"), "application/json", ); Ok(builder) } let properties = aws_smithy_http::property_bag::SharedPropertyBag::new(); let request = request_builder_base(&self)?; let body = crate::operation_ser::serialize_operation_crate_operation_update_cluster_kafka_version( &self, )?; let request = Self::assemble(request, body); #[allow(unused_mut)] let mut request = aws_smithy_http::operation::Request::from_parts( request.map(aws_smithy_http::body::SdkBody::from), properties, ); let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment( 
aws_types::os_shim_internal::Env::real(), crate::API_METADATA.clone(), ); if let Some(app_name) = _config.app_name() { user_agent = user_agent.with_app_name(app_name.clone()); } request.properties_mut().insert(user_agent); #[allow(unused_mut)] let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config(); request.properties_mut().insert(signing_config); request .properties_mut() .insert(aws_types::SigningService::from_static( _config.signing_service(), )); aws_endpoint::set_endpoint_resolver( &mut request.properties_mut(), _config.endpoint_resolver.clone(), ); if let Some(region) = &_config.region { request.properties_mut().insert(region.clone()); } aws_http::auth::set_provider( &mut request.properties_mut(), _config.credentials_provider.clone(), ); let op = aws_smithy_http::operation::Operation::new( request, crate::operation::UpdateClusterKafkaVersion::new(), ) .with_metadata(aws_smithy_http::operation::Metadata::new( "UpdateClusterKafkaVersion", "kafka", )); let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new()); Ok(op) } fn assemble( builder: http::request::Builder, body: aws_smithy_http::body::SdkBody, ) -> http::request::Request<aws_smithy_http::body::SdkBody> { let mut builder = builder; if let Some(content_length) = body.content_length() { builder = aws_smithy_http::header::set_header_if_absent( builder, http::header::CONTENT_LENGTH, content_length, ); } builder.body(body).expect("should be valid request") } /// Creates a new builder-style object to manufacture [`UpdateClusterKafkaVersionInput`](crate::input::UpdateClusterKafkaVersionInput) pub fn builder() -> crate::input::update_cluster_kafka_version_input::Builder { crate::input::update_cluster_kafka_version_input::Builder::default() } } /// See [`UpdateConfigurationInput`](crate::input::UpdateConfigurationInput) pub mod update_configuration_input { /// A builder for [`UpdateConfigurationInput`](crate::input::UpdateConfigurationInput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) arn: std::option::Option<std::string::String>, pub(crate) description: std::option::Option<std::string::String>, pub(crate) server_properties: std::option::Option<aws_smithy_types::Blob>, } impl Builder { /// <p>The Amazon Resource Name (ARN) of the configuration.</p> pub fn arn(mut self, input: impl Into<std::string::String>) -> Self { self.arn = Some(input.into()); self } /// <p>The Amazon Resource Name (ARN) of the configuration.</p> pub fn set_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.arn = input; self } /// <p>The description of the configuration revision.</p> pub fn description(mut self, input: impl Into<std::string::String>) -> Self { self.description = Some(input.into()); self } /// <p>The description of the configuration revision.</p> pub fn set_description(mut self, input: std::option::Option<std::string::String>) -> Self { self.description = input; self } /// <p>Contents of the <filename>server.properties</filename> file. When using the API, you must ensure that the contents of the file are base64 encoded. /// When using the AWS Management Console, the SDK, or the AWS CLI, the contents of <filename>server.properties</filename> can be in plaintext.</p> pub fn server_properties(mut self, input: aws_smithy_types::Blob) -> Self { self.server_properties = Some(input); self } /// <p>Contents of the <filename>server.properties</filename> file. 
When using the API, you must ensure that the contents of the file are base64 encoded. /// When using the AWS Management Console, the SDK, or the AWS CLI, the contents of <filename>server.properties</filename> can be in plaintext.</p> pub fn set_server_properties( mut self, input: std::option::Option<aws_smithy_types::Blob>, ) -> Self { self.server_properties = input; self } /// Consumes the builder and constructs a [`UpdateConfigurationInput`](crate::input::UpdateConfigurationInput) pub fn build( self, ) -> std::result::Result< crate::input::UpdateConfigurationInput, aws_smithy_http::operation::BuildError, > { Ok(crate::input::UpdateConfigurationInput { arn: self.arn, description: self.description, server_properties: self.server_properties, }) } } } #[doc(hidden)] pub type UpdateConfigurationInputOperationOutputAlias = crate::operation::UpdateConfiguration; #[doc(hidden)] pub type UpdateConfigurationInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy; impl UpdateConfigurationInput { /// Consumes the builder and constructs an Operation<[`UpdateConfiguration`](crate::operation::UpdateConfiguration)> #[allow(clippy::let_and_return)] #[allow(clippy::needless_borrow)] pub async fn make_operation( &self, _config: &crate::config::Config, ) -> std::result::Result< aws_smithy_http::operation::Operation< crate::operation::UpdateConfiguration, aws_http::AwsErrorRetryPolicy, >, aws_smithy_http::operation::BuildError, > { fn uri_base( _input: &crate::input::UpdateConfigurationInput, output: &mut String, ) -> Result<(), aws_smithy_http::operation::BuildError> { let input_36 = &_input.arn; let input_36 = input_36 .as_ref() .ok_or(aws_smithy_http::operation::BuildError::MissingField { field: "arn", details: "cannot be empty or unset", })?; let arn = aws_smithy_http::label::fmt_string(input_36, false); if arn.is_empty() { return Err(aws_smithy_http::operation::BuildError::MissingField { field: "arn", details: "cannot be empty or unset", }); } write!(output, "/v1/configurations/{Arn}", Arn = arn) .expect("formatting should succeed"); Ok(()) } #[allow(clippy::unnecessary_wraps)] fn update_http_builder( input: &crate::input::UpdateConfigurationInput, builder: http::request::Builder, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { let mut uri = String::new(); uri_base(input, &mut uri)?; Ok(builder.method("PUT").uri(uri)) } #[allow(clippy::unnecessary_wraps)] fn request_builder_base( input: &crate::input::UpdateConfigurationInput, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { #[allow(unused_mut)] let mut builder = update_http_builder(input, http::request::Builder::new())?; builder = aws_smithy_http::header::set_header_if_absent( builder, http::header::HeaderName::from_static("content-type"), "application/json", ); Ok(builder) } let properties = aws_smithy_http::property_bag::SharedPropertyBag::new(); let request = request_builder_base(&self)?; let body = crate::operation_ser::serialize_operation_crate_operation_update_configuration(&self)?; let request = Self::assemble(request, body); #[allow(unused_mut)] let mut request = aws_smithy_http::operation::Request::from_parts( request.map(aws_smithy_http::body::SdkBody::from), properties, ); let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment( aws_types::os_shim_internal::Env::real(), crate::API_METADATA.clone(), ); if let Some(app_name) = _config.app_name() { user_agent = user_agent.with_app_name(app_name.clone()); } 
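// For orientation: the `UpdateConfigurationInput` consumed here is normally
// built ahead of time with the builder defined above. A minimal sketch, with
// hypothetical placeholder values for the ARN, description, and properties:
//
//     let input = crate::input::UpdateConfigurationInput::builder()
//         .arn("arn:aws:kafka:us-east-1:111122223333:configuration/example/abcd1234")
//         .description("Raise log retention")
//         .server_properties(aws_smithy_types::Blob::new(
//             "retention.ms=86400000".as_bytes(),
//         ))
//         .build()
//         .expect("building UpdateConfigurationInput should not fail");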
request.properties_mut().insert(user_agent); #[allow(unused_mut)] let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config(); request.properties_mut().insert(signing_config); request .properties_mut() .insert(aws_types::SigningService::from_static( _config.signing_service(), )); aws_endpoint::set_endpoint_resolver( &mut request.properties_mut(), _config.endpoint_resolver.clone(), ); if let Some(region) = &_config.region { request.properties_mut().insert(region.clone()); } aws_http::auth::set_provider( &mut request.properties_mut(), _config.credentials_provider.clone(), ); let op = aws_smithy_http::operation::Operation::new( request, crate::operation::UpdateConfiguration::new(), ) .with_metadata(aws_smithy_http::operation::Metadata::new( "UpdateConfiguration", "kafka", )); let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new()); Ok(op) } fn assemble( builder: http::request::Builder, body: aws_smithy_http::body::SdkBody, ) -> http::request::Request<aws_smithy_http::body::SdkBody> { let mut builder = builder; if let Some(content_length) = body.content_length() { builder = aws_smithy_http::header::set_header_if_absent( builder, http::header::CONTENT_LENGTH, content_length, ); } builder.body(body).expect("should be valid request") } /// Creates a new builder-style object to manufacture [`UpdateConfigurationInput`](crate::input::UpdateConfigurationInput) pub fn builder() -> crate::input::update_configuration_input::Builder { crate::input::update_configuration_input::Builder::default() } } /// See [`UpdateConnectivityInput`](crate::input::UpdateConnectivityInput) pub mod update_connectivity_input { /// A builder for [`UpdateConnectivityInput`](crate::input::UpdateConnectivityInput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) cluster_arn: std::option::Option<std::string::String>, pub(crate) connectivity_info: std::option::Option<crate::model::ConnectivityInfo>, pub(crate) current_version: std::option::Option<std::string::String>, } impl Builder { /// <p>The Amazon Resource Name (ARN) that uniquely identifies the cluster.</p> pub fn cluster_arn(mut self, input: impl Into<std::string::String>) -> Self { self.cluster_arn = Some(input.into()); self } /// <p>The Amazon Resource Name (ARN) that uniquely identifies the cluster.</p> pub fn set_cluster_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.cluster_arn = input; self } /// <p>Information about the broker access configuration.</p> pub fn connectivity_info(mut self, input: crate::model::ConnectivityInfo) -> Self { self.connectivity_info = Some(input); self } /// <p>Information about the broker access configuration.</p> pub fn set_connectivity_info( mut self, input: std::option::Option<crate::model::ConnectivityInfo>, ) -> Self { self.connectivity_info = input; self } /// <p>The version of the MSK cluster to update. Cluster versions aren't simple numbers. You can describe an MSK cluster to find its version. When this update operation is successful, it generates a new cluster version.</p> pub fn current_version(mut self, input: impl Into<std::string::String>) -> Self { self.current_version = Some(input.into()); self } /// <p>The version of the MSK cluster to update. Cluster versions aren't simple numbers. You can describe an MSK cluster to find its version.
When this update operation is successful, it generates a new cluster version.</p> pub fn set_current_version( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.current_version = input; self } /// Consumes the builder and constructs a [`UpdateConnectivityInput`](crate::input::UpdateConnectivityInput) pub fn build( self, ) -> std::result::Result< crate::input::UpdateConnectivityInput, aws_smithy_http::operation::BuildError, > { Ok(crate::input::UpdateConnectivityInput { cluster_arn: self.cluster_arn, connectivity_info: self.connectivity_info, current_version: self.current_version, }) } } } #[doc(hidden)] pub type UpdateConnectivityInputOperationOutputAlias = crate::operation::UpdateConnectivity; #[doc(hidden)] pub type UpdateConnectivityInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy; impl UpdateConnectivityInput { /// Consumes the builder and constructs an Operation<[`UpdateConnectivity`](crate::operation::UpdateConnectivity)> #[allow(clippy::let_and_return)] #[allow(clippy::needless_borrow)] pub async fn make_operation( &self, _config: &crate::config::Config, ) -> std::result::Result< aws_smithy_http::operation::Operation< crate::operation::UpdateConnectivity, aws_http::AwsErrorRetryPolicy, >, aws_smithy_http::operation::BuildError, > { fn uri_base( _input: &crate::input::UpdateConnectivityInput, output: &mut String, ) -> Result<(), aws_smithy_http::operation::BuildError> { let input_37 = &_input.cluster_arn; let input_37 = input_37 .as_ref() .ok_or(aws_smithy_http::operation::BuildError::MissingField { field: "cluster_arn", details: "cannot be empty or unset", })?; let cluster_arn = aws_smithy_http::label::fmt_string(input_37, false); if cluster_arn.is_empty() { return Err(aws_smithy_http::operation::BuildError::MissingField { field: "cluster_arn", details: "cannot be empty or unset", }); } write!( output, "/v1/clusters/{ClusterArn}/connectivity", ClusterArn = cluster_arn ) .expect("formatting should succeed"); Ok(()) } #[allow(clippy::unnecessary_wraps)] fn update_http_builder( input: &crate::input::UpdateConnectivityInput, builder: http::request::Builder, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { let mut uri = String::new(); uri_base(input, &mut uri)?; Ok(builder.method("PUT").uri(uri)) } #[allow(clippy::unnecessary_wraps)] fn request_builder_base( input: &crate::input::UpdateConnectivityInput, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { #[allow(unused_mut)] let mut builder = update_http_builder(input, http::request::Builder::new())?; builder = aws_smithy_http::header::set_header_if_absent( builder, http::header::HeaderName::from_static("content-type"), "application/json", ); Ok(builder) } let properties = aws_smithy_http::property_bag::SharedPropertyBag::new(); let request = request_builder_base(&self)?; let body = crate::operation_ser::serialize_operation_crate_operation_update_connectivity(&self)?; let request = Self::assemble(request, body); #[allow(unused_mut)] let mut request = aws_smithy_http::operation::Request::from_parts( request.map(aws_smithy_http::body::SdkBody::from), properties, ); let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment( aws_types::os_shim_internal::Env::real(), crate::API_METADATA.clone(), ); if let Some(app_name) = _config.app_name() { user_agent = user_agent.with_app_name(app_name.clone()); } request.properties_mut().insert(user_agent); #[allow(unused_mut)] let mut signing_config = 
aws_sig_auth::signer::OperationSigningConfig::default_config(); request.properties_mut().insert(signing_config); request .properties_mut() .insert(aws_types::SigningService::from_static( _config.signing_service(), )); aws_endpoint::set_endpoint_resolver( &mut request.properties_mut(), _config.endpoint_resolver.clone(), ); if let Some(region) = &_config.region { request.properties_mut().insert(region.clone()); } aws_http::auth::set_provider( &mut request.properties_mut(), _config.credentials_provider.clone(), ); let op = aws_smithy_http::operation::Operation::new( request, crate::operation::UpdateConnectivity::new(), ) .with_metadata(aws_smithy_http::operation::Metadata::new( "UpdateConnectivity", "kafka", )); let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new()); Ok(op) } fn assemble( builder: http::request::Builder, body: aws_smithy_http::body::SdkBody, ) -> http::request::Request<aws_smithy_http::body::SdkBody> { let mut builder = builder; if let Some(content_length) = body.content_length() { builder = aws_smithy_http::header::set_header_if_absent( builder, http::header::CONTENT_LENGTH, content_length, ); } builder.body(body).expect("should be valid request") } /// Creates a new builder-style object to manufacture [`UpdateConnectivityInput`](crate::input::UpdateConnectivityInput) pub fn builder() -> crate::input::update_connectivity_input::Builder { crate::input::update_connectivity_input::Builder::default() } } /// See [`UpdateMonitoringInput`](crate::input::UpdateMonitoringInput) pub mod update_monitoring_input { /// A builder for [`UpdateMonitoringInput`](crate::input::UpdateMonitoringInput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) cluster_arn: std::option::Option<std::string::String>, pub(crate) current_version: std::option::Option<std::string::String>, pub(crate) enhanced_monitoring: std::option::Option<crate::model::EnhancedMonitoring>, pub(crate) open_monitoring: std::option::Option<crate::model::OpenMonitoringInfo>, pub(crate) logging_info: std::option::Option<crate::model::LoggingInfo>, } impl Builder { /// <p>The Amazon Resource Name (ARN) that uniquely identifies the cluster.</p> pub fn cluster_arn(mut self, input: impl Into<std::string::String>) -> Self { self.cluster_arn = Some(input.into()); self } /// <p>The Amazon Resource Name (ARN) that uniquely identifies the cluster.</p> pub fn set_cluster_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.cluster_arn = input; self } /// <p>The version of the MSK cluster to update. Cluster versions aren't simple numbers. You can describe an MSK cluster to find its version. When this update operation is successful, it generates a new cluster version.</p> pub fn current_version(mut self, input: impl Into<std::string::String>) -> Self { self.current_version = Some(input.into()); self } /// <p>The version of the MSK cluster to update. Cluster versions aren't simple numbers. You can describe an MSK cluster to find its version. 
When this update operation is successful, it generates a new cluster version.</p> pub fn set_current_version( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.current_version = input; self } /// <p>Specifies which Apache Kafka metrics Amazon MSK gathers and sends to Amazon CloudWatch for this cluster.</p> pub fn enhanced_monitoring(mut self, input: crate::model::EnhancedMonitoring) -> Self { self.enhanced_monitoring = Some(input); self } /// <p>Specifies which Apache Kafka metrics Amazon MSK gathers and sends to Amazon CloudWatch for this cluster.</p> pub fn set_enhanced_monitoring( mut self, input: std::option::Option<crate::model::EnhancedMonitoring>, ) -> Self { self.enhanced_monitoring = input; self } /// <p>The settings for open monitoring.</p> pub fn open_monitoring(mut self, input: crate::model::OpenMonitoringInfo) -> Self { self.open_monitoring = Some(input); self } /// <p>The settings for open monitoring.</p> pub fn set_open_monitoring( mut self, input: std::option::Option<crate::model::OpenMonitoringInfo>, ) -> Self { self.open_monitoring = input; self } #[allow(missing_docs)] // documentation missing in model pub fn logging_info(mut self, input: crate::model::LoggingInfo) -> Self { self.logging_info = Some(input); self } #[allow(missing_docs)] // documentation missing in model pub fn set_logging_info( mut self, input: std::option::Option<crate::model::LoggingInfo>, ) -> Self { self.logging_info = input; self } /// Consumes the builder and constructs a [`UpdateMonitoringInput`](crate::input::UpdateMonitoringInput) pub fn build( self, ) -> std::result::Result< crate::input::UpdateMonitoringInput, aws_smithy_http::operation::BuildError, > { Ok(crate::input::UpdateMonitoringInput { cluster_arn: self.cluster_arn, current_version: self.current_version, enhanced_monitoring: self.enhanced_monitoring, open_monitoring: self.open_monitoring, logging_info: self.logging_info, }) } } } #[doc(hidden)] pub type UpdateMonitoringInputOperationOutputAlias = crate::operation::UpdateMonitoring; #[doc(hidden)] pub type UpdateMonitoringInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy; impl UpdateMonitoringInput { /// Consumes the builder and constructs an Operation<[`UpdateMonitoring`](crate::operation::UpdateMonitoring)> #[allow(clippy::let_and_return)] #[allow(clippy::needless_borrow)] pub async fn make_operation( &self, _config: &crate::config::Config, ) -> std::result::Result< aws_smithy_http::operation::Operation< crate::operation::UpdateMonitoring, aws_http::AwsErrorRetryPolicy, >, aws_smithy_http::operation::BuildError, > { fn uri_base( _input: &crate::input::UpdateMonitoringInput, output: &mut String, ) -> Result<(), aws_smithy_http::operation::BuildError> { let input_38 = &_input.cluster_arn; let input_38 = input_38 .as_ref() .ok_or(aws_smithy_http::operation::BuildError::MissingField { field: "cluster_arn", details: "cannot be empty or unset", })?; let cluster_arn = aws_smithy_http::label::fmt_string(input_38, false); if cluster_arn.is_empty() { return Err(aws_smithy_http::operation::BuildError::MissingField { field: "cluster_arn", details: "cannot be empty or unset", }); } write!( output, "/v1/clusters/{ClusterArn}/monitoring", ClusterArn = cluster_arn ) .expect("formatting should succeed"); Ok(()) } #[allow(clippy::unnecessary_wraps)] fn update_http_builder( input: &crate::input::UpdateMonitoringInput, builder: http::request::Builder, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { let mut uri = String::new(); 
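// `uri_base` (defined above) writes "/v1/clusters/{ClusterArn}/monitoring"
// into `uri`, percent-encoding the ARN label via `fmt_string(_, false)` so
// that non-greedy label characters such as ':' and '/' become %3A and %2F.
// For a hypothetical ARN "arn:aws:kafka:us-east-1:123456789012:cluster/demo/1a2b"
// the rendered path would be
// "/v1/clusters/arn%3Aaws%3Akafka%3Aus-east-1%3A123456789012%3Acluster%2Fdemo%2F1a2b/monitoring".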
uri_base(input, &mut uri)?; Ok(builder.method("PUT").uri(uri)) } #[allow(clippy::unnecessary_wraps)] fn request_builder_base( input: &crate::input::UpdateMonitoringInput, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { #[allow(unused_mut)] let mut builder = update_http_builder(input, http::request::Builder::new())?; builder = aws_smithy_http::header::set_header_if_absent( builder, http::header::HeaderName::from_static("content-type"), "application/json", ); Ok(builder) } let properties = aws_smithy_http::property_bag::SharedPropertyBag::new(); let request = request_builder_base(&self)?; let body = crate::operation_ser::serialize_operation_crate_operation_update_monitoring(&self)?; let request = Self::assemble(request, body); #[allow(unused_mut)] let mut request = aws_smithy_http::operation::Request::from_parts( request.map(aws_smithy_http::body::SdkBody::from), properties, ); let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment( aws_types::os_shim_internal::Env::real(), crate::API_METADATA.clone(), ); if let Some(app_name) = _config.app_name() { user_agent = user_agent.with_app_name(app_name.clone()); } request.properties_mut().insert(user_agent); #[allow(unused_mut)] let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config(); request.properties_mut().insert(signing_config); request .properties_mut() .insert(aws_types::SigningService::from_static( _config.signing_service(), )); aws_endpoint::set_endpoint_resolver( &mut request.properties_mut(), _config.endpoint_resolver.clone(), ); if let Some(region) = &_config.region { request.properties_mut().insert(region.clone()); } aws_http::auth::set_provider( &mut request.properties_mut(), _config.credentials_provider.clone(), ); let op = aws_smithy_http::operation::Operation::new( request, crate::operation::UpdateMonitoring::new(), ) .with_metadata(aws_smithy_http::operation::Metadata::new( "UpdateMonitoring", "kafka", )); let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new()); Ok(op) } fn assemble( builder: http::request::Builder, body: aws_smithy_http::body::SdkBody, ) -> http::request::Request<aws_smithy_http::body::SdkBody> { let mut builder = builder; if let Some(content_length) = body.content_length() { builder = aws_smithy_http::header::set_header_if_absent( builder, http::header::CONTENT_LENGTH, content_length, ); } builder.body(body).expect("should be valid request") } /// Creates a new builder-style object to manufacture [`UpdateMonitoringInput`](crate::input::UpdateMonitoringInput) pub fn builder() -> crate::input::update_monitoring_input::Builder { crate::input::update_monitoring_input::Builder::default() } } /// See [`UpdateSecurityInput`](crate::input::UpdateSecurityInput) pub mod update_security_input { /// A builder for [`UpdateSecurityInput`](crate::input::UpdateSecurityInput) #[non_exhaustive] #[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)] pub struct Builder { pub(crate) client_authentication: std::option::Option<crate::model::ClientAuthentication>, pub(crate) cluster_arn: std::option::Option<std::string::String>, pub(crate) current_version: std::option::Option<std::string::String>, pub(crate) encryption_info: std::option::Option<crate::model::EncryptionInfo>, } impl Builder { /// <p>Includes all client authentication related information.</p> pub fn client_authentication(mut self, input: crate::model::ClientAuthentication) -> Self { self.client_authentication = Some(input); self 
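// Each setter takes `self` by value and returns it, so calls chain. A hedged
// sketch of assembling an UpdateSecurityInput (hypothetical values;
// `encryption_info` stands in for a previously built
// crate::model::EncryptionInfo):
//
//     let input = crate::input::UpdateSecurityInput::builder()
//         .cluster_arn("arn:aws:kafka:us-east-1:123456789012:cluster/demo/1a2b")
//         .current_version("K3AEGXETSR30VB")
//         .encryption_info(encryption_info)
//         .build()?;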
} /// <p>Includes all client authentication related information.</p> pub fn set_client_authentication( mut self, input: std::option::Option<crate::model::ClientAuthentication>, ) -> Self { self.client_authentication = input; self } /// <p>The Amazon Resource Name (ARN) that uniquely identifies the cluster.</p> pub fn cluster_arn(mut self, input: impl Into<std::string::String>) -> Self { self.cluster_arn = Some(input.into()); self } /// <p>The Amazon Resource Name (ARN) that uniquely identifies the cluster.</p> pub fn set_cluster_arn(mut self, input: std::option::Option<std::string::String>) -> Self { self.cluster_arn = input; self } /// <p>The version of the MSK cluster to update. Cluster versions aren't simple numbers. You can describe an MSK cluster to find its version. When this update operation is successful, it generates a new cluster version.</p> pub fn current_version(mut self, input: impl Into<std::string::String>) -> Self { self.current_version = Some(input.into()); self } /// <p>The version of the MSK cluster to update. Cluster versions aren't simple numbers. You can describe an MSK cluster to find its version. When this update operation is successful, it generates a new cluster version.</p> pub fn set_current_version( mut self, input: std::option::Option<std::string::String>, ) -> Self { self.current_version = input; self } /// <p>Includes all encryption-related information.</p> pub fn encryption_info(mut self, input: crate::model::EncryptionInfo) -> Self { self.encryption_info = Some(input); self } /// <p>Includes all encryption-related information.</p> pub fn set_encryption_info( mut self, input: std::option::Option<crate::model::EncryptionInfo>, ) -> Self { self.encryption_info = input; self } /// Consumes the builder and constructs a [`UpdateSecurityInput`](crate::input::UpdateSecurityInput) pub fn build( self, ) -> std::result::Result< crate::input::UpdateSecurityInput, aws_smithy_http::operation::BuildError, > { Ok(crate::input::UpdateSecurityInput { client_authentication: self.client_authentication, cluster_arn: self.cluster_arn, current_version: self.current_version, encryption_info: self.encryption_info, }) } } } #[doc(hidden)] pub type UpdateSecurityInputOperationOutputAlias = crate::operation::UpdateSecurity; #[doc(hidden)] pub type UpdateSecurityInputOperationRetryAlias = aws_http::AwsErrorRetryPolicy; impl UpdateSecurityInput { /// Consumes the builder and constructs an Operation<[`UpdateSecurity`](crate::operation::UpdateSecurity)> #[allow(clippy::let_and_return)] #[allow(clippy::needless_borrow)] pub async fn make_operation( &self, _config: &crate::config::Config, ) -> std::result::Result< aws_smithy_http::operation::Operation< crate::operation::UpdateSecurity, aws_http::AwsErrorRetryPolicy, >, aws_smithy_http::operation::BuildError, > { fn uri_base( _input: &crate::input::UpdateSecurityInput, output: &mut String, ) -> Result<(), aws_smithy_http::operation::BuildError> { let input_39 = &_input.cluster_arn; let input_39 = input_39 .as_ref() .ok_or(aws_smithy_http::operation::BuildError::MissingField { field: "cluster_arn", details: "cannot be empty or unset", })?; let cluster_arn = aws_smithy_http::label::fmt_string(input_39, false); if cluster_arn.is_empty() { return Err(aws_smithy_http::operation::BuildError::MissingField { field: "cluster_arn", details: "cannot be empty or unset", }); } write!( output, "/v1/clusters/{ClusterArn}/security", ClusterArn = cluster_arn ) .expect("formatting should succeed"); Ok(()) } #[allow(clippy::unnecessary_wraps)] fn 
update_http_builder( input: &crate::input::UpdateSecurityInput, builder: http::request::Builder, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { let mut uri = String::new(); uri_base(input, &mut uri)?; Ok(builder.method("PATCH").uri(uri)) } #[allow(clippy::unnecessary_wraps)] fn request_builder_base( input: &crate::input::UpdateSecurityInput, ) -> std::result::Result<http::request::Builder, aws_smithy_http::operation::BuildError> { #[allow(unused_mut)] let mut builder = update_http_builder(input, http::request::Builder::new())?; builder = aws_smithy_http::header::set_header_if_absent( builder, http::header::HeaderName::from_static("content-type"), "application/json", ); Ok(builder) } let properties = aws_smithy_http::property_bag::SharedPropertyBag::new(); let request = request_builder_base(&self)?; let body = crate::operation_ser::serialize_operation_crate_operation_update_security(&self)?; let request = Self::assemble(request, body); #[allow(unused_mut)] let mut request = aws_smithy_http::operation::Request::from_parts( request.map(aws_smithy_http::body::SdkBody::from), properties, ); let mut user_agent = aws_http::user_agent::AwsUserAgent::new_from_environment( aws_types::os_shim_internal::Env::real(), crate::API_METADATA.clone(), ); if let Some(app_name) = _config.app_name() { user_agent = user_agent.with_app_name(app_name.clone()); } request.properties_mut().insert(user_agent); #[allow(unused_mut)] let mut signing_config = aws_sig_auth::signer::OperationSigningConfig::default_config(); request.properties_mut().insert(signing_config); request .properties_mut() .insert(aws_types::SigningService::from_static( _config.signing_service(), )); aws_endpoint::set_endpoint_resolver( &mut request.properties_mut(), _config.endpoint_resolver.clone(), ); if let Some(region) = &_config.region { request.properties_mut().insert(region.clone()); } aws_http::auth::set_provider( &mut request.properties_mut(), _config.credentials_provider.clone(), ); let op = aws_smithy_http::operation::Operation::new( request, crate::operation::UpdateSecurity::new(), ) .with_metadata(aws_smithy_http::operation::Metadata::new( "UpdateSecurity", "kafka", )); let op = op.with_retry_policy(aws_http::AwsErrorRetryPolicy::new()); Ok(op) } fn assemble( builder: http::request::Builder, body: aws_smithy_http::body::SdkBody, ) -> http::request::Request<aws_smithy_http::body::SdkBody> { let mut builder = builder; if let Some(content_length) = body.content_length() { builder = aws_smithy_http::header::set_header_if_absent( builder, http::header::CONTENT_LENGTH, content_length, ); } builder.body(body).expect("should be valid request") } /// Creates a new builder-style object to manufacture [`UpdateSecurityInput`](crate::input::UpdateSecurityInput) pub fn builder() -> crate::input::update_security_input::Builder { crate::input::update_security_input::Builder::default() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct UpdateSecurityInput { /// <p>Includes all client authentication related information.</p> pub client_authentication: std::option::Option<crate::model::ClientAuthentication>, /// <p>The Amazon Resource Name (ARN) that uniquely identifies the cluster.</p> pub cluster_arn: std::option::Option<std::string::String>, /// <p>The version of the MSK cluster to update. Cluster versions aren't simple numbers. You can describe an MSK cluster to find its version. 
When this update operation is successful, it generates a new cluster version.</p> pub current_version: std::option::Option<std::string::String>, /// <p>Includes all encryption-related information.</p> pub encryption_info: std::option::Option<crate::model::EncryptionInfo>, } impl UpdateSecurityInput { /// <p>Includes all client authentication related information.</p> pub fn client_authentication( &self, ) -> std::option::Option<&crate::model::ClientAuthentication> { self.client_authentication.as_ref() } /// <p>The Amazon Resource Name (ARN) that uniquely identifies the cluster.</p> pub fn cluster_arn(&self) -> std::option::Option<&str> { self.cluster_arn.as_deref() } /// <p>The version of the MSK cluster to update. Cluster versions aren't simple numbers. You can describe an MSK cluster to find its version. When this update operation is successful, it generates a new cluster version.</p> pub fn current_version(&self) -> std::option::Option<&str> { self.current_version.as_deref() } /// <p>Includes all encryption-related information.</p> pub fn encryption_info(&self) -> std::option::Option<&crate::model::EncryptionInfo> { self.encryption_info.as_ref() } } impl std::fmt::Debug for UpdateSecurityInput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("UpdateSecurityInput"); formatter.field("client_authentication", &self.client_authentication); formatter.field("cluster_arn", &self.cluster_arn); formatter.field("current_version", &self.current_version); formatter.field("encryption_info", &self.encryption_info); formatter.finish() } } /// Request body for UpdateMonitoring. #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct UpdateMonitoringInput { /// <p>The Amazon Resource Name (ARN) that uniquely identifies the cluster.</p> pub cluster_arn: std::option::Option<std::string::String>, /// <p>The version of the MSK cluster to update. Cluster versions aren't simple numbers. You can describe an MSK cluster to find its version. When this update operation is successful, it generates a new cluster version.</p> pub current_version: std::option::Option<std::string::String>, /// <p>Specifies which Apache Kafka metrics Amazon MSK gathers and sends to Amazon CloudWatch for this cluster.</p> pub enhanced_monitoring: std::option::Option<crate::model::EnhancedMonitoring>, /// <p>The settings for open monitoring.</p> pub open_monitoring: std::option::Option<crate::model::OpenMonitoringInfo>, #[allow(missing_docs)] // documentation missing in model pub logging_info: std::option::Option<crate::model::LoggingInfo>, } impl UpdateMonitoringInput { /// <p>The Amazon Resource Name (ARN) that uniquely identifies the cluster.</p> pub fn cluster_arn(&self) -> std::option::Option<&str> { self.cluster_arn.as_deref() } /// <p>The version of the MSK cluster to update. Cluster versions aren't simple numbers. You can describe an MSK cluster to find its version. 
When this update operation is successful, it generates a new cluster version.</p> pub fn current_version(&self) -> std::option::Option<&str> { self.current_version.as_deref() } /// <p>Specifies which Apache Kafka metrics Amazon MSK gathers and sends to Amazon CloudWatch for this cluster.</p> pub fn enhanced_monitoring(&self) -> std::option::Option<&crate::model::EnhancedMonitoring> { self.enhanced_monitoring.as_ref() } /// <p>The settings for open monitoring.</p> pub fn open_monitoring(&self) -> std::option::Option<&crate::model::OpenMonitoringInfo> { self.open_monitoring.as_ref() } #[allow(missing_docs)] // documentation missing in model pub fn logging_info(&self) -> std::option::Option<&crate::model::LoggingInfo> { self.logging_info.as_ref() } } impl std::fmt::Debug for UpdateMonitoringInput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("UpdateMonitoringInput"); formatter.field("cluster_arn", &self.cluster_arn); formatter.field("current_version", &self.current_version); formatter.field("enhanced_monitoring", &self.enhanced_monitoring); formatter.field("open_monitoring", &self.open_monitoring); formatter.field("logging_info", &self.logging_info); formatter.finish() } } /// Request body for UpdateConnectivity. #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct UpdateConnectivityInput { /// <p>The Amazon Resource Name (ARN) that uniquely identifies the cluster.</p> pub cluster_arn: std::option::Option<std::string::String>, /// <p>Information about the broker access configuration.</p> pub connectivity_info: std::option::Option<crate::model::ConnectivityInfo>, /// <p>The version of the MSK cluster to update. Cluster versions aren't simple numbers. You can describe an MSK cluster to find its version. When this update operation is successful, it generates a new cluster version.</p> pub current_version: std::option::Option<std::string::String>, } impl UpdateConnectivityInput { /// <p>The Amazon Resource Name (ARN) that uniquely identifies the cluster.</p> pub fn cluster_arn(&self) -> std::option::Option<&str> { self.cluster_arn.as_deref() } /// <p>Information about the broker access configuration.</p> pub fn connectivity_info(&self) -> std::option::Option<&crate::model::ConnectivityInfo> { self.connectivity_info.as_ref() } /// <p>The version of the MSK cluster to update. Cluster versions aren't simple numbers. You can describe an MSK cluster to find its version. When this update operation is successful, it generates a new cluster version.</p> pub fn current_version(&self) -> std::option::Option<&str> { self.current_version.as_deref() } } impl std::fmt::Debug for UpdateConnectivityInput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("UpdateConnectivityInput"); formatter.field("cluster_arn", &self.cluster_arn); formatter.field("connectivity_info", &self.connectivity_info); formatter.field("current_version", &self.current_version); formatter.finish() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct UpdateConfigurationInput { /// <p>The Amazon Resource Name (ARN) of the configuration.</p> pub arn: std::option::Option<std::string::String>, /// <p>The description of the configuration revision.</p> pub description: std::option::Option<std::string::String>, /// <p>Contents of the <filename>server.properties</filename> file.
When using the API, you must ensure that the contents of the file are base64 encoded. /// When using the AWS Management Console, the SDK, or the AWS CLI, the contents of <filename>server.properties</filename> can be in plaintext.</p> pub server_properties: std::option::Option<aws_smithy_types::Blob>, } impl UpdateConfigurationInput { /// <p>The Amazon Resource Name (ARN) of the configuration.</p> pub fn arn(&self) -> std::option::Option<&str> { self.arn.as_deref() } /// <p>The description of the configuration revision.</p> pub fn description(&self) -> std::option::Option<&str> { self.description.as_deref() } /// <p>Contents of the <filename>server.properties</filename> file. When using the API, you must ensure that the contents of the file are base64 encoded. /// When using the AWS Management Console, the SDK, or the AWS CLI, the contents of <filename>server.properties</filename> can be in plaintext.</p> pub fn server_properties(&self) -> std::option::Option<&aws_smithy_types::Blob> { self.server_properties.as_ref() } } impl std::fmt::Debug for UpdateConfigurationInput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("UpdateConfigurationInput"); formatter.field("arn", &self.arn); formatter.field("description", &self.description); formatter.field("server_properties", &self.server_properties); formatter.finish() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct UpdateClusterKafkaVersionInput { /// <p>The Amazon Resource Name (ARN) of the cluster to be updated.</p> pub cluster_arn: std::option::Option<std::string::String>, /// <p>The custom configuration that should be applied on the new version of cluster.</p> pub configuration_info: std::option::Option<crate::model::ConfigurationInfo>, /// <p>Current cluster version.</p> pub current_version: std::option::Option<std::string::String>, /// <p>Target Kafka version.</p> pub target_kafka_version: std::option::Option<std::string::String>, } impl UpdateClusterKafkaVersionInput { /// <p>The Amazon Resource Name (ARN) of the cluster to be updated.</p> pub fn cluster_arn(&self) -> std::option::Option<&str> { self.cluster_arn.as_deref() } /// <p>The custom configuration that should be applied on the new version of cluster.</p> pub fn configuration_info(&self) -> std::option::Option<&crate::model::ConfigurationInfo> { self.configuration_info.as_ref() } /// <p>Current cluster version.</p> pub fn current_version(&self) -> std::option::Option<&str> { self.current_version.as_deref() } /// <p>Target Kafka version.</p> pub fn target_kafka_version(&self) -> std::option::Option<&str> { self.target_kafka_version.as_deref() } } impl std::fmt::Debug for UpdateClusterKafkaVersionInput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("UpdateClusterKafkaVersionInput"); formatter.field("cluster_arn", &self.cluster_arn); formatter.field("configuration_info", &self.configuration_info); formatter.field("current_version", &self.current_version); formatter.field("target_kafka_version", &self.target_kafka_version); formatter.finish() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct UpdateClusterConfigurationInput { /// <p>The Amazon Resource Name (ARN) that uniquely identifies the cluster.</p> pub cluster_arn: std::option::Option<std::string::String>, /// <p>Represents the 
configuration that you want MSK to use for the brokers in a cluster.</p> pub configuration_info: std::option::Option<crate::model::ConfigurationInfo>, /// <p>The version of the cluster that needs to be updated.</p> pub current_version: std::option::Option<std::string::String>, } impl UpdateClusterConfigurationInput { /// <p>The Amazon Resource Name (ARN) that uniquely identifies the cluster.</p> pub fn cluster_arn(&self) -> std::option::Option<&str> { self.cluster_arn.as_deref() } /// <p>Represents the configuration that you want MSK to use for the brokers in a cluster.</p> pub fn configuration_info(&self) -> std::option::Option<&crate::model::ConfigurationInfo> { self.configuration_info.as_ref() } /// <p>The version of the cluster that needs to be updated.</p> pub fn current_version(&self) -> std::option::Option<&str> { self.current_version.as_deref() } } impl std::fmt::Debug for UpdateClusterConfigurationInput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("UpdateClusterConfigurationInput"); formatter.field("cluster_arn", &self.cluster_arn); formatter.field("configuration_info", &self.configuration_info); formatter.field("current_version", &self.current_version); formatter.finish() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct UpdateBrokerTypeInput { /// <p>The Amazon Resource Name (ARN) that uniquely identifies the cluster.</p> pub cluster_arn: std::option::Option<std::string::String>, /// <p>The cluster version that you want to change. After this operation completes successfully, the cluster will have a new version.</p> pub current_version: std::option::Option<std::string::String>, /// <p>The Amazon MSK broker type that you want all of the brokers in this cluster to be.</p> pub target_instance_type: std::option::Option<std::string::String>, } impl UpdateBrokerTypeInput { /// <p>The Amazon Resource Name (ARN) that uniquely identifies the cluster.</p> pub fn cluster_arn(&self) -> std::option::Option<&str> { self.cluster_arn.as_deref() } /// <p>The cluster version that you want to change. After this operation completes successfully, the cluster will have a new version.</p> pub fn current_version(&self) -> std::option::Option<&str> { self.current_version.as_deref() } /// <p>The Amazon MSK broker type that you want all of the brokers in this cluster to be.</p> pub fn target_instance_type(&self) -> std::option::Option<&str> { self.target_instance_type.as_deref() } } impl std::fmt::Debug for UpdateBrokerTypeInput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("UpdateBrokerTypeInput"); formatter.field("cluster_arn", &self.cluster_arn); formatter.field("current_version", &self.current_version); formatter.field("target_instance_type", &self.target_instance_type); formatter.finish() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct UpdateBrokerStorageInput { /// <p>The Amazon Resource Name (ARN) that uniquely identifies the cluster.</p> pub cluster_arn: std::option::Option<std::string::String>, /// <p>The version of cluster to update from. 
A successful operation will then generate a new version.</p> pub current_version: std::option::Option<std::string::String>, /// <p>Describes the target volume size and the ID of the broker to apply the update to.</p> pub target_broker_ebs_volume_info: std::option::Option<std::vec::Vec<crate::model::BrokerEbsVolumeInfo>>, } impl UpdateBrokerStorageInput { /// <p>The Amazon Resource Name (ARN) that uniquely identifies the cluster.</p> pub fn cluster_arn(&self) -> std::option::Option<&str> { self.cluster_arn.as_deref() } /// <p>The version of cluster to update from. A successful operation will then generate a new version.</p> pub fn current_version(&self) -> std::option::Option<&str> { self.current_version.as_deref() } /// <p>Describes the target volume size and the ID of the broker to apply the update to.</p> pub fn target_broker_ebs_volume_info( &self, ) -> std::option::Option<&[crate::model::BrokerEbsVolumeInfo]> { self.target_broker_ebs_volume_info.as_deref() } } impl std::fmt::Debug for UpdateBrokerStorageInput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("UpdateBrokerStorageInput"); formatter.field("cluster_arn", &self.cluster_arn); formatter.field("current_version", &self.current_version); formatter.field( "target_broker_ebs_volume_info", &self.target_broker_ebs_volume_info, ); formatter.finish() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct UpdateBrokerCountInput { /// <p>The Amazon Resource Name (ARN) that uniquely identifies the cluster.</p> pub cluster_arn: std::option::Option<std::string::String>, /// <p>The version of cluster to update from. A successful operation will then generate a new version.</p> pub current_version: std::option::Option<std::string::String>, /// <p>The number of broker nodes that you want the cluster to have after this operation completes successfully.</p> pub target_number_of_broker_nodes: i32, } impl UpdateBrokerCountInput { /// <p>The Amazon Resource Name (ARN) that uniquely identifies the cluster.</p> pub fn cluster_arn(&self) -> std::option::Option<&str> { self.cluster_arn.as_deref() } /// <p>The version of cluster to update from. A successful operation will then generate a new version.</p> pub fn current_version(&self) -> std::option::Option<&str> { self.current_version.as_deref() } /// <p>The number of broker nodes that you want the cluster to have after this operation completes successfully.</p> pub fn target_number_of_broker_nodes(&self) -> i32 { self.target_number_of_broker_nodes } } impl std::fmt::Debug for UpdateBrokerCountInput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("UpdateBrokerCountInput"); formatter.field("cluster_arn", &self.cluster_arn); formatter.field("current_version", &self.current_version); formatter.field( "target_number_of_broker_nodes", &self.target_number_of_broker_nodes, ); formatter.finish() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct UntagResourceInput { /// <p>The Amazon Resource Name (ARN) that uniquely identifies the resource that's associated with the tags.</p> pub resource_arn: std::option::Option<std::string::String>, /// <p>Tag keys must be unique for a given cluster. In addition, the following restrictions apply:</p> /// <ul> /// <li> /// <p>Each tag key must be unique. 
If you add a tag with a key that's already in /// use, your new tag overwrites the existing key-value pair. </p> /// </li> /// <li> /// <p>You can't start a tag key with aws: because this prefix is reserved for use /// by AWS. AWS creates tags that begin with this prefix on your behalf, but /// you can't edit or delete them.</p> /// </li> /// <li> /// <p>Tag keys must be between 1 and 128 Unicode characters in length.</p> /// </li> /// <li> /// <p>Tag keys must consist of the following characters: Unicode letters, digits, /// white space, and the following special characters: _ . / = + - /// @.</p> /// </li> /// </ul> pub tag_keys: std::option::Option<std::vec::Vec<std::string::String>>, } impl UntagResourceInput { /// <p>The Amazon Resource Name (ARN) that uniquely identifies the resource that's associated with the tags.</p> pub fn resource_arn(&self) -> std::option::Option<&str> { self.resource_arn.as_deref() } /// <p>Tag keys must be unique for a given cluster. In addition, the following restrictions apply:</p> /// <ul> /// <li> /// <p>Each tag key must be unique. If you add a tag with a key that's already in /// use, your new tag overwrites the existing key-value pair. </p> /// </li> /// <li> /// <p>You can't start a tag key with aws: because this prefix is reserved for use /// by AWS. AWS creates tags that begin with this prefix on your behalf, but /// you can't edit or delete them.</p> /// </li> /// <li> /// <p>Tag keys must be between 1 and 128 Unicode characters in length.</p> /// </li> /// <li> /// <p>Tag keys must consist of the following characters: Unicode letters, digits, /// white space, and the following special characters: _ . / = + - /// @.</p> /// </li> /// </ul> pub fn tag_keys(&self) -> std::option::Option<&[std::string::String]> { self.tag_keys.as_deref() } } impl std::fmt::Debug for UntagResourceInput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("UntagResourceInput"); formatter.field("resource_arn", &self.resource_arn); formatter.field("tag_keys", &self.tag_keys); formatter.finish() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct TagResourceInput { /// <p>The Amazon Resource Name (ARN) that uniquely identifies the resource that's associated with the tags.</p> pub resource_arn: std::option::Option<std::string::String>, /// <p>The key-value pair for the resource tag.</p> pub tags: std::option::Option<std::collections::HashMap<std::string::String, std::string::String>>, } impl TagResourceInput { /// <p>The Amazon Resource Name (ARN) that uniquely identifies the resource that's associated with the tags.</p> pub fn resource_arn(&self) -> std::option::Option<&str> { self.resource_arn.as_deref() } /// <p>The key-value pair for the resource tag.</p> pub fn tags( &self, ) -> std::option::Option<&std::collections::HashMap<std::string::String, std::string::String>> { self.tags.as_ref() } } impl std::fmt::Debug for TagResourceInput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("TagResourceInput"); formatter.field("resource_arn", &self.resource_arn); formatter.field("tags", &self.tags); formatter.finish() } } /// Reboots a node. #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct RebootBrokerInput { /// <p>The list of broker IDs to be rebooted. 
The reboot-broker operation supports rebooting one broker at a time.</p> pub broker_ids: std::option::Option<std::vec::Vec<std::string::String>>, /// <p>The Amazon Resource Name (ARN) of the cluster to be updated.</p> pub cluster_arn: std::option::Option<std::string::String>, } impl RebootBrokerInput { /// <p>The list of broker IDs to be rebooted. The reboot-broker operation supports rebooting one broker at a time.</p> pub fn broker_ids(&self) -> std::option::Option<&[std::string::String]> { self.broker_ids.as_deref() } /// <p>The Amazon Resource Name (ARN) of the cluster to be updated.</p> pub fn cluster_arn(&self) -> std::option::Option<&str> { self.cluster_arn.as_deref() } } impl std::fmt::Debug for RebootBrokerInput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("RebootBrokerInput"); formatter.field("broker_ids", &self.broker_ids); formatter.field("cluster_arn", &self.cluster_arn); formatter.finish() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct ListTagsForResourceInput { /// <p>The Amazon Resource Name (ARN) that uniquely identifies the resource that's associated with the tags.</p> pub resource_arn: std::option::Option<std::string::String>, } impl ListTagsForResourceInput { /// <p>The Amazon Resource Name (ARN) that uniquely identifies the resource that's associated with the tags.</p> pub fn resource_arn(&self) -> std::option::Option<&str> { self.resource_arn.as_deref() } } impl std::fmt::Debug for ListTagsForResourceInput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("ListTagsForResourceInput"); formatter.field("resource_arn", &self.resource_arn); formatter.finish() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct ListScramSecretsInput { /// <p>The Amazon Resource Name (ARN) of the cluster.</p> pub cluster_arn: std::option::Option<std::string::String>, /// <p>The maximum number of results to return in the response.</p> pub max_results: i32, /// <p>The paginated results token returned by a previous query.</p> pub next_token: std::option::Option<std::string::String>, } impl ListScramSecretsInput { /// <p>The Amazon Resource Name (ARN) of the cluster.</p> pub fn cluster_arn(&self) -> std::option::Option<&str> { self.cluster_arn.as_deref() } /// <p>The maximum number of results to return in the response.</p> pub fn max_results(&self) -> i32 { self.max_results } /// <p>The paginated results token returned by a previous query.</p> pub fn next_token(&self) -> std::option::Option<&str> { self.next_token.as_deref() } } impl std::fmt::Debug for ListScramSecretsInput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("ListScramSecretsInput"); formatter.field("cluster_arn", &self.cluster_arn); formatter.field("max_results", &self.max_results); formatter.field("next_token", &self.next_token); formatter.finish() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct ListNodesInput { /// <p>The Amazon Resource Name (ARN) that uniquely identifies the cluster.</p> pub cluster_arn: std::option::Option<std::string::String>, /// <p>The maximum number of results to return in the response. If there are more results, the response includes a NextToken parameter.</p> pub max_results: i32, /// <p>The paginated results marker. When the result of the operation is truncated, the call returns NextToken in the response.
/// To get the next batch, provide this token in your next request.</p> pub next_token: std::option::Option<std::string::String>, } impl ListNodesInput { /// <p>The Amazon Resource Name (ARN) that uniquely identifies the cluster.</p> pub fn cluster_arn(&self) -> std::option::Option<&str> { self.cluster_arn.as_deref() } /// <p>The maximum number of results to return in the response. If there are more results, the response includes a NextToken parameter.</p> pub fn max_results(&self) -> i32 { self.max_results } /// <p>The paginated results marker. When the result of the operation is truncated, the call returns NextToken in the response. /// To get the next batch, provide this token in your next request.</p> pub fn next_token(&self) -> std::option::Option<&str> { self.next_token.as_deref() } } impl std::fmt::Debug for ListNodesInput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("ListNodesInput"); formatter.field("cluster_arn", &self.cluster_arn); formatter.field("max_results", &self.max_results); formatter.field("next_token", &self.next_token); formatter.finish() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct ListKafkaVersionsInput { /// <p>The maximum number of results to return in the response. If there are more results, the response includes a NextToken parameter.</p> pub max_results: i32, /// <p>The paginated results marker. When the result of the operation is truncated, the call returns NextToken in the response. To get the next batch, provide this token in your next request.</p> pub next_token: std::option::Option<std::string::String>, } impl ListKafkaVersionsInput { /// <p>The maximum number of results to return in the response. If there are more results, the response includes a NextToken parameter.</p> pub fn max_results(&self) -> i32 { self.max_results } /// <p>The paginated results marker. When the result of the operation is truncated, the call returns NextToken in the response. To get the next batch, provide this token in your next request.</p> pub fn next_token(&self) -> std::option::Option<&str> { self.next_token.as_deref() } } impl std::fmt::Debug for ListKafkaVersionsInput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("ListKafkaVersionsInput"); formatter.field("max_results", &self.max_results); formatter.field("next_token", &self.next_token); formatter.finish() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct ListConfigurationsInput { /// <p>The maximum number of results to return in the response. If there are more results, the response includes a NextToken parameter.</p> pub max_results: i32, /// <p>The paginated results marker. When the result of the operation is truncated, the call returns NextToken in the response. /// To get the next batch, provide this token in your next request.</p> pub next_token: std::option::Option<std::string::String>, } impl ListConfigurationsInput { /// <p>The maximum number of results to return in the response. If there are more results, the response includes a NextToken parameter.</p> pub fn max_results(&self) -> i32 { self.max_results } /// <p>The paginated results marker. When the result of the operation is truncated, the call returns NextToken in the response. 
/// To get the next batch, provide this token in your next request.</p> pub fn next_token(&self) -> std::option::Option<&str> { self.next_token.as_deref() } } impl std::fmt::Debug for ListConfigurationsInput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("ListConfigurationsInput"); formatter.field("max_results", &self.max_results); formatter.field("next_token", &self.next_token); formatter.finish() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct ListConfigurationRevisionsInput { /// <p>The Amazon Resource Name (ARN) that uniquely identifies an MSK configuration and all of its revisions.</p> pub arn: std::option::Option<std::string::String>, /// <p>The maximum number of results to return in the response. If there are more results, the response includes a NextToken parameter.</p> pub max_results: i32, /// <p>The paginated results marker. When the result of the operation is truncated, the call returns NextToken in the response. /// To get the next batch, provide this token in your next request.</p> pub next_token: std::option::Option<std::string::String>, } impl ListConfigurationRevisionsInput { /// <p>The Amazon Resource Name (ARN) that uniquely identifies an MSK configuration and all of its revisions.</p> pub fn arn(&self) -> std::option::Option<&str> { self.arn.as_deref() } /// <p>The maximum number of results to return in the response. If there are more results, the response includes a NextToken parameter.</p> pub fn max_results(&self) -> i32 { self.max_results } /// <p>The paginated results marker. When the result of the operation is truncated, the call returns NextToken in the response. /// To get the next batch, provide this token in your next request.</p> pub fn next_token(&self) -> std::option::Option<&str> { self.next_token.as_deref() } } impl std::fmt::Debug for ListConfigurationRevisionsInput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("ListConfigurationRevisionsInput"); formatter.field("arn", &self.arn); formatter.field("max_results", &self.max_results); formatter.field("next_token", &self.next_token); formatter.finish() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct ListClustersInput { /// <p>Specify a prefix of the name of the clusters that you want to list. The service lists all the clusters whose names start with this prefix.</p> pub cluster_name_filter: std::option::Option<std::string::String>, /// <p>The maximum number of results to return in the response. If there are more results, the response includes a NextToken parameter.</p> pub max_results: i32, /// <p>The paginated results marker. When the result of the operation is truncated, the call returns NextToken in the response. /// To get the next batch, provide this token in your next request.</p> pub next_token: std::option::Option<std::string::String>, } impl ListClustersInput { /// <p>Specify a prefix of the name of the clusters that you want to list. The service lists all the clusters whose names start with this prefix.</p> pub fn cluster_name_filter(&self) -> std::option::Option<&str> { self.cluster_name_filter.as_deref() } /// <p>The maximum number of results to return in the response. 
If there are more results, the response includes a NextToken parameter.</p> pub fn max_results(&self) -> i32 { self.max_results } /// <p>The paginated results marker. When the result of the operation is truncated, the call returns NextToken in the response. /// To get the next batch, provide this token in your next request.</p> pub fn next_token(&self) -> std::option::Option<&str> { self.next_token.as_deref() } } impl std::fmt::Debug for ListClustersInput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("ListClustersInput"); formatter.field("cluster_name_filter", &self.cluster_name_filter); formatter.field("max_results", &self.max_results); formatter.field("next_token", &self.next_token); formatter.finish() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct ListClusterOperationsInput { /// <p>The Amazon Resource Name (ARN) that uniquely identifies the cluster.</p> pub cluster_arn: std::option::Option<std::string::String>, /// <p>The maximum number of results to return in the response. If there are more results, the response includes a NextToken parameter.</p> pub max_results: i32, /// <p>The paginated results marker. When the result of the operation is truncated, the call returns NextToken in the response. /// To get the next batch, provide this token in your next request.</p> pub next_token: std::option::Option<std::string::String>, } impl ListClusterOperationsInput { /// <p>The Amazon Resource Name (ARN) that uniquely identifies the cluster.</p> pub fn cluster_arn(&self) -> std::option::Option<&str> { self.cluster_arn.as_deref() } /// <p>The maximum number of results to return in the response. If there are more results, the response includes a NextToken parameter.</p> pub fn max_results(&self) -> i32 { self.max_results } /// <p>The paginated results marker. When the result of the operation is truncated, the call returns NextToken in the response. 
/// To get the next batch, provide this token in your next request.</p> pub fn next_token(&self) -> std::option::Option<&str> { self.next_token.as_deref() } } impl std::fmt::Debug for ListClusterOperationsInput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("ListClusterOperationsInput"); formatter.field("cluster_arn", &self.cluster_arn); formatter.field("max_results", &self.max_results); formatter.field("next_token", &self.next_token); formatter.finish() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct GetCompatibleKafkaVersionsInput { /// <p>The Amazon Resource Name (ARN) of the cluster to check.</p> pub cluster_arn: std::option::Option<std::string::String>, } impl GetCompatibleKafkaVersionsInput { /// <p>The Amazon Resource Name (ARN) of the cluster to check.</p> pub fn cluster_arn(&self) -> std::option::Option<&str> { self.cluster_arn.as_deref() } } impl std::fmt::Debug for GetCompatibleKafkaVersionsInput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("GetCompatibleKafkaVersionsInput"); formatter.field("cluster_arn", &self.cluster_arn); formatter.finish() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct GetBootstrapBrokersInput { /// <p>The Amazon Resource Name (ARN) that uniquely identifies the cluster.</p> pub cluster_arn: std::option::Option<std::string::String>, } impl GetBootstrapBrokersInput { /// <p>The Amazon Resource Name (ARN) that uniquely identifies the cluster.</p> pub fn cluster_arn(&self) -> std::option::Option<&str> { self.cluster_arn.as_deref() } } impl std::fmt::Debug for GetBootstrapBrokersInput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("GetBootstrapBrokersInput"); formatter.field("cluster_arn", &self.cluster_arn); formatter.finish() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct DescribeConfigurationRevisionInput { /// <p>The Amazon Resource Name (ARN) that uniquely identifies an MSK configuration and all of its revisions.</p> pub arn: std::option::Option<std::string::String>, /// <p>A number that uniquely identifies a revision of an MSK configuration.</p> pub revision: i64, } impl DescribeConfigurationRevisionInput { /// <p>The Amazon Resource Name (ARN) that uniquely identifies an MSK configuration and all of its revisions.</p> pub fn arn(&self) -> std::option::Option<&str> { self.arn.as_deref() } /// <p>A number that uniquely identifies a revision of an MSK configuration.</p> pub fn revision(&self) -> i64 { self.revision } } impl std::fmt::Debug for DescribeConfigurationRevisionInput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("DescribeConfigurationRevisionInput"); formatter.field("arn", &self.arn); formatter.field("revision", &self.revision); formatter.finish() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct DescribeConfigurationInput { /// <p>The Amazon Resource Name
(ARN) that uniquely identifies an MSK configuration and all of its revisions.</p> pub fn arn(&self) -> std::option::Option<&str> { self.arn.as_deref() } } impl std::fmt::Debug for DescribeConfigurationInput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("DescribeConfigurationInput"); formatter.field("arn", &self.arn); formatter.finish() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct DescribeClusterOperationInput { /// <p>The Amazon Resource Name (ARN) that uniquely identifies the MSK cluster operation.</p> pub cluster_operation_arn: std::option::Option<std::string::String>, } impl DescribeClusterOperationInput { /// <p>The Amazon Resource Name (ARN) that uniquely identifies the MSK cluster operation.</p> pub fn cluster_operation_arn(&self) -> std::option::Option<&str> { self.cluster_operation_arn.as_deref() } } impl std::fmt::Debug for DescribeClusterOperationInput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("DescribeClusterOperationInput"); formatter.field("cluster_operation_arn", &self.cluster_operation_arn); formatter.finish() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct DescribeClusterInput { /// <p>The Amazon Resource Name (ARN) that uniquely identifies the cluster.</p> pub cluster_arn: std::option::Option<std::string::String>, } impl DescribeClusterInput { /// <p>The Amazon Resource Name (ARN) that uniquely identifies the cluster.</p> pub fn cluster_arn(&self) -> std::option::Option<&str> { self.cluster_arn.as_deref() } } impl std::fmt::Debug for DescribeClusterInput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("DescribeClusterInput"); formatter.field("cluster_arn", &self.cluster_arn); formatter.finish() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct DeleteConfigurationInput { /// <p>The Amazon Resource Name (ARN) that uniquely identifies an MSK configuration.</p> pub arn: std::option::Option<std::string::String>, } impl DeleteConfigurationInput { /// <p>The Amazon Resource Name (ARN) that uniquely identifies an MSK configuration.</p> pub fn arn(&self) -> std::option::Option<&str> { self.arn.as_deref() } } impl std::fmt::Debug for DeleteConfigurationInput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("DeleteConfigurationInput"); formatter.field("arn", &self.arn); formatter.finish() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct DeleteClusterInput { /// <p>The Amazon Resource Name (ARN) that uniquely identifies the cluster.</p> pub cluster_arn: std::option::Option<std::string::String>, /// <p>The current version of the MSK cluster.</p> pub current_version: std::option::Option<std::string::String>, } impl DeleteClusterInput { /// <p>The Amazon Resource Name (ARN) that uniquely identifies the cluster.</p> pub fn cluster_arn(&self) -> std::option::Option<&str> { self.cluster_arn.as_deref() } /// <p>The current version of the MSK cluster.</p> pub fn current_version(&self) -> std::option::Option<&str> { self.current_version.as_deref() } } impl std::fmt::Debug for DeleteClusterInput { 
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("DeleteClusterInput"); formatter.field("cluster_arn", &self.cluster_arn); formatter.field("current_version", &self.current_version); formatter.finish() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct CreateConfigurationInput { /// <p>The description of the configuration.</p> pub description: std::option::Option<std::string::String>, /// <p>The versions of Apache Kafka with which you can use this MSK configuration.</p> pub kafka_versions: std::option::Option<std::vec::Vec<std::string::String>>, /// <p>The name of the configuration.</p> pub name: std::option::Option<std::string::String>, /// <p>Contents of the <filename>server.properties</filename> file. When using the API, you must ensure that the contents of the file are base64 encoded. /// When using the AWS Management Console, the SDK, or the AWS CLI, the contents of <filename>server.properties</filename> can be in plaintext.</p> pub server_properties: std::option::Option<aws_smithy_types::Blob>, } impl CreateConfigurationInput { /// <p>The description of the configuration.</p> pub fn description(&self) -> std::option::Option<&str> { self.description.as_deref() } /// <p>The versions of Apache Kafka with which you can use this MSK configuration.</p> pub fn kafka_versions(&self) -> std::option::Option<&[std::string::String]> { self.kafka_versions.as_deref() } /// <p>The name of the configuration.</p> pub fn name(&self) -> std::option::Option<&str> { self.name.as_deref() } /// <p>Contents of the <filename>server.properties</filename> file. When using the API, you must ensure that the contents of the file are base64 encoded. /// When using the AWS Management Console, the SDK, or the AWS CLI, the contents of <filename>server.properties</filename> can be in plaintext.</p> pub fn server_properties(&self) -> std::option::Option<&aws_smithy_types::Blob> { self.server_properties.as_ref() } } impl std::fmt::Debug for CreateConfigurationInput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("CreateConfigurationInput"); formatter.field("description", &self.description); formatter.field("kafka_versions", &self.kafka_versions); formatter.field("name", &self.name); formatter.field("server_properties", &self.server_properties); formatter.finish() } } #[allow(missing_docs)] // documentation missing in model #[non_exhaustive] #[derive(std::clone::Clone, std::cmp::PartialEq)] pub struct CreateClusterInput { /// <p>Information about the broker nodes in the cluster.</p> pub broker_node_group_info: std::option::Option<crate::model::BrokerNodeGroupInfo>, /// <p>Includes all client authentication related information.</p> pub client_authentication: std::option::Option<crate::model::ClientAuthentication>, /// <p>The name of the cluster.</p> pub cluster_name: std::option::Option<std::string::String>, /// <p>Represents the configuration that you want MSK to use for the brokers in a cluster.</p> pub configuration_info: std::option::Option<crate::model::ConfigurationInfo>, /// <p>Includes all encryption-related information.</p> pub encryption_info: std::option::Option<crate::model::EncryptionInfo>, /// <p>Specifies the level of monitoring for the MSK cluster. 
The possible values are DEFAULT, PER_BROKER, PER_TOPIC_PER_BROKER, and PER_TOPIC_PER_PARTITION.</p> pub enhanced_monitoring: std::option::Option<crate::model::EnhancedMonitoring>, /// <p>The settings for open monitoring.</p> pub open_monitoring: std::option::Option<crate::model::OpenMonitoringInfo>, /// <p>The version of Apache Kafka.</p> pub kafka_version: std::option::Option<std::string::String>, #[allow(missing_docs)] // documentation missing in model pub logging_info: std::option::Option<crate::model::LoggingInfo>, /// <p>The number of broker nodes in the cluster.</p> pub number_of_broker_nodes: i32, /// <p>Create tags when creating the cluster.</p> pub tags: std::option::Option<std::collections::HashMap<std::string::String, std::string::String>>, } impl CreateClusterInput { /// <p>Information about the broker nodes in the cluster.</p> pub fn broker_node_group_info( &self, ) -> std::option::Option<&crate::model::BrokerNodeGroupInfo> { self.broker_node_group_info.as_ref() } /// <p>Includes all client authentication related information.</p> pub fn client_authentication( &self, ) -> std::option::Option<&crate::model::ClientAuthentication> { self.client_authentication.as_ref() } /// <p>The name of the cluster.</p> pub fn cluster_name(&self) -> std::option::Option<&str> { self.cluster_name.as_deref() } /// <p>Represents the configuration that you want MSK to use for the brokers in a cluster.</p> pub fn configuration_info(&self) -> std::option::Option<&crate::model::ConfigurationInfo> { self.configuration_info.as_ref() } /// <p>Includes all encryption-related information.</p> pub fn encryption_info(&self) -> std::option::Option<&crate::model::EncryptionInfo> { self.encryption_info.as_ref() } /// <p>Specifies the level of monitoring for the MSK cluster. 
/// The possible values are DEFAULT, PER_BROKER, PER_TOPIC_PER_BROKER, and PER_TOPIC_PER_PARTITION.</p>
    pub fn enhanced_monitoring(&self) -> std::option::Option<&crate::model::EnhancedMonitoring> {
        self.enhanced_monitoring.as_ref()
    }
    /// <p>The settings for open monitoring.</p>
    pub fn open_monitoring(&self) -> std::option::Option<&crate::model::OpenMonitoringInfo> {
        self.open_monitoring.as_ref()
    }
    /// <p>The version of Apache Kafka.</p>
    pub fn kafka_version(&self) -> std::option::Option<&str> {
        self.kafka_version.as_deref()
    }
    #[allow(missing_docs)] // documentation missing in model
    pub fn logging_info(&self) -> std::option::Option<&crate::model::LoggingInfo> {
        self.logging_info.as_ref()
    }
    /// <p>The number of broker nodes in the cluster.</p>
    pub fn number_of_broker_nodes(&self) -> i32 {
        self.number_of_broker_nodes
    }
    /// <p>Create tags when creating the cluster.</p>
    pub fn tags(
        &self,
    ) -> std::option::Option<&std::collections::HashMap<std::string::String, std::string::String>>
    {
        self.tags.as_ref()
    }
}
impl std::fmt::Debug for CreateClusterInput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("CreateClusterInput");
        formatter.field("broker_node_group_info", &self.broker_node_group_info);
        formatter.field("client_authentication", &self.client_authentication);
        formatter.field("cluster_name", &self.cluster_name);
        formatter.field("configuration_info", &self.configuration_info);
        formatter.field("encryption_info", &self.encryption_info);
        formatter.field("enhanced_monitoring", &self.enhanced_monitoring);
        formatter.field("open_monitoring", &self.open_monitoring);
        formatter.field("kafka_version", &self.kafka_version);
        formatter.field("logging_info", &self.logging_info);
        formatter.field("number_of_broker_nodes", &self.number_of_broker_nodes);
        formatter.field("tags", &self.tags);
        formatter.finish()
    }
}
/// <p>Disassociates SASL/SCRAM secrets from the cluster.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct BatchDisassociateScramSecretInput {
    /// <p>The Amazon Resource Name (ARN) of the cluster to be updated.</p>
    pub cluster_arn: std::option::Option<std::string::String>,
    /// <p>List of AWS Secrets Manager secret ARNs.</p>
    pub secret_arn_list: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl BatchDisassociateScramSecretInput {
    /// <p>The Amazon Resource Name (ARN) of the cluster to be updated.</p>
    pub fn cluster_arn(&self) -> std::option::Option<&str> {
        self.cluster_arn.as_deref()
    }
    /// <p>List of AWS Secrets Manager secret ARNs.</p>
    pub fn secret_arn_list(&self) -> std::option::Option<&[std::string::String]> {
        self.secret_arn_list.as_deref()
    }
}
impl std::fmt::Debug for BatchDisassociateScramSecretInput {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut formatter = f.debug_struct("BatchDisassociateScramSecretInput");
        formatter.field("cluster_arn", &self.cluster_arn);
        formatter.field("secret_arn_list", &self.secret_arn_list);
        formatter.finish()
    }
}
/// <p>Associates SASL/SCRAM secrets with the cluster.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct BatchAssociateScramSecretInput {
    /// <p>The Amazon Resource Name (ARN) of the cluster to be updated.</p>
    pub cluster_arn: std::option::Option<std::string::String>,
    /// <p>List of AWS Secrets Manager secret ARNs.</p>
    pub secret_arn_list: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl BatchAssociateScramSecretInput {
    /// <p>The Amazon Resource Name (ARN) of the cluster to be updated.</p>
    pub
fn cluster_arn(&self) -> std::option::Option<&str> { self.cluster_arn.as_deref() } /// <p>List of AWS Secrets Manager secret ARNs.</p> pub fn secret_arn_list(&self) -> std::option::Option<&[std::string::String]> { self.secret_arn_list.as_deref() } } impl std::fmt::Debug for BatchAssociateScramSecretInput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut formatter = f.debug_struct("BatchAssociateScramSecretInput"); formatter.field("cluster_arn", &self.cluster_arn); formatter.field("secret_arn_list", &self.secret_arn_list); formatter.finish() } }
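// ---------------------------------------------------------------------------
// Usage sketch (not part of the generated model): the List* inputs above
// describe NextToken-style pagination. The loop below shows the general
// marker-driven pattern; `fetch_page` is a hypothetical stand-in for an actual
// request/response round trip and is only illustrative.
// ---------------------------------------------------------------------------
#[allow(dead_code)]
fn collect_all_pages(
    mut fetch_page: impl FnMut(Option<&str>) -> (Vec<String>, Option<String>),
) -> Vec<String> {
    let mut results = Vec::new();
    let mut next_token: Option<String> = None;
    loop {
        // Pass the marker from the previous page (None on the first call).
        let (mut page, token) = fetch_page(next_token.as_deref());
        results.append(&mut page);
        match token {
            Some(t) => next_token = Some(t), // truncated: more pages remain
            None => break,                   // no NextToken: last page reached
        }
    }
    results
}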
uri_base
component---src-pages-index-js-35b93a744226a63aa04e.js
(window.webpackJsonp=window.webpackJsonp||[]).push([[6],{RXBc:function(e,t,a){"use strict";a.r(t);var n=a("q1tI"),i=a.n(n),r=a("Wbzz"),o=a("7vrA"),l=a("3Z9Z"),s=a("JI6e"),d=a("vOnD"),m=a("ehup"),c=(a("q4sD"),a("aT6C"),a("yMkV"),a("rY4l")),p=a("JwsL"),u=function(e){var t=e.children,a=m.data;return i.a.createElement(i.a.Fragment,null,i.a.createElement(c.a,{siteTitle:a.site.siteMetadata.title}),i.a.createElement("main",null,t),i.a.createElement(p.a,null))},g=a("EYWl"),f=a("U7J8"),x=(a("WBmp"),function(e){return i.a.createElement("div",{className:"Card"},i.a.createElement("img",{src:e.image,alt:e.title}),i.a.createElement("h3",null,e.title),i.a.createElement("p",null,e.text),i.a.createElement(f.a,{className:"CardButton",buttonText:e.buttonText}))}),h=d.a.p.withConfig({displayName:"pages__SectionTitle",componentId:"sc-6kvjaa-0"})(["font-weight:bold;font-size:30px;margin-top:98px;text-align:center;margin-top:2rem;"]),b=d.a.p.withConfig({displayName:"pages__Quote",componentId:"sc-6kvjaa-1"})(["font-weight:bold;font-style:italic;font-size:30px;text-align:center;margin-top:-10px;margin-bottom:-20px;@media (max-width:1200px){margin-bottom:-5px;}@media (max-width:430px){background:#3040c4;color:#ffffff;padding:10px;margin-bottom:0;}@media (max-width:991px){margin-bottom:4rem;}@media (max-width:575px){margin-bottom:2rem;}"]),w=d.a.p.withConfig({displayName:"pages__Highlight",componentId:"sc-6kvjaa-2"})(["display:inline;background:#3040c4;padding:10px;color:#ffffff;@media (max-width:430px){padding-left:0;}"]),E=d.a.img.withConfig({displayName:"pages__Circle",componentId:"sc-6kvjaa-3"})(["position:relative;left:15%;width:85%;margin-top:-30px;@media (max-width:991px){width:95%;left:5%;}@media (max-width:767px){width:100%;left:0;}@media (min-width:575px) and (max-width:610px){width:120%;left:0;}@media (max-width:575px){display:none;}"]),y=d.a.button.withConfig({displayName:"pages__BlueButton",componentId:"sc-6kvjaa-4"})(["background:#3040c4;border:3px solid #3040c4;box-sizing:border-box;border-radius:5px;font-weight:bold;color:#ffffff;width:250px;padding:15px;margin-bottom:40px;transition:1s cubic-bezier(0.2,0.8,0.2,1);margin-left:60px;&:hover{background:#ffffff;border:3px solid #3040c4;color:#3040c4;}@media (max-width:575px){margin:1.5rem 0 0 0;}"]),v=d.a.p.withConfig({displayName:"pages__MissionText",componentId:"sc-6kvjaa-5"})(["line-height:38px;margin-left:60px;font-size:30px;@media (max-width:1086px){font-size:24px;}@media (max-width:991px){font-size:22px;}@media (max-width:767px){font-size:16px;line-height:32px;}@media (max-width:575px){text-align:center;margin:0rem 2rem 0 2rem;}"]),k=d.a.div.withConfig({displayName:"pages__MissionDiv",componentId:"sc-6kvjaa-6"})(["@media (max-width:575px){text-align:center;}"]),C=d.a.div.withConfig({displayName:"pages__OnlineCourses",componentId:"sc-6kvjaa-7"})(["box-shadow:0px 4px 50px rgba(0,0,0,0.25);border-radius:10px;overflow:hidden;margin-top:3rem;background:#f3c6bd;@media (max-width:767px){padding-top:1rem;p{font-size:17.5px;}}"]),_=d.a.div.withConfig({displayName:"pages__CourseInfo",componentId:"sc-6kvjaa-8"})(["padding-right:30px;padding-left:15px;text-align:center;height:100%;background:#f3c6bd;h3{font-size:2rem;font-weight:bold;}@media (max-width:991px){padding-top:40px;padding-bottom:40px;p{font-size:15px;}}@media (max-width:767px){padding-left:2rem;padding-right:2rem;padding-top:1rem;p{font-size:17.5px;}}"]);t.default=function(){return 
i.a.createElement(u,null,i.a.createElement(g.a,{title:"Home"}),i.a.createElement(h,{style:{marginTop:"9rem"}},"Academic Services"),i.a.createElement(o.a,null,i.a.createElement(l.a,{style:{marginTop:"3rem"}},i.a.createElement(s.a,{sm:12,md:6},i.a.createElement(x,{image:a("dK/e"),title:"Study Support",text:"A tailor-made study program that yields academic excellence through measurable daily improvements.",buttonText:"Learn More"})),i.a.createElement(s.a,{sm:12,md:6},i.a.createElement(x,{className:"cardSpacing",image:a("ouvs"),title:"Tutoring",text:"Our carefully selected team of high achievers tutor over 200 students privately across Victoria.",buttonText:"Get Tutoring"})))),i.a.createElement(h,null,"Our Mission"),i.a.createElement(b,null,'"Give you control ',i.a.createElement(w,null,'your marks."')),i.a.createElement("div",{style:{overflow:"hidden"}},i.a.createElement(l.a,null,i.a.createElement(s.a,{xs:12,sm:7,style:{margin:"auto 0"}},i.a.createElement(k,null,i.a.createElement(v,null,"The team at LessonUp treats every student uniquely. We"," ",i.a.createElement("b",null,i.a.createElement("i",null,"strongly ")),"disagree with a one size fits all approach when it comes to education. The first session will be an assessment so we understand your strengths and weakness. Using the results from our assessment we tailor a specific plan for you."),i.a.createElement(r.Link,{to:"./about-us"},i.a.createElement(y,null,"Learn More")))),i.a.createElement(s.a,{sm:5,style:{paddingRight:0}},i.a.createElement(E,{src:a("egNA")})))),i.a.createElement(h,null,"Online Courses"),i.a.createElement(o.a,null,i.a.createElement(C,null,i.a.createElement(l.a,{style:{background:"#F3C6BD"}},i.a.createElement(s.a,{md:6,style:{margin:"auto 0"}},i.a.createElement("img",{src:a("lwat"),style:{width:"100%"},alt:"Master CAS Online Course"})),i.a.createElement(s.a,{md:6,style:{margin:"auto 0"}},i.a.createElement(_,null,i.a.createElement("h3",null,"Master the CAS"),i.a.createElement("p",null,"Save time, become a CAS expert. Learn how to efficiently & effectively use your graphing calculator."),i.a.createElement(r.Link,{to:"./online-courses"},i.a.createElement(y,{style:{width:"100%",margin:0}},"Learn More"))))))))}},U7J8:function(e,t,a){"use strict";var n=a("q1tI"),i=a.n(n),r=a("vOnD").a.button.withConfig({displayName:"WhiteButton__Button",componentId:"sc-1b69okb-0"})(["background:#ffffff;border:3px solid #3040c4;box-sizing:border-box;border-radius:5px;font-weight:bold;color:#3040c4;transition:1s cubic-bezier(0.2,0.8,0.2,1);&:hover{background:#3040c4;border:3px solid #3040c4;color:#ffffff;}"]);t.a=function(e){return i.a.createElement(r,{className:e.className,style:e.style},e.buttonText)}},"dK/e":function(e,t,a){e.exports=a.p+"static/lessonup-stairs-5a25435f5769d0ad59a5cdf9db928eaf.png"},egNA:function(e,t,a){e.exports=a.p+"static/lessonup-reading-v2-adf63aa55e908dbabd529a617e2041d3.png"},ehup:function(e){e.exports=JSON.parse('{"data":{"site":{"id":"Site","siteMetadata":{"title":"LessonUp"}}}}')},lwat:function(e,t,a){e.exports=a.p+"static/lessonup-error-2c9c70d4ab643cae67e975336636e491.png"},ouvs:function(e,t,a){e.exports=a.p+"static/lessonup-reading-d8c08853c1b93d525e14d17bddacaff3.png"}}]);
//# sourceMappingURL=component---src-pages-index-js-35b93a744226a63aa04e.js.map
dhcore.py
import sys, os, inspect from ctypes import * import math MY_DIR = os.path.dirname(os.path.abspath(inspect.getframeinfo(inspect.currentframe())[0])) HELPER_DIR = os.path.abspath(os.path.join(MY_DIR, '..', 'helpers')) sys.path.append(HELPER_DIR) import dhlog class _API: is_init = False @staticmethod def init(debug=False): if _API.is_init: return postfix = '' if debug: postfix = '-dbg' if sys.platform == 'win32': shlib = 'dhcore' + postfix + '.dll' elif sys.platform == 'linux': shlib = 'libdhcore' + postfix + '.so' # load library try: dhcorelib = cdll.LoadLibrary(shlib) except: dhlog.Log.warn(str(sys.exc_info()[1])) dhlog.Log.fatal('could not load dynamic library %s' % shlib) sys.exit(-1) dhlog.Log.msgline('module "%s" loaded' % shlib, dhlog.TERM_GREEN) # core.h _API.core_init = dhcorelib.core_init _API.core_init.restype = c_int _API.core_init.argtypes = [c_uint] _API.core_release = dhcorelib.core_release _API.core_release.argtypes = [c_int] # err.h _API.err_getstring = dhcorelib.err_getstring _API.err_getstring.restype = c_char_p # log.h _API.log_outputconsole = dhcorelib.log_outputconsole _API.log_outputconsole.restype = c_uint _API.log_outputconsole.argtypes = [c_int] _API.log_outputfile = dhcorelib.log_outputfile _API.log_outputfile.restype = c_uint _API.log_outputfile.argtypes = [c_int, c_char_p] _API.log_isfile = dhcorelib.log_isfile _API.log_isfile.restype = c_int _API.log_isconsole = dhcorelib.log_isconsole _API.log_isconsole.restype = c_int _API.log_print = dhcorelib.log_print _API.log_print.argtypes = [c_uint, c_char_p] # file-io.h _API.fio_addvdir = dhcorelib.fio_addvdir _API.fio_addvdir.restype = c_int _API.fio_addvdir.argtypes = [c_char_p, c_int] # vec-math.h _API.mat3_muls = dhcorelib.mat3_muls _API.mat3_muls.restype = POINTER(Matrix3) _API.mat3_muls.argtypes = [POINTER(Matrix3), POINTER(Matrix3), c_float] _API.mat3_set_roteuler = dhcorelib.mat3_set_roteuler _API.mat3_set_roteuler.restype = POINTER(Matrix3) _API.mat3_set_roteuler.argtypes = [POINTER(Matrix3), c_float, c_float, c_float] _API.quat_slerp = dhcorelib.quat_slerp _API.quat_slerp.restype = POINTER(Quat) _API.quat_slerp.argtypes = [POINTER(Quat), POINTER(Quat), POINTER(Quat), c_float] _API.quat_fromaxis = dhcorelib.quat_fromaxis _API.quat_fromaxis.restype = POINTER(Quat) _API.quat_fromaxis.argtypes = [POINTER(Quat), POINTER(Vec3), c_float] _API.quat_fromeuler = dhcorelib.quat_fromeuler _API.quat_fromeuler.restype = POINTER(Quat) _API.quat_fromeuler.argtypes = [POINTER(Quat), c_float, c_float, c_float] _API.quat_frommat3 = dhcorelib.quat_frommat3 _API.quat_frommat3.restype = POINTER(Quat) _API.quat_frommat3.argtypes = [POINTER(Quat), POINTER(Matrix3)] _API.mat3_inv = dhcorelib.mat3_inv _API.mat3_inv.restype = POINTER(Matrix3) _API.mat3_inv.argtypes = [POINTER(Matrix3), POINTER(Matrix3)] _API.mat3_set_rotaxis = dhcorelib.mat3_set_rotaxis _API.mat3_set_rotaxis.restype = POINTER(Matrix3) _API.mat3_set_rotaxis.argtypes = [POINTER(Matrix3), POINTER(Vec3), c_float] _API.mat3_set_roteuler = dhcorelib.mat3_set_roteuler _API.mat3_set_roteuler.restype = POINTER(Matrix3) _API.mat3_set_roteuler.argtypes = [POINTER(Matrix3), c_float, c_float, c_float] _API.mat3_set_rotquat = dhcorelib.mat3_set_rotquat _API.mat3_set_rotquat.restype = POINTER(Matrix3) _API.mat3_set_rotquat.argtypes = [POINTER(Matrix3), POINTER(Quat)] _API.mat3_inv = dhcorelib.mat3_inv _API.mat3_inv.restype = POINTER(Matrix3) _API.mat3_inv.argtypes = [POINTER(Matrix3), POINTER(Matrix3)] _API.mat3_det = dhcorelib.mat3_det _API.mat3_det.restype = c_float 
        _API.mat3_det.argtypes = [POINTER(Matrix3)]

        _API.is_init = True


def IS_FAIL(r):
    return r <= 0


INVALID_HANDLE = 0xffffffffffffffff
INVALID_INDEX = 0xffffffff


def to_cstr(s):
    return create_string_buffer(s.encode('ascii'))


class Errors:
    @staticmethod
    def last_error():
        return _API.err_getstring().decode()


class Log:
    class LogType:
        TEXT = 0
        ERROR = 1
        WARNING = 2
        INFO = 3
        LOAD = 4

    @staticmethod
    def set_console_output(enable):
        _API.log_outputconsole(c_int(enable))

    @staticmethod
    def set_file_output(logfile):
        if logfile is not None:
            _API.log_outputfile(c_int(True), to_cstr(logfile))
        else:
            _API.log_outputfile(c_int(False), None)

    @staticmethod
    def msg(log_type, msg):
        _API.log_print(c_uint(log_type), to_cstr(msg))


class Core:
    class InitFlags:
        TRACE_MEM = (1 << 0)
        CRASH_DUMP = (1 << 1)
        LOGGER = (1 << 2)
        ERRORS = (1 << 3)
        JSON = (1 << 4)
        FILE_IO = (1 << 5)
        TIMER = (1 << 6)
        ALL = 0xffffffff

    @staticmethod
    def init(flags=InitFlags.ALL):
        if IS_FAIL(_API.core_init(c_uint(flags))):
            raise Exception(Errors.last_error())

    @staticmethod
    def release(report_leaks=True):
        _API.core_release(c_int(report_leaks))


class Vec3(Structure):
    _fields_ = [('x', c_float), ('y', c_float), ('z', c_float), ('w', c_float)]

    def __init__(self, _x=0, _y=0, _z=0, _w=1):
        self.x = _x
        self.y = _y
        self.z = _z
        self.w = _w

    def copy(self):
        return Vec3(self.x, self.y, self.z)

    def __add__(a, b):
        return Vec3(a.x + b.x, a.y + b.y, a.z + b.z)

    def __mul__(a, b):
        if type(b) is float or type(b) is int:
            return Vec3(a.x*b, a.y*b, a.z*b)
        elif type(b) is Matrix3:
            # transform the point by a row-major 3x4 transform (rotation + translation)
            return Vec3(
                a.x*b.m11 + a.y*b.m21 + a.z*b.m31 + b.m41,
                a.x*b.m12 + a.y*b.m22 + a.z*b.m32 + b.m42,
                a.x*b.m13 + a.y*b.m23 + a.z*b.m33 + b.m43)

    def __truediv__(a, b):
        return Vec3(a.x/b, a.y/b, a.z/b)

    def __eq__(a, b):
        return a.x == b.x and a.y == b.y and a.z == b.z

    def __sub__(a, b):
        return Vec3(a.x - b.x, a.y - b.y, a.z - b.z)

    def get_length(self):
        return math.sqrt(self.x*self.x + self.y*self.y + self.z*self.z)
    length = property(get_length)

    @staticmethod
    def dot(a, b):
        return a.x*b.x + a.y*b.y + a.z*b.z

    @staticmethod
    def normalize(v):
        scale = 1.0 / v.length
        return Vec3(v.x*scale, v.y*scale, v.z*scale)

    @staticmethod
    def cross(v1, v2):
        return Vec3(
            v1.y*v2.z - v1.z*v2.y,
            v1.z*v2.x - v1.x*v2.z,
            v1.x*v2.y - v1.y*v2.x)

    @staticmethod
    def lerp(v1, v2, t):
        return Vec3(
            v1.x + t*(v2.x - v1.x),
            v1.y + t*(v2.y - v1.y),
            v1.z + t*(v2.z - v1.z))

    def __str__(self):
        return 'Vec3: %f, %f, %f' % (self.x, self.y, self.z)


class Vec2(Structure):
    _fields_ = [('x', c_float), ('y', c_float)]

    def __init__(self, _x=0, _y=0):
        self.x = _x
        self.y = _y

    def copy(self):
        return Vec2(self.x, self.y)

    def __add__(a, b):
        return Vec2(a.x + b.x, a.y + b.y)

    def __sub__(a, b):
        return Vec2(a.x - b.x, a.y - b.y)

    def __mul__(a, b):
        return Vec2(a.x*b, a.y*b)

    def __truediv__(a, b):
        return Vec2(a.x/b, a.y/b)

    def __str__(self):
        return 'Vec2: %f, %f' % (self.x, self.y)


class Vec2i(Structure):
    _fields_ = [('x', c_int), ('y', c_int)]

    def __init__(self, _x=0, _y=0):
        self.x = int(_x)
        self.y = int(_y)

    def copy(self):
        return Vec2i(self.x, self.y)

    def __add__(a, b):
        return Vec2i(a.x + b.x, a.y + b.y)

    def __sub__(a, b):
        return Vec2i(a.x - b.x, a.y - b.y)

    def __mul__(a, b):
        return Vec2i(a.x*b, a.y*b)

    def __str__(self):
        return 'Vec2i: %d, %d' % (self.x, self.y)


class Vec4(Structure):
    _fields_ = [('x', c_float), ('y', c_float), ('z', c_float), ('w', c_float)]

    def __init__(self, _x=0, _y=0, _z=0, _w=1):
        self.x = _x
        self.y = _y
        self.z = _z
        self.w = _w

    def copy(self):
        return Vec4(self.x, self.y, self.z, self.w)

    def __add__(a, b):
        return Vec4(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w)

    def __sub__(a, b):
        return Vec4(a.x - b.x, a.y - b.y, a.z - b.z, a.w - b.w)

    def __mul__(a, b):
        return Vec4(a.x*b, a.y*b, a.z*b, a.w*b)

    def __truediv__(a, b):
        return Vec4(a.x/b, a.y/b, a.z/b, a.w/b)

    def __str__(self):
        return 'Vec4: %f, %f, %f, %f' % (self.x, self.y, self.z, self.w)


class Color(Structure):
    _fields_ = [('r', c_float), ('g', c_float), ('b', c_float), ('a', c_float)]

    def __init__(self, _r=0, _g=0, _b=0, _a=1):
        self.r = _r
        self.g = _g
        self.b = _b
        self.a = _a

    def copy(self):
        return Color(self.r, self.g, self.b, self.a)

    def __mul__(a, b):
        # scalar multiply keeps alpha; component multiply takes the smaller alpha
        if type(b) is float or type(b) is int:
            return Color(a.r*b, a.g*b, a.b*b, a.a)
        return Color(a.r*b.r, a.g*b.g, a.b*b.b, min(a.a, b.a))

    def __add__(a, b):
        return Color(a.r + b.r, a.g + b.g, a.b + b.b, max(a.a, b.a))

    @staticmethod
    def lerp(c1, c2, t):
        tinv = 1 - t
        return Color(
            c1.r*tinv + c2.r*t,
            c1.g*tinv + c2.g*t,
            c1.b*tinv + c2.b*t,
            c1.a*tinv + c2.a*t)


class Quat(Structure):
    _fields_ = [('x', c_float), ('y', c_float), ('z', c_float), ('w', c_float)]

    def __init__(self, _x=0, _y=0, _z=0, _w=1):
        self.x = _x
        self.y = _y
        self.z = _z
        self.w = _w

    def copy(self):
        return Quat(self.x, self.y, self.z, self.w)

    def __mul__(q1, q2):
        return Quat(
            q1.w*q2.x + q1.x*q2.w + q1.z*q2.y - q1.y*q2.z,
            q1.w*q2.y + q1.y*q2.w + q1.x*q2.z - q1.z*q2.x,
            q1.w*q2.z + q1.z*q2.w + q1.y*q2.x - q1.x*q2.y,
            q1.w*q2.w - q1.x*q2.x - q1.y*q2.y - q1.z*q2.z)

    def __eq__(q1, q2):
        return q1.x == q2.x and q1.y == q2.y and q1.z == q2.z and q1.w == q2.w

    def from_axis(self, axis, angle):
        _API.quat_fromaxis(byref(self), byref(axis), c_float(angle))

    def from_euler(self, pitch, yaw, roll):
        _API.quat_fromeuler(byref(self), c_float(pitch), c_float(yaw), c_float(roll))

    def from_matrix3(self, mat):
        _API.quat_frommat3(byref(self), byref(mat))

    @staticmethod
    def inverse(q):
        return Quat(-q.x, -q.y, -q.z, q.w)

    @staticmethod
    def slerp(q1, q2, t):
        q = Quat()
        _API.quat_slerp(byref(q), byref(q1), byref(q2), c_float(t))
        return q

    def __str__(self):
        return 'Quat: %f %f %f %f' % (self.x, self.y, self.z, self.w)


class Matrix3(Structure):
    _fields_ = [
        ('m11', c_float), ('m12', c_float), ('m13', c_float), ('m14', c_float),
        ('m21', c_float), ('m22', c_float), ('m23', c_float), ('m24', c_float),
        ('m31', c_float), ('m32', c_float), ('m33', c_float), ('m34', c_float),
        ('m41', c_float), ('m42', c_float), ('m43', c_float), ('m44', c_float)]

    def __init__(self,
                 _m11=1, _m12=0, _m13=0,
                 _m21=0, _m22=1, _m23=0,
                 _m31=0, _m32=0, _m33=1,
                 _m41=0, _m42=0, _m43=0):
        self.m11 = _m11
        self.m12 = _m12
        self.m13 = _m13
        self.m21 = _m21
        self.m22 = _m22
        self.m23 = _m23
        self.m31 = _m31
        self.m32 = _m32
        self.m33 = _m33
        self.m41 = _m41
        self.m42 = _m42
        self.m43 = _m43

    def copy(self):
        return Matrix3(
            self.m11, self.m12, self.m13,
            self.m21, self.m22, self.m23,
            self.m31, self.m32, self.m33,
            self.m41, self.m42, self.m43)

    def __mul__(a, b):
        if type(b) is float or type(b) is int:
            return Matrix3(
                a.m11*b, a.m12*b, a.m13*b,
                a.m21*b, a.m22*b, a.m23*b,
                a.m31*b, a.m32*b, a.m33*b,
                a.m41*b, a.m42*b, a.m43*b)
        else:
            return Matrix3(
                a.m11*b.m11 + a.m12*b.m21 + a.m13*b.m31,
                a.m11*b.m12 + a.m12*b.m22 + a.m13*b.m32,
                a.m11*b.m13 + a.m12*b.m23 + a.m13*b.m33,
                a.m21*b.m11 + a.m22*b.m21 + a.m23*b.m31,
                a.m21*b.m12 + a.m22*b.m22 + a.m23*b.m32,
                a.m21*b.m13 + a.m22*b.m23 + a.m23*b.m33,
                a.m31*b.m11 + a.m32*b.m21 + a.m33*b.m31,
                a.m31*b.m12 + a.m32*b.m22 + a.m33*b.m32,
                a.m31*b.m13 + a.m32*b.m23 + a.m33*b.m33,
                a.m41*b.m11 + a.m42*b.m21 + a.m43*b.m31 + b.m41,
                a.m41*b.m12 + a.m42*b.m22 + a.m43*b.m32 + b.m42,
                a.m41*b.m13 + a.m42*b.m23 + a.m43*b.m33 + b.m43)

    def translate(self, x, y=None, z=None):
        # accepts either three scalars or a single Vec3
        if isinstance(x, Vec3):
            x, y, z = x.x, x.y, x.z
        self.m41 = x
        self.m42 = y
        self.m43 = z

    def rotate_euler(self, pitch, yaw, roll):
        _API.mat3_set_roteuler(byref(self), c_float(pitch), c_float(yaw), c_float(roll))

    def rotate_quat(self, q):
        _API.mat3_set_rotquat(byref(self), byref(q))

    def rotate_axis(self, axis, angle):
        _API.mat3_set_rotaxis(byref(self), byref(axis), c_float(angle))

    def scale(self, sx, sy, sz):
        self.m11 = sx
        self.m22 = sy
        self.m33 = sz

    def __get_determinant(self):
        return _API.mat3_det(byref(self))
    determinant = property(__get_determinant)

    def __get_translation(self):
        return Vec3(self.m41, self.m42, self.m43)
    translation = property(__get_translation)

    @staticmethod
    def transpose(m):
        return Matrix3(
            m.m11, m.m21, m.m31,
            m.m12, m.m22, m.m32,
            m.m13, m.m23, m.m33,
            m.m14, m.m24, m.m34)

    @staticmethod
    def invert(m):
        r = Matrix3()
        _API.mat3_inv(byref(r), byref(m))
        return r


class Matrix4(Structure):
    _fields_ = [
        ('m11', c_float), ('m12', c_float), ('m13', c_float), ('m14', c_float),
        ('m21', c_float), ('m22', c_float), ('m23', c_float), ('m24', c_float),
        ('m31', c_float), ('m32', c_float), ('m33', c_float), ('m34', c_float),
        ('m41', c_float), ('m42', c_float), ('m43', c_float), ('m44', c_float)]

    def __init__(self,
                 _m11=1, _m12=0, _m13=0, _m14=0,
                 _m21=0, _m22=1, _m23=0, _m24=0,
                 _m31=0, _m32=0, _m33=1, _m34=0,
                 _m41=0, _m42=0, _m43=0, _m44=1):
        self.m11 = _m11
        self.m12 = _m12
        self.m13 = _m13
        self.m14 = _m14
        self.m21 = _m21
        self.m22 = _m22
        self.m23 = _m23
        self.m24 = _m24
        self.m31 = _m31
        self.m32 = _m32
        self.m33 = _m33
        self.m34 = _m34
        self.m41 = _m41
        self.m42 = _m42
        self.m43 = _m43
        self.m44 = _m44

    def copy(self):
        return Matrix4(
            self.m11, self.m12, self.m13, self.m14,
            self.m21, self.m22, self.m23, self.m24,
            self.m31, self.m32, self.m33, self.m34,
            self.m41, self.m42, self.m43, self.m44)


class Math:
    PI = 3.14159265

    @staticmethod
    def to_rad(x):
        return x*Math.PI/180.0

    @staticmethod
    def to_deg(x):
        return 180.0*x/Math.PI


class FileIO:
    @staticmethod
    def add_virtual_path(path, monitor=False):
        path = os.path.abspath(os.path.expanduser(path))
        if not _API.fio_addvdir(to_cstr(path), c_int(monitor)):
            raise Exception(Errors.last_error())


class Variant(Structure):
    class VarType:
        BOOL = 1
        INT = 2
        UINT = 3
        FLOAT = 4
        FLOAT2 = 5
        FLOAT3 = 6
        FLOAT4 = 7
        INT2 = 8
        INT3 = 9
        INT4 = 10
        STRING = 11

    class _Value(Union):
        _fields_ = [
            ('b', c_int),
            ('i', c_int),
            ('ui', c_uint),
            ('f', c_float),
            ('fv', c_float*4),
            ('iv', c_int*4),
            ('s', c_char*16)]

    _fields_ = [('type', c_uint), ('value', _Value)]

    def set_value(self, v):
        if type(v) is bool:
            self.type = Variant.VarType.BOOL
            self.value.b = int(v)
        elif type(v) is int:
            self.type = Variant.VarType.INT
            self.value.i = v
        elif type(v) is float:
            self.type = Variant.VarType.FLOAT
            self.value.f = v
        elif type(v) is Vec2:
            self.type = Variant.VarType.FLOAT2
            self.value.fv[0] = v.x
            self.value.fv[1] = v.y
        elif type(v) is Vec3:
            self.type = Variant.VarType.FLOAT3
            self.value.fv[0] = v.x
            self.value.fv[1] = v.y
            self.value.fv[2] = v.z
        elif type(v) is Vec2i:
            self.type = Variant.VarType.INT2
            self.value.iv[0] = v.x
            self.value.iv[1] = v.y
        elif type(v) is Color:
            self.type = Variant.VarType.FLOAT4
            self.value.fv[0] = v.r
            self.value.fv[1] = v.g
            self.value.fv[2] = v.b
            self.value.fv[3] = v.a
        elif type(v) is Vec4:
            self.type = Variant.VarType.FLOAT4
            self.value.fv[0] = v.x
            self.value.fv[1] = v.y
            self.value.fv[2] = v.z
            self.value.fv[3] = v.w
        elif type(v) is str:
            self.type = Variant.VarType.STRING
            self.value.s = v.encode('ascii')
        else:
            raise Exception('unknown type')

    def get_value(self):
        if self.type == Variant.VarType.BOOL:
            return bool(self.value.b)
        elif self.type == Variant.VarType.INT:
            return self.value.i
        elif self.type == Variant.VarType.FLOAT:
            return self.value.f
        elif self.type == Variant.VarType.FLOAT2:
            return Vec2(self.value.fv[0], self.value.fv[1])
        elif self.type == Variant.VarType.FLOAT3:
            return Vec3(self.value.fv[0], self.value.fv[1], self.value.fv[2])
        elif self.type == Variant.VarType.INT2:
            return Vec2i(self.value.iv[0], self.value.iv[1])
        elif self.type == Variant.VarType.FLOAT4:
            return Vec4(self.value.fv[0], self.value.fv[1], self.value.fv[2],
                        self.value.fv[3])
        elif self.type == Variant.VarType.STRING:
            return self.value.s.decode()
        else:
            raise Exception('unknown type')


_API.init(debug=('--debug' in sys.argv))
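# ---------------------------------------------------------------------------
# Usage sketch (not part of the module): a minimal round-trip through the math
# types above. It assumes the dhcore shared library was found by _API.init();
# the Vec3/Matrix3 operations shown are pure Python.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    Core.init()
    v = Vec3(1, 2, 3)
    up = Vec3(0, 1, 0)
    print(Vec3.dot(v, up))                 # 2.0
    print(Vec3.cross(Vec3(1, 0, 0), up))   # Vec3: 0, 0, 1

    m = Matrix3()
    m.translate(Vec3(10, 0, 0))
    print(v * m)                           # translated point: Vec3: 11, 2, 3
    Core.release()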
sb.js
const config = require('./config.json'); // const winston = require('winston'); // const logger = new winston(); // ? Steam const SteamUser = require('steam-user'); const Steam = require('steam'); const SteamTotp = require('steam-totp'); const SteamCommunity = require('steamcommunity'); const TradeOfferManager = require('steam-tradeoffer-manager'); const steamClient = new Steam.SteamClient();
const client = new SteamUser();
const community = new SteamCommunity();
const steamFriends = new Steam.SteamFriends(steamClient);
const manager = new TradeOfferManager({
    steam: client,
    community: community,
    language: 'en'
});

const logOnOptions = {
    accountName: config.username,
    password: config.password,
    // twoFactorCode: SteamTotp.generateAuthCode(config.sharedSecret)
};

console.log(`starting application ${config.appname}`);

client.logOn(logOnOptions);

// steam-user emits 'loggedOn' (not 'logon') once the logon succeeds
client.on('loggedOn', () => {
    console.log(`Logged in as ${config.username}`);
});

// ? Discord
// const Discord = require('discord.js');
// const discordClient = new Discord.Client();
// const discordOptions = {
//     token: config.token
// };
// discordClient.login(discordOptions.token);
// ! Discord
// discordClient.on('ready', () => {
//     console.log(`Logged in as ${discordClient.user.tag}!`);
// });

// client.on('loggedOn', () => {
//     console.log('Logged in to Steam');
//     client.setPersona(SteamUser.Steam.EPersonaState.Online, config.botName);
//     client.gamesPlayed('Accepting Donation');
// });

// client.on('webSession', (sessionid, cookies) => {
//     manager.setCookies(cookies);
//     community.setCookies(cookies);
//     community.startConfirmationChecker(10000, config.sharedSecret);
// });

// ! Steam
manager.on('newOffer', (offer) => {
    // accept only offers that trade the same number of items both ways
    if (offer.itemsToGive.length === offer.itemsToReceive.length) {
        offer.accept((err, status) => {
            if (err) {
                console.log(err);
            } else {
                console.log(`Accepted 1:1 offer (${status})`);
            }
        });
    } else {
        offer.decline((err) => {
            if (err) {
                console.log(err);
            } else {
                console.log('Declined non-1:1 offer');
            }
        });
    }
});

client.on('friendRelationship', (steamid, relationship) => {
    // 2 === EFriendRelationship.RequestRecipient (incoming friend request)
    if (relationship === 2) {
        client.addFriend(steamid);
        client.chatMessage(steamid, 'Hi! Thanks for adding this bot. Type !help to see the available commands.');
    }
});

steamFriends.on('message', function (source, message, type, chatter) {
    console.log('Received message: ' + message);
    if (message === '!help') {
        steamFriends.sendMessage(source, 'Available commands: !ping, !owner', Steam.EChatEntryType.ChatMsg);
    } else if (message === '!owner') {
        steamFriends.sendMessage(source, 'Hi, I am tiwa. I was built with Node.js. My creator is: steamcommunity.com/id/alandtiwa.', Steam.EChatEntryType.ChatMsg); // ChatMsg by default
    }
});
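// ---------------------------------------------------------------------------
// Sketch (not part of the bot): the 1:1 check from the newOffer handler above,
// factored into a pure helper so it can be unit tested without a live offer.
// Field names follow steam-tradeoffer-manager's offer shape.
// ---------------------------------------------------------------------------
function isOneForOne(offer) {
    return offer.itemsToGive.length === offer.itemsToReceive.length;
}

// Example: isOneForOne({ itemsToGive: [1], itemsToReceive: [2] }) === true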
kernel_impl.py
import ast import functools import inspect import re import sys import textwrap import numpy as np import taichi.lang from taichi._lib import core as _ti_core from taichi.lang import impl, runtime_ops from taichi.lang.ast import (ASTTransformerContext, KernelSimplicityASTChecker, transform_tree) from taichi.lang.enums import Layout from taichi.lang.exception import (TaichiCompilationError, TaichiRuntimeTypeError, TaichiSyntaxError) from taichi.lang.expr import Expr from taichi.lang.matrix import MatrixType from taichi.lang.shell import _shell_pop_print, oinspect from taichi.lang.util import has_pytorch, to_taichi_type from taichi.linalg.sparse_matrix import sparse_matrix_builder from taichi.types import any_arr, primitive_types, template from taichi import _logging if has_pytorch(): import torch def func(fn): """Marks a function as callable in Taichi-scope. This decorator transforms a Python function into a Taichi one. Taichi will JIT compile it into native instructions. Args: fn (Callable): The Python function to be decorated Returns: Callable: The decorated function Example:: >>> @ti.func >>> def foo(x): >>> return x + 2 >>> >>> @ti.kernel >>> def run(): >>> print(foo(40)) # 42 """ is_classfunc = _inside_class(level_of_class_stackframe=3) fun = Func(fn, _classfunc=is_classfunc) @functools.wraps(fn) def decorated(*args): return fun.__call__(*args) decorated._is_taichi_function = True return decorated def pyfunc(fn): """Marks a function as callable in both Taichi and Python scopes. When called inside the Taichi scope, Taichi will JIT compile it into native instructions. Otherwise it will be invoked directly as a Python function. See also :func:`~taichi.lang.kernel_impl.func`. Args: fn (Callable): The Python function to be decorated Returns: Callable: The decorated function """ is_classfunc = _inside_class(level_of_class_stackframe=3) fun = Func(fn, _classfunc=is_classfunc, _pyfunc=True) @functools.wraps(fn) def decorated(*args): return fun.__call__(*args) decorated._is_taichi_function = True return decorated def _get_tree_and_ctx(self, excluded_parameters=(), is_kernel=True, arg_features=None, args=None, ast_builder=None): file = oinspect.getsourcefile(self.func) src, start_lineno = oinspect.getsourcelines(self.func) src = [textwrap.fill(line, tabsize=4, width=9999) for line in src] tree = ast.parse(textwrap.dedent("\n".join(src))) func_body = tree.body[0] func_body.decorator_list = [] global_vars = _get_global_vars(self.func) for i, arg in enumerate(func_body.args.args): anno = arg.annotation if isinstance(anno, ast.Name): global_vars[anno.id] = self.argument_annotations[i] if isinstance(func_body.returns, ast.Name): global_vars[func_body.returns.id] = self.return_type if is_kernel or impl.get_runtime().experimental_real_function: # inject template parameters into globals for i in self.template_slot_locations: template_var_name = self.argument_names[i] global_vars[template_var_name] = args[i] return tree, ASTTransformerContext(excluded_parameters=excluded_parameters, is_kernel=is_kernel, func=self, arg_features=arg_features, global_vars=global_vars, argument_data=args, src=src, start_lineno=start_lineno, file=file, ast_builder=ast_builder) class Func: function_counter = 0 def __init__(self, _func, _classfunc=False, _pyfunc=False): self.func = _func self.func_id = Func.function_counter Func.function_counter += 1 self.compiled = None self.classfunc = _classfunc self.pyfunc = _pyfunc self.argument_annotations = [] self.argument_names = [] self.return_type = None self.extract_arguments() 
self.template_slot_locations = [] for i, anno in enumerate(self.argument_annotations): if isinstance(anno, template): self.template_slot_locations.append(i) self.mapper = TaichiCallableTemplateMapper( self.argument_annotations, self.template_slot_locations) self.taichi_functions = {} # The |Function| class in C++ def __call__(self, *args): if not impl.inside_kernel(): if not self.pyfunc: raise TaichiSyntaxError( "Taichi functions cannot be called from Python-scope." " Use @ti.pyfunc if you wish to call Taichi functions " "from both Python-scope and Taichi-scope.") return self.func(*args) if impl.get_runtime().experimental_real_function: if impl.get_runtime().current_kernel.is_grad: raise TaichiSyntaxError( "Real function in gradient kernels unsupported.") instance_id, _ = self.mapper.lookup(args) key = _ti_core.FunctionKey(self.func.__name__, self.func_id, instance_id) if self.compiled is None: self.compiled = {} if key.instance_id not in self.compiled: self.do_compile(key=key, args=args) return self.func_call_rvalue(key=key, args=args) tree, ctx = _get_tree_and_ctx( self, is_kernel=False, args=args, ast_builder=impl.get_runtime().prog.current_ast_builder()) ret = transform_tree(tree, ctx) if not impl.get_runtime().experimental_real_function: if self.return_type and not ctx.returned: raise TaichiSyntaxError( "Function has a return type but does not have a return statement" ) return ret def func_call_rvalue(self, key, args): # Skip the template args, e.g., |self| assert impl.get_runtime().experimental_real_function non_template_args = [] for i, anno in enumerate(self.argument_annotations): if not isinstance(anno, template): non_template_args.append(args[i]) non_template_args = impl.make_expr_group(non_template_args) return Expr( _ti_core.make_func_call_expr( self.taichi_functions[key.instance_id], non_template_args)) def do_compile(self, key, args): tree, ctx = _get_tree_and_ctx(self, is_kernel=False, args=args) fn = impl.get_runtime().prog.create_function(key) def func_body(): ctx.ast_builder = fn.ast_builder() transform_tree(tree, ctx) self.taichi_functions[key.instance_id] = fn self.compiled[key.instance_id] = func_body self.taichi_functions[key.instance_id].set_function_body(func_body) def extract_arguments(self): sig = inspect.signature(self.func) if sig.return_annotation not in (inspect._empty, None): self.return_type = sig.return_annotation params = sig.parameters arg_names = params.keys() for i, arg_name in enumerate(arg_names): param = params[arg_name] if param.kind == inspect.Parameter.VAR_KEYWORD: raise TaichiSyntaxError( 'Taichi functions do not support variable keyword parameters (i.e., **kwargs)' ) if param.kind == inspect.Parameter.VAR_POSITIONAL: raise TaichiSyntaxError( 'Taichi functions do not support variable positional parameters (i.e., *args)' ) if param.kind == inspect.Parameter.KEYWORD_ONLY: raise TaichiSyntaxError( 'Taichi functions do not support keyword parameters') if param.kind != inspect.Parameter.POSITIONAL_OR_KEYWORD: raise TaichiSyntaxError( 'Taichi functions only support "positional or keyword" parameters' ) annotation = param.annotation if annotation is inspect.Parameter.empty: if i == 0 and self.classfunc: annotation = template() # TODO: pyfunc also need type annotation check when real function is enabled, # but that has to happen at runtime when we know which scope it's called from. 
elif not self.pyfunc and impl.get_runtime( ).experimental_real_function: raise TaichiSyntaxError( f'Taichi function `{self.func.__name__}` parameter `{arg_name}` must be type annotated' ) else: if not id(annotation ) in primitive_types.type_ids and not isinstance( annotation, template): raise TaichiSyntaxError( f'Invalid type annotation (argument {i}) of Taichi function: {annotation}' ) self.argument_annotations.append(annotation) self.argument_names.append(param.name) class TaichiCallableTemplateMapper: def __init__(self, annotations, template_slot_locations): self.annotations = annotations self.num_args = len(annotations) self.template_slot_locations = template_slot_locations self.mapping = {} @staticmethod def extract_arg(arg, anno): if isinstance(anno, template): if isinstance(arg, taichi.lang.snode.SNode): return arg.ptr if isinstance(arg, taichi.lang.expr.Expr): return arg.ptr.get_underlying_ptr_address() if isinstance(arg, _ti_core.Expr): return arg.get_underlying_ptr_address() if isinstance(arg, tuple): return tuple( TaichiCallableTemplateMapper.extract_arg(item, anno) for item in arg) return arg if isinstance(anno, any_arr): if isinstance(arg, taichi.lang._ndarray.ScalarNdarray): anno.check_element_dim(arg, 0) anno.check_element_shape(()) anno.check_field_dim(len(arg.shape)) return arg.dtype, len(arg.shape), (), Layout.AOS if isinstance(arg, taichi.lang.matrix.VectorNdarray): anno.check_element_dim(arg, 1) anno.check_element_shape((arg.n, )) anno.check_field_dim(len(arg.shape)) anno.check_layout(arg) return arg.dtype, len(arg.shape) + 1, (arg.n, ), arg.layout if isinstance(arg, taichi.lang.matrix.MatrixNdarray): anno.check_element_dim(arg, 2) anno.check_element_shape((arg.n, arg.m)) anno.check_field_dim(len(arg.shape)) anno.check_layout(arg) return arg.dtype, len(arg.shape) + 2, (arg.n, arg.m), arg.layout # external arrays element_dim = 0 if anno.element_dim is None else anno.element_dim layout = Layout.AOS if anno.layout is None else anno.layout shape = tuple(arg.shape) if len(shape) < element_dim: raise ValueError( f"Invalid argument into ti.any_arr() - required element_dim={element_dim}, " f"but the argument has only {len(shape)} dimensions") element_shape = ( ) if element_dim == 0 else shape[: element_dim] if layout == Layout.SOA else shape[ -element_dim:] return to_taichi_type(arg.dtype), len(shape), element_shape, layout # Use '#' as a placeholder because other kinds of arguments are not involved in template instantiation return '#' def extract(self, args): extracted = [] for arg, anno in zip(args, self.annotations): extracted.append(self.extract_arg(arg, anno)) return tuple(extracted) def lookup(self, args): if len(args) != self.num_args: raise TypeError( f'{self.num_args} argument(s) needed but {len(args)} provided.' 
) key = self.extract(args) if key not in self.mapping: count = len(self.mapping) self.mapping[key] = count return self.mapping[key], key def _get_global_vars(_func): # Discussions: https://github.com/taichi-dev/taichi/issues/282 global_vars = _func.__globals__.copy() freevar_names = _func.__code__.co_freevars closure = _func.__closure__ if closure: freevar_values = list(map(lambda x: x.cell_contents, closure)) for name, value in zip(freevar_names, freevar_values): global_vars[name] = value return global_vars class Kernel: counter = 0 def __init__(self, _func, is_grad, _classkernel=False): self.func = _func self.kernel_counter = Kernel.counter Kernel.counter += 1 self.is_grad = is_grad self.grad = None self.argument_annotations = [] self.argument_names = [] self.return_type = None self.classkernel = _classkernel self.extract_arguments() self.template_slot_locations = [] for i, anno in enumerate(self.argument_annotations): if isinstance(anno, template): self.template_slot_locations.append(i) self.mapper = TaichiCallableTemplateMapper( self.argument_annotations, self.template_slot_locations) impl.get_runtime().kernels.append(self) self.reset() self.kernel_cpp = None def reset(self): self.runtime = impl.get_runtime() if self.is_grad: self.compiled_functions = self.runtime.compiled_grad_functions else: self.compiled_functions = self.runtime.compiled_functions def extract_arguments(self): sig = inspect.signature(self.func) if sig.return_annotation not in (inspect._empty, None): self.return_type = sig.return_annotation params = sig.parameters arg_names = params.keys() for i, arg_name in enumerate(arg_names): param = params[arg_name] if param.kind == inspect.Parameter.VAR_KEYWORD: raise TaichiSyntaxError( 'Taichi kernels do not support variable keyword parameters (i.e., **kwargs)' ) if param.kind == inspect.Parameter.VAR_POSITIONAL: raise TaichiSyntaxError( 'Taichi kernels do not support variable positional parameters (i.e., *args)' ) if param.default is not inspect.Parameter.empty: raise TaichiSyntaxError( 'Taichi kernels do not support default values for arguments' ) if param.kind == inspect.Parameter.KEYWORD_ONLY: raise TaichiSyntaxError( 'Taichi kernels do not support keyword parameters') if param.kind != inspect.Parameter.POSITIONAL_OR_KEYWORD: raise TaichiSyntaxError( 'Taichi kernels only support "positional or keyword" parameters' ) annotation = param.annotation if param.annotation is inspect.Parameter.empty: if i == 0 and self.classkernel: # The |self| parameter annotation = template() else: raise TaichiSyntaxError( 'Taichi kernels parameters must be type annotated') else: if isinstance(annotation, (template, any_arr)): pass elif id(annotation) in primitive_types.type_ids: pass elif isinstance(annotation, sparse_matrix_builder): pass elif isinstance(annotation, MatrixType): pass else: raise TaichiSyntaxError( f'Invalid type annotation (argument {i}) of Taichi kernel: {annotation}' ) self.argument_annotations.append(annotation) self.argument_names.append(param.name) def materialize(self, key=None, args=None, arg_features=None): if key is None: key = (self.func, 0) self.runtime.materialize() if key in self.compiled_functions: return grad_suffix = "" if self.is_grad: grad_suffix = "_grad" kernel_name = f"{self.func.__name__}_c{self.kernel_counter}_{key[1]}{grad_suffix}" _logging.trace(f"Compiling kernel {kernel_name}...") tree, ctx = _get_tree_and_ctx( self, args=args, excluded_parameters=self.template_slot_locations, arg_features=arg_features) if self.is_grad: 
KernelSimplicityASTChecker(self.func).visit(tree) # Do not change the name of 'taichi_ast_generator' # The warning system needs this identifier to remove unnecessary messages def taichi_ast_generator(kernel_cxx): if self.runtime.inside_kernel: raise TaichiSyntaxError( "Kernels cannot call other kernels. I.e., nested kernels are not allowed. " "Please check if you have direct/indirect invocation of kernels within kernels. " "Note that some methods provided by the Taichi standard library may invoke kernels, " "and please move their invocations to Python-scope.") self.runtime.inside_kernel = True self.runtime.current_kernel = self try: ctx.ast_builder = kernel_cxx.ast_builder() transform_tree(tree, ctx) if not impl.get_runtime().experimental_real_function: if self.return_type and not ctx.returned: raise TaichiSyntaxError( "Kernel has a return type but does not have a return statement" ) finally: self.runtime.inside_kernel = False self.runtime.current_kernel = None taichi_kernel = impl.get_runtime().prog.create_kernel( taichi_ast_generator, kernel_name, self.is_grad) self.kernel_cpp = taichi_kernel assert key not in self.compiled_functions self.compiled_functions[key] = self.get_function_body(taichi_kernel) def get_torch_callbacks(self, v, has_torch, is_ndarray=True): callbacks = [] def get_call_back(u, v): def call_back(): u.copy_(v) return call_back assert has_torch assert isinstance(v, torch.Tensor) if v._is_view(): raise ValueError( "Torch view tensors are not supported, please call tensor.clone() before passing it into taichi kernel." ) tmp = v taichi_arch = self.runtime.prog.config.arch # Ndarray means its memory is allocated on the specified taichi arch. # Since torch only supports CPU & CUDA, torch-base ndarray only supports # taichi cpu/cuda backend as well. # Note I put x64/arm64/cuda here to be more specific. assert not is_ndarray or taichi_arch in ( _ti_core.Arch.cuda, _ti_core.Arch.x64, _ti_core.Arch.arm64 ), "Torch-based ndarray is only supported on taichi x64/arm64/cuda backend." if str(v.device).startswith('cuda'): # External tensor on cuda if taichi_arch != _ti_core.Arch.cuda: # copy data back to cpu host_v = v.to(device='cpu', copy=True) tmp = host_v callbacks.append(get_call_back(v, host_v)) else: # External tensor on cpu if taichi_arch == _ti_core.Arch.cuda: gpu_v = v.cuda() tmp = gpu_v callbacks.append(get_call_back(v, gpu_v)) return tmp, callbacks def get_function_body(self, t_kernel): # The actual function body
        def func__(*args):
            assert len(args) == len(
                self.argument_annotations
            ), f'{len(self.argument_annotations)} arguments needed but {len(args)} provided'

            tmps = []
            callbacks = []
            has_external_arrays = False

            has_torch = has_pytorch()
            ndarray_use_torch = impl.get_runtime().ndarray_use_torch

            actual_argument_slot = 0
            launch_ctx = t_kernel.make_launch_context()
            for i, v in enumerate(args):
                needed = self.argument_annotations[i]
                if isinstance(needed, template):
                    continue
                provided = type(v)
                # Note: do not use sth like "needed == f32". That would be slow.
                if id(needed) in primitive_types.real_type_ids:
                    if not isinstance(v, (float, int)):
                        raise TaichiRuntimeTypeError(i, needed.to_string(),
                                                     provided)
                    launch_ctx.set_arg_float(actual_argument_slot, float(v))
                elif id(needed) in primitive_types.integer_type_ids:
                    if not isinstance(v, int):
                        raise TaichiRuntimeTypeError(i, needed.to_string(),
                                                     provided)
                    launch_ctx.set_arg_int(actual_argument_slot, int(v))
                elif isinstance(needed, sparse_matrix_builder):
                    # Pass only the base pointer of the ti.linalg.sparse_matrix_builder() argument
                    launch_ctx.set_arg_int(actual_argument_slot, v.get_addr())
                elif isinstance(needed, any_arr) and isinstance(
                        v, taichi.lang._ndarray.Ndarray):
                    has_external_arrays = True
                    v = v.arr
                    if ndarray_use_torch:
                        is_ndarray = True
                        tmp, torch_callbacks = self.get_torch_callbacks(
                            v, has_torch, is_ndarray)
                        callbacks += torch_callbacks
                        launch_ctx.set_arg_external_array_with_shape(
                            actual_argument_slot, int(tmp.data_ptr()),
                            tmp.element_size() * tmp.nelement(), v.shape)
                    else:
                        launch_ctx.set_arg_ndarray(actual_argument_slot, v)
                elif isinstance(needed, any_arr) and (self.match_ext_arr(v)):
                    has_external_arrays = True
                    is_numpy = isinstance(v, np.ndarray)
                    if is_numpy:
                        tmp = np.ascontiguousarray(v)
                        # Purpose: DO NOT GC |tmp|!
                        tmps.append(tmp)
                        launch_ctx.set_arg_external_array_with_shape(
                            actual_argument_slot, int(tmp.ctypes.data),
                            tmp.nbytes, v.shape)
                    else:
                        is_ndarray = False
                        tmp, torch_callbacks = self.get_torch_callbacks(
                            v, has_torch, is_ndarray)
                        callbacks += torch_callbacks
                        launch_ctx.set_arg_external_array_with_shape(
                            actual_argument_slot, int(tmp.data_ptr()),
                            tmp.element_size() * tmp.nelement(), v.shape)
                elif isinstance(needed, MatrixType):
                    if id(needed.dtype) in primitive_types.real_type_ids:
                        for a in range(needed.n):
                            for b in range(needed.m):
                                if not isinstance(v[a, b], (int, float)):
                                    raise TaichiRuntimeTypeError(
                                        i, needed.dtype.to_string(),
                                        type(v[a, b]))
                                launch_ctx.set_arg_float(
                                    actual_argument_slot, float(v[a, b]))
                                actual_argument_slot += 1
                    elif id(needed.dtype) in primitive_types.integer_type_ids:
                        for a in range(needed.n):
                            for b in range(needed.m):
                                if not isinstance(v[a, b], int):
                                    raise TaichiRuntimeTypeError(
                                        i, needed.dtype.to_string(),
                                        type(v[a, b]))
                                launch_ctx.set_arg_int(actual_argument_slot,
                                                       int(v[a, b]))
                                actual_argument_slot += 1
                    else:
                        raise ValueError(
                            f'Matrix dtype {needed.dtype} is not integer type or real type.'
                        )
                    continue
                else:
                    raise ValueError(
                        f'Argument type mismatch. Expecting {needed}, got {type(v)}.'
                    )
                actual_argument_slot += 1
            # Both the class kernels and the plain-function kernels are unified now.
            # In both cases, |self.grad| is another Kernel instance that computes the
            # gradient. For class kernels, args[0] is always the kernel owner.
            if not self.is_grad and self.runtime.target_tape and not self.runtime.grad_replaced:
                self.runtime.target_tape.insert(self, args)

            t_kernel(launch_ctx)

            ret = None
            ret_dt = self.return_type
            has_ret = ret_dt is not None

            if has_ret or (impl.current_cfg().async_mode
                           and has_external_arrays):
                runtime_ops.sync()

            if has_ret:
                if id(ret_dt) in primitive_types.integer_type_ids:
                    ret = t_kernel.get_ret_int(0)
                else:
                    ret = t_kernel.get_ret_float(0)

            if callbacks:
                for c in callbacks:
                    c()

            return ret

        return func__
@staticmethod def match_ext_arr(v): has_array = isinstance(v, np.ndarray) if not has_array and has_pytorch(): has_array = isinstance(v, torch.Tensor) return has_array def ensure_compiled(self, *args): instance_id, arg_features = self.mapper.lookup(args) key = (self.func, instance_id) self.materialize(key=key, args=args, arg_features=arg_features) return key # For small kernels (< 3us), the performance can be pretty sensitive to overhead in __call__ # Thus this part needs to be fast. (i.e. < 3us on a 4 GHz x64 CPU) @_shell_pop_print def __call__(self, *args, **kwargs): if self.is_grad and impl.current_cfg().opt_level == 0: _logging.warn( """opt_level = 1 is enforced to enable gradient computation.""" ) impl.current_cfg().opt_level = 1 assert len(kwargs) == 0, 'kwargs not supported for Taichi kernels' key = self.ensure_compiled(*args) return self.compiled_functions[key](*args) # For a Taichi class definition like below: # # @ti.data_oriented # class X: # @ti.kernel # def foo(self): # ... # # When ti.kernel runs, the stackframe's |code_context| of Python 3.8(+) is # different from that of Python 3.7 and below. In 3.8+, it is 'class X:', # whereas in <=3.7, it is '@ti.data_oriented'. More interestingly, if the class # inherits, i.e. class X(object):, then in both versions, |code_context| is # 'class X(object):'... _KERNEL_CLASS_STACKFRAME_STMT_RES = [ re.compile(r'@(\w+\.)?data_oriented'), re.compile(r'class '), ] def _inside_class(level_of_class_stackframe): try: maybe_class_frame = sys._getframe(level_of_class_stackframe) statement_list = inspect.getframeinfo(maybe_class_frame)[3] first_statment = statement_list[0].strip() for pat in _KERNEL_CLASS_STACKFRAME_STMT_RES: if pat.match(first_statment): return True except: pass return False def _kernel_impl(_func, level_of_class_stackframe, verbose=False): # Can decorators determine if a function is being defined inside a class? # https://stackoverflow.com/a/8793684/12003165 is_classkernel = _inside_class(level_of_class_stackframe + 1) if verbose: print(f'kernel={_func.__name__} is_classkernel={is_classkernel}') primal = Kernel(_func, is_grad=False, _classkernel=is_classkernel) adjoint = Kernel(_func, is_grad=True, _classkernel=is_classkernel) # Having |primal| contains |grad| makes the tape work. primal.grad = adjoint if is_classkernel: # For class kernels, their primal/adjoint callables are constructed # when the kernel is accessed via the instance inside # _BoundedDifferentiableMethod. # This is because we need to bind the kernel or |grad| to the instance # owning the kernel, which is not known until the kernel is accessed. # # See also: _BoundedDifferentiableMethod, data_oriented. @functools.wraps(_func) def wrapped(*args, **kwargs): # If we reach here (we should never), it means the class is not decorated # with @ti.data_oriented, otherwise getattr would have intercepted the call. clsobj = type(args[0]) assert not hasattr(clsobj, '_data_oriented') raise TaichiSyntaxError( f'Please decorate class {clsobj.__name__} with @ti.data_oriented' ) else: @functools.wraps(_func) def wrapped(*args, **kwargs): try: return primal(*args, **kwargs) except TaichiCompilationError as e: raise type(e)('\n' + str(e)) from None wrapped.grad = adjoint wrapped._is_wrapped_kernel = True wrapped._is_classkernel = is_classkernel wrapped._primal = primal wrapped._adjoint = adjoint return wrapped def kernel(fn): """Marks a function as a Taichi kernel. A Taichi kernel is a function written in Python, and gets JIT compiled by Taichi into native CPU/GPU instructions (e.g. 
a series of CUDA kernels). The top-level ``for`` loops are automatically parallelized, and distributed to either a CPU thread pool or massively parallel GPUs. Kernel's gradient kernel would be generated automatically by the AutoDiff system. See also https://docs.taichi.graphics/lang/articles/basic/syntax#kernels. Args: fn (Callable): the Python function to be decorated Returns: Callable: The decorated function Example:: >>> x = ti.field(ti.i32, shape=(4, 8)) >>> >>> @ti.kernel >>> def run(): >>> # Assigns all the elements of `x` in parallel. >>> for i in x: >>> x[i] = i """ return _kernel_impl(fn, level_of_class_stackframe=3) class _BoundedDifferentiableMethod: def __init__(self, kernel_owner, wrapped_kernel_func): clsobj = type(kernel_owner) if not getattr(clsobj, '_data_oriented', False): raise TaichiSyntaxError( f'Please decorate class {clsobj.__name__} with @ti.data_oriented' ) self._kernel_owner = kernel_owner self._primal = wrapped_kernel_func._primal self._adjoint = wrapped_kernel_func._adjoint self._is_staticmethod = wrapped_kernel_func._is_staticmethod self.__name__ = None def __call__(self, *args, **kwargs): if self._is_staticmethod: return self._primal(*args, **kwargs) return self._primal(self._kernel_owner, *args, **kwargs) def grad(self, *args, **kwargs): return self._adjoint(self._kernel_owner, *args, **kwargs) def data_oriented(cls): """Marks a class as Taichi compatible. To allow for modularized code, Taichi provides this decorator so that Taichi kernels can be defined inside a class. See also https://docs.taichi.graphics/lang/articles/advanced/odop Example:: >>> @ti.data_oriented >>> class TiArray: >>> def __init__(self, n): >>> self.x = ti.field(ti.f32, shape=n) >>> >>> @ti.kernel >>> def inc(self): >>> for i in self.x: >>> self.x[i] += 1.0 >>> >>> a = TiArray(32) >>> a.inc() Args: cls (Class): the class to be decorated Returns: The decorated class. """ def _getattr(self, item): method = cls.__dict__.get(item, None) is_property = method.__class__ == property is_staticmethod = method.__class__ == staticmethod if is_property: x = method.fget else: x = super(cls, self).__getattribute__(item) if hasattr(x, '_is_wrapped_kernel'): if inspect.ismethod(x): wrapped = x.__func__ else: wrapped = x wrapped._is_staticmethod = is_staticmethod assert inspect.isfunction(wrapped) if wrapped._is_classkernel: ret = _BoundedDifferentiableMethod(self, wrapped) ret.__name__ = wrapped.__name__ if is_property: return ret() return ret if is_property: return x(self) return x cls.__getattribute__ = _getattr cls._data_oriented = True return cls __all__ = ["data_oriented", "func", "kernel"]
def func__(*args): assert len(args) == len( self.argument_annotations ), f'{len(self.argument_annotations)} arguments needed but {len(args)} provided' tmps = [] callbacks = [] has_external_arrays = False has_torch = has_pytorch() ndarray_use_torch = impl.get_runtime().ndarray_use_torch actual_argument_slot = 0 launch_ctx = t_kernel.make_launch_context() for i, v in enumerate(args): needed = self.argument_annotations[i] if isinstance(needed, template): continue provided = type(v) # Note: do not use sth like "needed == f32". That would be slow. if id(needed) in primitive_types.real_type_ids: if not isinstance(v, (float, int)): raise TaichiRuntimeTypeError(i, needed.to_string(), provided) launch_ctx.set_arg_float(actual_argument_slot, float(v)) elif id(needed) in primitive_types.integer_type_ids: if not isinstance(v, int): raise TaichiRuntimeTypeError(i, needed.to_string(), provided) launch_ctx.set_arg_int(actual_argument_slot, int(v)) elif isinstance(needed, sparse_matrix_builder): # Pass only the base pointer of the ti.linalg.sparse_matrix_builder() argument launch_ctx.set_arg_int(actual_argument_slot, v.get_addr()) elif isinstance(needed, any_arr) and isinstance( v, taichi.lang._ndarray.Ndarray): has_external_arrays = True v = v.arr if ndarray_use_torch: is_ndarray = True tmp, torch_callbacks = self.get_torch_callbacks( v, has_torch, is_ndarray) callbacks += torch_callbacks launch_ctx.set_arg_external_array_with_shape( actual_argument_slot, int(tmp.data_ptr()), tmp.element_size() * tmp.nelement(), v.shape) else: launch_ctx.set_arg_ndarray(actual_argument_slot, v) elif isinstance(needed, any_arr) and (self.match_ext_arr(v)): has_external_arrays = True is_numpy = isinstance(v, np.ndarray) if is_numpy: tmp = np.ascontiguousarray(v) # Purpose: DO NOT GC |tmp|! tmps.append(tmp) launch_ctx.set_arg_external_array_with_shape( actual_argument_slot, int(tmp.ctypes.data), tmp.nbytes, v.shape) else: is_ndarray = False tmp, torch_callbacks = self.get_torch_callbacks( v, has_torch, is_ndarray) callbacks += torch_callbacks launch_ctx.set_arg_external_array_with_shape( actual_argument_slot, int(tmp.data_ptr()), tmp.element_size() * tmp.nelement(), v.shape) elif isinstance(needed, MatrixType): if id(needed.dtype) in primitive_types.real_type_ids: for a in range(needed.n): for b in range(needed.m): if not isinstance(v[a, b], (int, float)): raise TaichiRuntimeTypeError( i, needed.dtype.to_string(), type(v[a, b])) launch_ctx.set_arg_float( actual_argument_slot, float(v[a, b])) actual_argument_slot += 1 elif id(needed.dtype) in primitive_types.integer_type_ids: for a in range(needed.n): for b in range(needed.m): if not isinstance(v[a, b], int): raise TaichiRuntimeTypeError( i, needed.dtype.to_string(), type(v[a, b])) launch_ctx.set_arg_int(actual_argument_slot, int(v[a, b])) actual_argument_slot += 1 else: raise ValueError( f'Matrix dtype {needed.dtype} is not integer type or real type.' ) continue else: raise ValueError( f'Argument type mismatch. Expecting {needed}, got {type(v)}.' ) actual_argument_slot += 1 # Both the class kernels and the plain-function kernels are unified now. # In both cases, |self.grad| is another Kernel instance that computes the # gradient. For class kernels, args[0] is always the kernel owner. 
if not self.is_grad and self.runtime.target_tape and not self.runtime.grad_replaced: self.runtime.target_tape.insert(self, args) t_kernel(launch_ctx) ret = None ret_dt = self.return_type has_ret = ret_dt is not None if has_ret or (impl.current_cfg().async_mode and has_external_arrays): runtime_ops.sync() if has_ret: if id(ret_dt) in primitive_types.integer_type_ids: ret = t_kernel.get_ret_int(0) else: ret = t_kernel.get_ret_float(0) if callbacks: for c in callbacks: c() return ret return func__
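The stack-frame trick documented above (with its `code_context` caveat across Python versions) is easy to reproduce standalone. Below is a minimal sketch of the same class-body detection; the names (`mark`, `_defined_inside_class`, `Demo`) are illustrative, not Taichi's internals:

```python
import inspect
import re
import sys

_CLASS_STMT_RES = [re.compile(r'@(\w+\.)?data_oriented'), re.compile(r'class ')]

def _defined_inside_class(stack_depth):
    # Inspect the frame that is executing the decoration: if the statement
    # there looks like a class header (or a @data_oriented decorator), the
    # decorated function is being defined inside a class body.
    try:
        frame = sys._getframe(stack_depth)
        context = inspect.getframeinfo(frame).code_context
        first_statement = context[0].strip()
        return any(pat.match(first_statement) for pat in _CLASS_STMT_RES)
    except Exception:
        return False

def mark(func):
    # Depth 2: skip _defined_inside_class (0) and mark (1) to reach the caller.
    func._is_classkernel = _defined_inside_class(2)
    return func

class Demo:
    @mark
    def method(self):
        pass

@mark
def free_function():
    pass

print(Demo.method._is_classkernel)    # True on CPython 3.8+ ('class Demo:')
print(free_function._is_classkernel)  # False (module-level decoration)
```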
import8.rs
import foo::x; import z = foo::x; mod foo { fn
(y: int) { log(debug, y); } } fn main() { x(10); z(10); }
x
renderer.rs
use crate::{ resources::{ RendererCamera, RendererMaterial, RendererMesh, RendererPipeline, RendererTexture, Vertex }, instance::Instance, renderer_resource_storage::RendererResourceStorage }; use pill_engine::internal::{ PillRenderer, EntityHandle, RenderQueueItem, RendererError, TextureType, MeshData, MaterialTextureMap, TransformComponent, ComponentStorage, CameraComponent, MaterialParameterMap, RendererCameraHandle, RendererMaterialHandle, RendererMeshHandle, RendererPipelineHandle, RendererTextureHandle, RENDER_QUEUE_KEY_ORDER, get_renderer_resource_handle_from_camera_component, }; use pill_core::{ PillSlotMapKey, PillSlotMapKeyData, PillStyle }; use std::{ iter, num::{ NonZeroU32 }, ops::Range, mem::size_of, }; use anyhow::{ Result }; use log::{ info }; pub const MAX_INSTANCE_PER_DRAWCALL_COUNT: usize = 10000; pub const INITIAL_INSTANCE_VECTOR_CAPACITY: usize = 10000; // Default resource handle - Master pipeline pub const MASTER_PIPELINE_HANDLE: RendererPipelineHandle = RendererPipelineHandle { 0: PillSlotMapKeyData { index: 1, version: unsafe { std::num::NonZeroU32::new_unchecked(1) } } }; pub struct Renderer { pub state: State, } impl PillRenderer for Renderer { fn new(window: &winit::window::Window, config: config::Config) -> Self { info!("Initializing {}", "Renderer".mobj_style()); let state: State = pollster::block_on(State::new(&window, config)); Self { state, } } fn resize(&mut self, new_window_size: winit::dpi::PhysicalSize<u32>) { info!("Resizing {} resources", "Renderer".mobj_style()); self.state.resize(new_window_size) } fn set_master_pipeline(&mut self, vertex_shader_bytes: &[u8], fragment_shader_bytes: &[u8]) -> Result<()> { // Create shaders let vertex_shader = wgpu::ShaderModuleDescriptor { label: Some("master_vertex_shader"), source: wgpu::util::make_spirv(vertex_shader_bytes), }; let vertex_shader = self.state.device.create_shader_module(&vertex_shader); let fragment_shader = wgpu::ShaderModuleDescriptor { label: Some("master_fragment_shader"), source: wgpu::util::make_spirv(fragment_shader_bytes), }; let fragment_shader = self.state.device.create_shader_module(&fragment_shader); // Create master pipeline let master_pipeline = RendererPipeline::new( &self.state.device, vertex_shader, fragment_shader, self.state.color_format, Some(self.state.depth_format), &[RendererMesh::data_layout_descriptor(), Instance::data_layout_descriptor()], ).unwrap(); self.state.renderer_resource_storage.pipelines.insert(master_pipeline); Ok(()) } fn create_mesh(&mut self, name: &str, mesh_data: &MeshData) -> Result<RendererMeshHandle> { let mesh = RendererMesh::new(&self.state.device, name, mesh_data)?; let handle = self.state.renderer_resource_storage.meshes.insert(mesh); Ok(handle) } fn create_texture(&mut self, name: &str, image_data: &image::DynamicImage, texture_type: TextureType) -> Result<RendererTextureHandle> { let texture = RendererTexture::new_texture(&self.state.device, &self.state.queue, Some(name), image_data, texture_type)?; let handle = self.state.renderer_resource_storage.textures.insert(texture); Ok(handle) } fn create_material(&mut self, name: &str, textures: &MaterialTextureMap, parameters: &MaterialParameterMap) -> Result<RendererMaterialHandle> { let pipeline_handle = MASTER_PIPELINE_HANDLE; let pipeline = self.state.renderer_resource_storage.pipelines.get(pipeline_handle).unwrap(); let material = RendererMaterial::new( &self.state.device, &self.state.queue, &self.state.renderer_resource_storage, name, pipeline_handle, &pipeline.material_texture_bind_group_layout, 
textures, &pipeline.material_parameter_bind_group_layout, parameters, ).unwrap(); let handle = self.state.renderer_resource_storage.materials.insert(material); Ok(handle) } fn create_camera(&mut self) -> Result<RendererCameraHandle> { let pipeline_handle = MASTER_PIPELINE_HANDLE; let pipeline = self.state.renderer_resource_storage.pipelines.get(pipeline_handle).unwrap(); let camera_bind_group_layout = &pipeline.camera_bind_group_layout; let camera = RendererCamera::new(&self.state.device, camera_bind_group_layout)?; let handle = self.state.renderer_resource_storage.cameras.insert(camera); Ok(handle) } fn update_material_textures(&mut self, renderer_material_handle: RendererMaterialHandle, textures: &MaterialTextureMap) -> Result<()> { RendererMaterial::update_textures(&self.state.device, renderer_material_handle, &mut self.state.renderer_resource_storage, textures) } fn update_material_parameters(&mut self, renderer_material_handle: RendererMaterialHandle, parameters: &MaterialParameterMap) -> Result<()> { RendererMaterial::update_parameters(&self.state.device, &self.state.queue, renderer_material_handle, &mut self.state.renderer_resource_storage, parameters) } fn destroy_mesh(&mut self, renderer_mesh_handle: RendererMeshHandle) -> Result<()> { self.state.renderer_resource_storage.meshes.remove(renderer_mesh_handle).unwrap(); Ok(()) } fn destroy_texture(&mut self, renderer_texture_handle: RendererTextureHandle) -> Result<()> { self.state.renderer_resource_storage.textures.remove(renderer_texture_handle).unwrap(); Ok(()) } fn destroy_material(&mut self, renderer_material_handle: RendererMaterialHandle) -> Result<()> { self.state.renderer_resource_storage.materials.remove(renderer_material_handle).unwrap(); Ok(()) } fn destroy_camera(&mut self, renderer_camera_handle: RendererCameraHandle) -> Result<()> { self.state.renderer_resource_storage.cameras.remove(renderer_camera_handle).unwrap(); Ok(()) } fn render( &mut self, active_camera_entity_handle: EntityHandle, render_queue: &Vec<RenderQueueItem>, camera_component_storage: &ComponentStorage<CameraComponent>, transform_component_storage: &ComponentStorage<TransformComponent> ) -> Result<(), RendererError> { self.state.render( active_camera_entity_handle, render_queue, camera_component_storage, transform_component_storage) } } pub struct State { // Resources renderer_resource_storage: RendererResourceStorage, // Renderer variables surface: wgpu::Surface, device: wgpu::Device, queue: wgpu::Queue, surface_configuration: wgpu::SurfaceConfiguration, window_size: winit::dpi::PhysicalSize<u32>, color_format: wgpu::TextureFormat, depth_format: wgpu::TextureFormat, depth_texture: RendererTexture, mesh_drawer: MeshDrawer, // Other config: config::Config, } impl State { // Creating some of the wgpu types requires async code async fn new(window: &winit::window::Window, config: config::Config) -> Self { let window_size = window.inner_size(); let instance = wgpu::Instance::new(wgpu::Backends::all()); let surface = unsafe { instance.create_surface(window) }; // Specify adapter options (Options passed here are not guaranteed to work for all devices) let request_adapter_options = wgpu::RequestAdapterOptions { power_preference: wgpu::PowerPreference::default(), compatible_surface: Some(&surface), force_fallback_adapter: false, }; // Create adapter let adapter = instance.request_adapter(&request_adapter_options).await.unwrap(); let adapter_info = adapter.get_info(); info!("Using GPU: {} ({:?})", adapter_info.name, adapter_info.backend); // Create device 
descriptor
        let device_descriptor = wgpu::DeviceDescriptor {
            label: None,
            features: wgpu::Features::empty(), // Specifies which extra GPU features need to be enabled (e.g. depth clamping, push constants, texture compression, etc.)
            limits: wgpu::Limits::default(), // Specifies limits on certain kinds of resources that will be used (e.g. max samplers, uniform buffers, etc.)
        };

        // Create device and queue
        let (device, queue) = adapter.request_device(&device_descriptor, None).await.unwrap();

        // Specify surface configuration
        let surface_configuration = wgpu::SurfaceConfiguration {
            usage: wgpu::TextureUsages::RENDER_ATTACHMENT, // Defines how the surface's underlying textures will be used
            format: surface.get_preferred_format(&adapter).unwrap(), // Defines how the surface's textures will be stored on the GPU
            width: window_size.width,
            height: window_size.height,
            present_mode: wgpu::PresentMode::Mailbox, // Defines how to sync the surface with the display
        };

        // Configure surface
        surface.configure(&device, &surface_configuration);

        // Configure collections
        let renderer_resource_storage = RendererResourceStorage::new(&config);

        // Create depth and color texture
        let depth_texture = RendererTexture::new_depth_texture(
            &device,
            &surface_configuration,
            "depth_texture"
        ).unwrap();
        let color_format = surface_configuration.format;
        let depth_format = wgpu::TextureFormat::Depth32Float;

        // Create drawing state
        let mesh_drawer = MeshDrawer::new(&device, MAX_INSTANCE_PER_DRAWCALL_COUNT as u32);

        // Create state
        Self {
            // Resources
            renderer_resource_storage,

            // Renderer variables
            surface,
            device,
            queue,
            surface_configuration,
            window_size,
            color_format,
            depth_format,
            depth_texture,
            mesh_drawer,

            // Other
            config,
        }
    }

    fn resize(&mut self, new_window_size: winit::dpi::PhysicalSize<u32>) {
        if new_window_size.width > 0 && new_window_size.height > 0
} fn render( &mut self, active_camera_entity_handle: EntityHandle, render_queue: &Vec<RenderQueueItem>, camera_component_storage: &ComponentStorage<CameraComponent>, transform_component_storage: &ComponentStorage<TransformComponent> ) -> Result<(), RendererError> { // Get frame or return mapped error if failed let frame = self.surface.get_current_texture(); let frame = match frame { Ok(frame) => frame, Err(error) => match error { wgpu::SurfaceError::Lost => return Err(RendererError::SurfaceLost), wgpu::SurfaceError::OutOfMemory => return Err(RendererError::SurfaceOutOfMemory), _ => return Err(RendererError::SurfaceOther), }, }; let view = frame.texture.create_view(&wgpu::TextureViewDescriptor::default()); // Get active camera and update it let camera_storage = camera_component_storage.data.get(active_camera_entity_handle.data().index as usize).unwrap(); let active_camera_component = camera_storage.as_ref().unwrap(); let renderer_camera = self.renderer_resource_storage.cameras.get_mut(get_renderer_resource_handle_from_camera_component(active_camera_component)).ok_or(RendererError::RendererResourceNotFound)?; let camera_transform_storage = transform_component_storage.data.get(active_camera_entity_handle.data().index as usize).unwrap(); let active_camera_transform_component = camera_transform_storage.as_ref().unwrap(); renderer_camera.update(&self.queue, active_camera_component, active_camera_transform_component); let renderer_camera = self.renderer_resource_storage.cameras.get(get_renderer_resource_handle_from_camera_component(active_camera_component)).unwrap(); let clear_color = active_camera_component.clear_color; // Build a command buffer that can be sent to the GPU let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: Some("render_encoder"), }); { // Additional scope to release mutable borrow of encoder done by begin_render_pass // Create color attachment let color_attachment = wgpu::RenderPassColorAttachment { view: &view, // Specifies what texture to save the colors to resolve_target: None, // Specifies what texture will receive the resolved output ops: wgpu::Operations { // Specifies what to do with the colors on the screen load: wgpu::LoadOp::Clear(wgpu::Color { r: clear_color.x as f64, g: clear_color.y as f64, b: clear_color.z as f64, a: 1.0, } ), // Specifies how to handle colors stored from the previous frame store: true, }, }; // Create depth attachment let depth_stencil_attachment = wgpu::RenderPassDepthStencilAttachment { view: &self.depth_texture.texture_view, depth_ops: Some(wgpu::Operations { load: wgpu::LoadOp::Clear(1.0), store: true, }), stencil_ops: None, }; self.mesh_drawer.record_draw_commands( &self.queue, &mut encoder, &self.renderer_resource_storage, color_attachment, depth_stencil_attachment, &renderer_camera, &render_queue, &transform_component_storage ) } self.queue.submit(iter::once(encoder.finish())); // Finish command buffer and submit it to the GPU's render queue frame.present(); Ok(()) } } pub struct MeshDrawer { current_rendering_order: u8, current_pipeline_handle: Option<RendererPipelineHandle>, current_material_handle: Option<RendererMaterialHandle>, current_mesh_handle: Option<RendererMeshHandle>, current_mesh_index_count: u32, max_instance_count: u32, instances: Vec::<Instance>, instance_buffer: wgpu::Buffer, instance_range: Range<u32>, } impl MeshDrawer { pub fn new(device: &wgpu::Device, max_instance_count: u32) -> Self { // Create instance buffer let buffer_size = (size_of::<Instance>() * max_instance_count as 
usize) as u64; let instance_buffer = device.create_buffer(&wgpu::BufferDescriptor { label: Some("instance_buffer"), size: buffer_size, usage: wgpu::BufferUsages::VERTEX | wgpu::BufferUsages::COPY_DST, mapped_at_creation: false, }); MeshDrawer { current_rendering_order: 0, current_pipeline_handle: None, current_material_handle: None, current_mesh_handle: None, current_mesh_index_count: 0, max_instance_count, instances: Vec::<Instance>::with_capacity(INITIAL_INSTANCE_VECTOR_CAPACITY), instance_buffer, instance_range: 0..0, // Start inclusive, end exclusive (e.g. 0..3 means indices 0, 1, 2. e.g. 5..7 means indices 5, 6) } } pub fn record_draw_commands( &mut self, // Resources queue: &wgpu::Queue, encoder: &mut wgpu::CommandEncoder, renderer_resource_storage: &RendererResourceStorage, color_attachment: wgpu::RenderPassColorAttachment, depth_stencil_attachment: wgpu::RenderPassDepthStencilAttachment, // Rendring data camera: &RendererCamera, render_queue: &Vec::<RenderQueueItem>, transform_component_storage: &ComponentStorage<TransformComponent> ) { // Prepare instance data and load it to buffer let render_queue_iter = render_queue.iter(); for render_queue_item in render_queue_iter { let transform_slot = transform_component_storage.data.get(render_queue_item.entity_index as usize).unwrap(); let transform_component = transform_slot.as_ref().unwrap(); self.instances.push(Instance::new(transform_component)); } queue.write_buffer(&self.instance_buffer, 0, bytemuck::cast_slice(&self.instances)); // Update instance buffer self.instances.clear(); // Start encoding render pass let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor { // Use the encoder to create a RenderPass label: Some("render_pass"), color_attachments: &[color_attachment], depth_stencil_attachment: Some(depth_stencil_attachment), }); render_pass.set_vertex_buffer(1, self.instance_buffer.slice(..)); // Set instance buffer let render_queue_iter = render_queue.iter(); for render_queue_item in render_queue_iter { let render_queue_key_fields = pill_engine::internal::decompose_render_queue_key(render_queue_item.key).unwrap(); // Recreate resource handles let renderer_material_handle = RendererMaterialHandle::new(render_queue_key_fields.material_index.into(), NonZeroU32::new(render_queue_key_fields.material_version.into()).unwrap()); let renderer_mesh_handle = RendererMeshHandle::new(render_queue_key_fields.mesh_index.into(), NonZeroU32::new(render_queue_key_fields.mesh_version.into()).unwrap()); // Check rendering order if self.current_rendering_order > render_queue_key_fields.order { if self.get_accumulated_instance_count() > 0 { render_pass.draw_indexed(0..self.current_mesh_index_count, 0, self.instance_range.clone()); self.instance_range = self.instance_range.end..self.instance_range.end; } // Set new order self.current_rendering_order = render_queue_key_fields.order; } // Check material if self.current_material_handle != Some(renderer_material_handle) { // Render accumulated instances if self.get_accumulated_instance_count() > 0 { render_pass.draw_indexed(0..self.current_mesh_index_count, 0, self.instance_range.clone()); self.instance_range = self.instance_range.end..self.instance_range.end; } // Set new material self.current_material_handle = Some(renderer_material_handle); let material = renderer_resource_storage.materials.get(self.current_material_handle.unwrap()).unwrap(); // Set pipeline if new material is using different one if self.current_pipeline_handle != Some(material.pipeline_handle) { 
self.current_pipeline_handle = Some(material.pipeline_handle); let pipeline = renderer_resource_storage.pipelines.get( self.current_pipeline_handle.unwrap()).unwrap(); render_pass.set_pipeline(&pipeline.render_pipeline); } render_pass.set_bind_group(0, &material.texture_bind_group, &[]); render_pass.set_bind_group(1, &material.parameter_bind_group, &[]); render_pass.set_bind_group(2, &camera.bind_group, &[]); } // Check mesh if self.current_mesh_handle != Some(renderer_mesh_handle) { // Render accumulated instances if self.get_accumulated_instance_count() > 0 { render_pass.draw_indexed(0..self.current_mesh_index_count, 0, self.instance_range.clone()); self.instance_range = self.instance_range.end..self.instance_range.end; } // Set new mesh self.current_mesh_handle = Some(renderer_mesh_handle); let mesh = renderer_resource_storage.meshes.get(self.current_mesh_handle.unwrap()).unwrap(); self.current_mesh_index_count = mesh.index_count; render_pass.set_vertex_buffer(0, mesh.vertex_buffer.slice(..)); render_pass.set_index_buffer(mesh.index_buffer.slice(..), wgpu::IndexFormat::Uint32); } // Check max instance per draw call count if self.get_accumulated_instance_count() >= self.max_instance_count { render_pass.draw_indexed(0..self.current_mesh_index_count, 0, self.instance_range.clone()); self.instance_range = self.instance_range.end..self.instance_range.end; } else { // Add new instance self.instance_range = self.instance_range.start..self.instance_range.end + 1; } } // End of render queue so draw remaining saved objects if self.get_accumulated_instance_count() > 0 { render_pass.draw_indexed(0..self.current_mesh_index_count, 0, self.instance_range.clone()); self.instance_range = self.instance_range.end..self.instance_range.end; } // Reset state of mesh drawer self.current_rendering_order = RENDER_QUEUE_KEY_ORDER.max as u8; self.current_pipeline_handle = None; self.current_material_handle = None; self.current_mesh_handle = None; self.current_mesh_index_count = 0; self.instance_range = 0..0; } fn get_accumulated_instance_count(&self) -> u32 { self.instance_range.end - self.instance_range.start } }
{ self.window_size = new_window_size; self.surface_configuration.width = new_window_size.width; self.surface_configuration.height = new_window_size.height; self.surface.configure(&self.device, &self.surface_configuration); self.depth_texture = RendererTexture::new_depth_texture( &self.device, &self.surface_configuration, "depth_texture", ).unwrap(); }
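The core of `MeshDrawer.record_draw_commands` above is flush-on-state-change batching: consecutive queue items that share the same order/material/mesh extend the open `instance_range` (start inclusive, end exclusive), and a draw call is emitted whenever one of those keys changes or the per-call cap is hit. A rough Python model of that loop, under the assumption of a pre-sorted queue and hypothetical `(order, material, mesh)` tuples instead of GPU state:

```python
def batch_draw_calls(render_queue, max_instances_per_call):
    draws = []                 # collected (material, mesh, start, end) ranges
    start = end = 0            # current instance range: start inclusive, end exclusive
    current = None             # (order, material, mesh) key of the open batch

    def flush():
        nonlocal start
        if end > start:        # only emit a draw when instances were accumulated
            draws.append((current[1], current[2], start, end))
            start = end        # next range begins where this one stopped

    for key in render_queue:
        if key != current:     # order/material/mesh changed -> draw what we have
            flush()
            current = key
        if end - start >= max_instances_per_call:
            flush()            # cap reached -> split into another draw call
        end += 1               # the item joins the open batch

    flush()                    # end of queue: draw whatever is still accumulated
    return draws

calls = batch_draw_calls(
    [(0, 'mat_a', 'cube')] * 3 + [(0, 'mat_a', 'sphere')] * 2 + [(0, 'mat_b', 'cube')],
    max_instances_per_call=10000,
)
print(calls)  # [('mat_a', 'cube', 0, 3), ('mat_a', 'sphere', 3, 5), ('mat_b', 'cube', 5, 6)]
```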
meta.rs
use std::any::TypeId; use std::marker::PhantomData; use hashbrown::hash_map::{Entry, HashMap}; use mopa::Any; use crate::resource::{Resource, ResourceId}; use super::World; /// The `MetaTable` which allows to store object-safe trait implementations for /// resources. /// /// For example, you have a trait `Foo` that is implemented by several /// resources. You can register all the implementors using /// `MetaTable::register`. Later on, you can iterate over all resources that /// implement `Foo` without knowing their specific type. /// /// # Examples /// /// ``` /// use async_ecs::*; /// /// trait Object { /// fn method1(&self) -> i32; /// /// fn method2(&mut self, x: i32); /// } /// /// unsafe impl<T> CastFrom<T> for dyn Object /// where /// T: Object + 'static, /// { /// fn cast(t: &T) -> &Self { /// t /// } /// /// fn cast_mut(t: &mut T) -> &mut Self { /// t /// } /// } /// /// struct ImplementorA(i32); /// /// impl Object for ImplementorA { /// fn method1(&self) -> i32 { /// self.0 /// } /// /// fn method2(&mut self, x: i32) { /// self.0 += x; /// } /// } /// /// struct ImplementorB(i32); /// /// impl Object for ImplementorB { /// fn method1(&self) -> i32 { /// self.0 /// } /// /// fn method2(&mut self, x: i32) { /// self.0 *= x; /// } /// } /// /// let mut world = World::default(); /// world.insert(ImplementorA(3)); /// world.insert(ImplementorB(1)); /// /// let mut table = MetaTable::<dyn Object>::new(); /// table.register(&ImplementorA(31415)); // Can just be some instance of type `&ImplementorA`. /// table.register(&ImplementorB(27182)); /// /// { /// let mut iter = table.iter(&mut world); /// assert_eq!(iter.next().unwrap().method1(), 3); /// assert_eq!(iter.next().unwrap().method1(), 1); /// } /// ``` pub struct MetaTable<T: ?Sized> { fat: Vec<FatPtr>, tys: Vec<TypeId>, indices: HashMap<TypeId, usize>, marker: PhantomData<Invariant<T>>, } impl<T: ?Sized> MetaTable<T> { /// Creates a new `MetaTable`. pub fn new() -> Self { assert_unsized::<T>(); Default::default() } /// Registers a resource `R` that implements the trait `T`. /// This just needs some instance of type `R` to retrieve the vtable. /// It doesn't have to be the same object you're calling `get` with later. pub fn register<R>(&mut self, r: &R) where R: Resource, T: CastFrom<R> + 'static, { let thin_ptr = r as *const R as usize; let casted_ptr = <T as CastFrom<R>>::cast(r); let thin_casted_ptr = casted_ptr as *const T as *const () as usize; assert_eq!( thin_ptr, thin_casted_ptr, "Bug: `CastFrom` did not cast `self`" ); let fat = unsafe { FatPtr::from_ptr(casted_ptr) }; let ty_id = TypeId::of::<R>(); // Important: ensure no entry exists twice! let len = self.indices.len(); match self.indices.entry(ty_id) { Entry::Occupied(occ) => { let ind = *occ.get(); self.fat[ind] = fat; } Entry::Vacant(vac) => { vac.insert(len); self.fat.push(fat); self.tys.push(ty_id); } } } /// Tries to convert `world` to a trait object of type `&T`. /// If `world` doesn't have an implementation for `T` (or it wasn't /// registered), this will return `None`. pub fn get<'a>(&self, res: &'a dyn Resource) -> Option<&'a T> { unsafe { self.indices .get(&Any::get_type_id(res)) .map(move |&ind| &*self.fat[ind].create_ptr(res as *const _ as *const ())) } } /// Tries to convert `world` to a trait object of type `&mut T`. /// If `world` doesn't have an implementation for `T` (or it wasn't /// registered), this will return `None`. 
pub fn get_mut<'a>(&self, res: &'a dyn Resource) -> Option<&'a mut T> { unsafe { self.indices.get(&Any::get_type_id(res)).map(move |&ind| { &mut *(self.fat[ind].create_ptr::<T>(res as *const _ as *const ()) as *mut T) }) } } /// Iterates all resources that implement `T` and were registered. pub fn iter<'a>(&'a self, res: &'a World) -> MetaIter<'a, T> { MetaIter { fat: &self.fat, index: 0, world: res, tys: &self.tys, marker: PhantomData, } } /// Iterates all resources that implement `T` and were registered mutably. pub fn iter_mut<'a>(&'a self, res: &'a World) -> MetaIterMut<'a, T> { MetaIterMut { fat: &self.fat, index: 0, world: res, tys: &self.tys, marker: PhantomData, } } } impl<T> Default for MetaTable<T> where T: ?Sized, { fn default() -> Self { MetaTable { fat: Default::default(), indices: Default::default(), tys: Default::default(), marker: Default::default(), } } } struct FatPtr(usize); impl FatPtr { unsafe fn from_ptr<T: ?Sized>(t: &T) -> Self { use std::ptr::read; assert_unsized::<T>(); let fat_ptr = &t as *const &T as *const usize; // Memory layout: // [object pointer, vtable pointer] // ^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^ // 8 bytes | 8 bytes // (on 32-bit both have 4 bytes) let vtable = read::<usize>(fat_ptr.offset(1)); Self(vtable) } unsafe fn create_ptr<T: ?Sized>(&self, ptr: *const ()) -> *const T { let fat_ptr: (*const (), usize) = (ptr, self.0); *(&fat_ptr as *const (*const (), usize) as *const *const T) } } /// Helper trait for the `MetaTable`. /// This trait is required to be implemented for a trait to be compatible with /// the meta table. /// /// # Memory safety /// /// Not casting `self` but e.g. a field to the trait object can result in severe /// memory safety issues. /// /// # Examples /// /// ``` /// use async_ecs::*; /// /// trait Foo { /// fn foo1(&self); /// fn foo2(&mut self, x: i32) -> i32; /// } /// /// unsafe impl<T> CastFrom<T> for dyn Foo /// where /// T: Foo + 'static, /// { /// fn cast(t: &T) -> &(dyn Foo + 'static) { /// t /// } /// /// fn cast_mut(t: &mut T) -> &mut (dyn Foo + 'static) { /// t /// } /// } /// ``` pub unsafe trait CastFrom<T> { /// Casts an immutable `T` reference to a trait object. fn cast(t: &T) -> &Self; /// Casts a mutable `T` reference to a trait object. fn cast_mut(t: &mut T) -> &mut Self; } /// This implements `Send` and `Sync` unconditionally. /// (the trait itself doesn't need to have these bounds and the /// resources are already guaranteed to fulfill it). struct Invariant<T: ?Sized>(*mut T); unsafe impl<T> Send for Invariant<T> where T: ?Sized {} unsafe impl<T> Sync for Invariant<T> where T: ?Sized {} /// An iterator for the `MetaTable`. pub struct MetaIter<'a, T: ?Sized + 'a> { index: usize, fat: &'a [FatPtr], tys: &'a [TypeId], world: &'a World, marker: PhantomData<Invariant<T>>, } impl<'a, T> Iterator for MetaIter<'a, T> where T: ?Sized + 'a, { type Item = &'a T; fn next(&mut self) -> Option<<Self as Iterator>::Item> { let index = self.index; self.index += 1; let res_id: ResourceId = match self.tys.get(index) { Some(&x) => x.into(), None => return None, }; // Ugly hack that works due to `UnsafeCell` and distinct resources. unsafe { self.world .resource_raw(&res_id) .map(|res| { self.fat[index].create_ptr::<T>(Box::as_ref(&res.borrow()) as *const dyn Resource as *const ()) }) .map(|ptr| &*ptr) .or_else(|| self.next()) } } } /// A mutable iterator for the `MetaTable`. 
pub struct MetaIterMut<'a, T: ?Sized + 'a> { index: usize, fat: &'a [FatPtr], tys: &'a [TypeId], world: &'a World, marker: PhantomData<Invariant<T>>, } impl<'a, T> Iterator for MetaIterMut<'a, T> where T: ?Sized + 'a, { type Item = &'a mut T; fn next(&mut self) -> Option<<Self as Iterator>::Item> { let index = self.index; self.index += 1; let res_id: ResourceId = match self.tys.get(index) { Some(&x) => x.into(), None => return None, }; // Ugly hack that works due to `UnsafeCell` and distinct resources. unsafe { self.world .resource_raw(&res_id) .map(|res| { self.fat[index].create_ptr::<T>(Box::as_mut(&mut res.borrow_mut()) as *mut dyn Resource as *const ()) as *mut T }) .map(|ptr| &mut *ptr) .or_else(|| self.next()) } } } fn assert_unsized<T: ?Sized>() { use std::mem::size_of; assert_eq!(size_of::<&T>(), 2 * size_of::<usize>()); } #[cfg(test)] mod tests { use super::*; use World; trait Object { fn method1(&self) -> i32; fn method2(&mut self, x: i32); } unsafe impl<T> CastFrom<T> for dyn Object where T: Object + 'static, { fn cast(t: &T) -> &Self { t } fn cast_mut(t: &mut T) -> &mut Self { t } } struct ImplementorA(i32); impl Object for ImplementorA { fn method1(&self) -> i32 { self.0 } fn method2(&mut self, x: i32) { self.0 += x; } } struct ImplementorB(i32); impl Object for ImplementorB { fn method1(&self) -> i32 { self.0 } fn method2(&mut self, x: i32) { self.0 *= x; } } #[test] fn test_iter_all() { let mut world = World::default(); world.insert(ImplementorA(3)); world.insert(ImplementorB(1)); let mut table = MetaTable::<dyn Object>::new(); table.register(&ImplementorA(125)); table.register(&ImplementorB(111_111)); { let mut iter = table.iter(&world); assert_eq!(iter.next().unwrap().method1(), 3); assert_eq!(iter.next().unwrap().method1(), 1); } { let mut iter_mut = table.iter_mut(&world); let obj = iter_mut.next().unwrap(); obj.method2(3); assert_eq!(obj.method1(), 6); let obj = iter_mut.next().unwrap(); obj.method2(4); assert_eq!(obj.method1(), 4); } } #[test] fn test_iter_all_after_removal() { let mut world = World::default(); world.insert(ImplementorA(3)); world.insert(ImplementorB(1)); let mut table = MetaTable::<dyn Object>::new(); table.register(&ImplementorA(125)); table.register(&ImplementorB(111_111)); { let mut iter = table.iter(&world); assert_eq!(iter.next().unwrap().method1(), 3); assert_eq!(iter.next().unwrap().method1(), 1); } world.remove::<ImplementorA>().unwrap(); { let mut iter = table.iter(&world); assert_eq!(iter.next().unwrap().method1(), 1); } world.remove::<ImplementorB>().unwrap(); } struct ImplementorC; impl Object for ImplementorC { fn method1(&self) -> i32
fn method2(&mut self, _x: i32) { unimplemented!() } } struct ImplementorD; impl Object for ImplementorD { fn method1(&self) -> i32 { 42 } fn method2(&mut self, _x: i32) { unimplemented!() } } #[test] fn get() { let mut world = World::default(); world.insert(ImplementorC); world.insert(ImplementorD); let mut table = MetaTable::<dyn Object>::new(); table.register(&ImplementorC); table.register(&ImplementorD); assert_eq!( table .get(&*world.resource::<ImplementorC>()) .unwrap() .method1(), 33 ); assert_eq!( table .get(&*world.resource::<ImplementorD>()) .unwrap() .method1(), 42 ); // Make sure it fulfills `Resource` requirements world.insert(table); } }
{ 33 }
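Stripped of the unsafe vtable surgery, `MetaTable` is a type-keyed registry of "view this resource as the trait" casts plus a filtered iteration over the world. The Python analogue below sketches just that shape (illustrative only: Python's dynamic dispatch makes the cast trivial, which is exactly the part the Rust code reconstructs by splitting fat pointers):

```python
class MetaTable:
    """Registry mapping concrete resource types to a 'view as trait' cast.

    A Python analogue of the Rust MetaTable: the cast defaults to the
    identity, leaving the type-keyed lookup and the filtered world iteration.
    """

    def __init__(self):
        self._casts = {}                       # type -> cast function

    def register(self, resource_type, cast=lambda r: r):
        self._casts[resource_type] = cast      # re-registering overwrites, as in Rust

    def get(self, resource):
        cast = self._casts.get(type(resource))
        return cast(resource) if cast else None

    def iter(self, world):
        # Yield every resource in the world whose type was registered.
        for resource in world.values():
            view = self.get(resource)
            if view is not None:
                yield view

class ImplementorA:
    def method1(self): return 3

class ImplementorB:
    def method1(self): return 1

world = {ImplementorA: ImplementorA(), ImplementorB: ImplementorB()}
table = MetaTable()
table.register(ImplementorA)
table.register(ImplementorB)
print([obj.method1() for obj in table.iter(world)])  # [3, 1]
```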
tcp_connect.rs
//! sends requests to the server defined in tcp_server #[allow(dead_code)] mod common; use common::frob_machine; // client helpers generated automatically use easy_jsonrpc_mw::ArgSerializeError; use easy_jsonrpc_mw::BoundMethod; use easy_jsonrpc_mw::Response; use serde::Deserialize; use std::io; use std::io::{Read, Write}; use std::net::SocketAddr; use std::net::TcpStream; use std::time::Duration; fn main() { // manual example manual_frob(); // using a helper function call_over_tcp( &([127, 0, 0, 1], 4444).into(), &frob_machine::frob().unwrap(), ) .unwrap(); // abstracting even more let client = FrobClient::new(([127, 0, 0, 1], 4444).into()); client.call(frob_machine::frob()).unwrap(); client.call(frob_machine::unfrob()).unwrap(); let frob_count: i32 = client.call(frob_machine::get_frob_count()).unwrap(); dbg!(frob_count); client .call(frob_machine::ultimate_frob(vec![ 1, 2, 4, 8, -8, -4, -2, -1, ])) .unwrap(); } /// calls the frob rpc without using helper functions fn manual_frob() { let mut stream = TcpStream::connect_timeout(&([127, 0, 0, 1], 4444).into(), Duration::from_millis(500)) .expect("failed to connect"); let bound_method = frob_machine::frob().expect("failed to serialize empty argument list"); let (request, tracker) = bound_method.call(); serde_json::to_writer(&mut stream, &request.as_request()).expect("failed to write to stream"); // doesn't put a cap on the size of the response let json = serde_json::from_reader(&mut stream).expect("failed to read json from stream"); let mut response = Response::from_json_response(json).expect("server gave an invalid response"); tracker .get_return(&mut response) .expect("server did not respond to rpc"); } fn call_stream<S: Read + Write, R: Deserialize<'static>>( stream: &mut S, method: &BoundMethod<'_, R>, ) -> io::Result<R> { let (request, tracker) = method.call(); serde_json::to_writer(&mut *stream, &request.as_request()).expect("failed to write to stream"); let response = serde_json::from_reader(stream)?; let mut response = Response::from_json_response(response) .map_err(|_e| io::Error::from(io::ErrorKind::InvalidData))?; tracker .get_return(&mut response) .map_err(|_e| io::Error::from(io::ErrorKind::InvalidData)) } fn call_over_tcp<R: Deserialize<'static>>( address: &SocketAddr, method: &BoundMethod<'_, R>, ) -> io::Result<R> { let mut stream = TcpStream::connect_timeout(address, Duration::from_millis(500))?; call_stream(&mut stream, method) } struct FrobClient { address: SocketAddr, } impl FrobClient { fn new(address: SocketAddr) -> FrobClient { FrobClient { address } }
&self, method: Result<BoundMethod<'_, R>, ArgSerializeError>, ) -> io::Result<R> { call_over_tcp( &self.address, &method.map_err(|_e| io::Error::from(io::ErrorKind::InvalidInput))?, ) } }
fn call<R: Deserialize<'static>>(
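On the wire this is ordinary JSON-RPC 2.0 over a raw TCP stream: write one request object, read one response object, match them up by id. A sketch of the same exchange in Python against the example server (the request is hand-built here; the Rust helpers above generate this plumbing and the response tracking, and like the Rust version this puts no cap on response size):

```python
import json
import socket

def call_over_tcp(address, method, params, request_id=1):
    # Build a JSON-RPC 2.0 request by hand.
    request = {"jsonrpc": "2.0", "method": method, "params": params, "id": request_id}
    with socket.create_connection(address, timeout=0.5) as stream:
        stream.sendall(json.dumps(request).encode())
        # Assumes the whole response arrives in one read, which holds for the
        # small payloads of this example but is not a general framing strategy.
        response = json.loads(stream.recv(65536).decode())
    if "error" in response:
        raise RuntimeError(response["error"])
    return response["result"]

# Hypothetical usage against the tcp_server example:
# count = call_over_tcp(("127.0.0.1", 4444), "get_frob_count", [])
```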
assets.rs
use bat::assets::HighlightingAssets; /// This test ensures that we are not accidentally removing themes due to submodule updates. /// It is 'ignore'd by default because it requires themes.bin to be up-to-date. #[test] #[ignore] fn
() { let assets = HighlightingAssets::from_binary(); let mut themes: Vec<_> = assets.themes().collect(); themes.sort_unstable(); assert_eq!( themes, vec![ "1337", "Coldark-Cold", "Coldark-Dark", "DarkNeon", "Dracula", "GitHub", "Monokai Extended", "Monokai Extended Bright", "Monokai Extended Light", "Monokai Extended Origin", "Nord", "OneHalfDark", "OneHalfLight", "Solarized (dark)", "Solarized (light)", "Sublime Snazzy", "TwoDark", "Visual Studio Dark+", "ansi", "base16", "base16-256", "gruvbox-dark", "gruvbox-light", "zenburn" ] ); }
all_themes_are_present
fake_netapppool.go
/* Copyright The Kubeform Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Code generated by client-gen. DO NOT EDIT. package fake import ( "context" v1alpha1 "kubeform.dev/kubeform/apis/azurerm/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" ) // FakeNetappPools implements NetappPoolInterface type FakeNetappPools struct { Fake *FakeAzurermV1alpha1 ns string } var netapppoolsResource = schema.GroupVersionResource{Group: "azurerm.kubeform.com", Version: "v1alpha1", Resource: "netapppools"} var netapppoolsKind = schema.GroupVersionKind{Group: "azurerm.kubeform.com", Version: "v1alpha1", Kind: "NetappPool"} // Get takes name of the netappPool, and returns the corresponding netappPool object, and an error if there is any. func (c *FakeNetappPools) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.NetappPool, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(netapppoolsResource, c.ns, name), &v1alpha1.NetappPool{}) if obj == nil { return nil, err } return obj.(*v1alpha1.NetappPool), err } // List takes label and field selectors, and returns the list of NetappPools that match those selectors. func (c *FakeNetappPools) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.NetappPoolList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(netapppoolsResource, netapppoolsKind, c.ns, opts), &v1alpha1.NetappPoolList{}) if obj == nil { return nil, err } label, _, _ := testing.ExtractFromListOptions(opts) if label == nil { label = labels.Everything() } list := &v1alpha1.NetappPoolList{ListMeta: obj.(*v1alpha1.NetappPoolList).ListMeta} for _, item := range obj.(*v1alpha1.NetappPoolList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } } return list, err } // Watch returns a watch.Interface that watches the requested netappPools. func (c *FakeNetappPools) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(netapppoolsResource, c.ns, opts)) } // Create takes the representation of a netappPool and creates it. Returns the server's representation of the netappPool, and an error, if there is any. func (c *FakeNetappPools) Create(ctx context.Context, netappPool *v1alpha1.NetappPool, opts v1.CreateOptions) (result *v1alpha1.NetappPool, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(netapppoolsResource, c.ns, netappPool), &v1alpha1.NetappPool{}) if obj == nil
return obj.(*v1alpha1.NetappPool), err } // Update takes the representation of a netappPool and updates it. Returns the server's representation of the netappPool, and an error, if there is any. func (c *FakeNetappPools) Update(ctx context.Context, netappPool *v1alpha1.NetappPool, opts v1.UpdateOptions) (result *v1alpha1.NetappPool, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(netapppoolsResource, c.ns, netappPool), &v1alpha1.NetappPool{}) if obj == nil { return nil, err } return obj.(*v1alpha1.NetappPool), err } // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). func (c *FakeNetappPools) UpdateStatus(ctx context.Context, netappPool *v1alpha1.NetappPool, opts v1.UpdateOptions) (*v1alpha1.NetappPool, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(netapppoolsResource, "status", c.ns, netappPool), &v1alpha1.NetappPool{}) if obj == nil { return nil, err } return obj.(*v1alpha1.NetappPool), err } // Delete takes name of the netappPool and deletes it. Returns an error if one occurs. func (c *FakeNetappPools) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(netapppoolsResource, c.ns, name), &v1alpha1.NetappPool{}) return err } // DeleteCollection deletes a collection of objects. func (c *FakeNetappPools) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { action := testing.NewDeleteCollectionAction(netapppoolsResource, c.ns, listOpts) _, err := c.Fake.Invokes(action, &v1alpha1.NetappPoolList{}) return err } // Patch applies the patch and returns the patched netappPool. func (c *FakeNetappPools) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.NetappPool, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(netapppoolsResource, c.ns, name, pt, data, subresources...), &v1alpha1.NetappPool{}) if obj == nil { return nil, err } return obj.(*v1alpha1.NetappPool), err }
{ return nil, err }
issue-12863.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. mod foo { pub fn bar() {} }
fn main() { match () { foo::bar => {} //~ ERROR expected variant, struct or constant, found function `bar` } }
overview.component.ts
import { Component, OnInit } from '@angular/core'; import {OrderService} from './order.service'; @Component({ selector: 'app-overview', templateUrl: './overview.component.html', styleUrls: ['./overview.component.css'] }) export class
implements OnInit { orders = [{}]; constructor(private orderService: OrderService) { } ngOnInit() { this.orderService .getOrders() .subscribe((response) => { console.log(response.message); this.orders = response.data; console.log(this.orders); }); } }
OverviewComponent
proxy_scraper_provider.py
import json from pyquery import PyQuery from scylla.database import ProxyIP from .base_provider import BaseProvider class ProxyScraperProvider(BaseProvider):
    def urls(self) -> [str]:
        return ['https://raw.githubusercontent.com/sunny9577/proxy-scraper/master/proxies.json']

    def parse(self, document: PyQuery) -> [ProxyIP]:
        ip_list: [ProxyIP] = []

        text = document.html()
        # document.html() returns a string, so json.loads is required here
        # (json.load expects a file-like object and would fail on a str).
        json_object = json.loads(text)

        if not json_object or not isinstance(json_object.get('usproxy'), list):
            return ip_list

        for ip_port in json_object['usproxy']:
            p = ProxyIP(ip=ip_port['ip'], port=ip_port['port'])
            ip_list.append(p)

        return ip_list

    @staticmethod
    def should_render_js() -> bool:
        return False
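For reference, `parse` assumes the upstream proxies.json carries a `usproxy` array of `{ip, port}` objects; a quick check of that parsing path on a made-up inline sample:

```python
import json

sample = json.loads('{"usproxy": [{"ip": "127.0.0.1", "port": "8080"}]}')
assert isinstance(sample['usproxy'], list)
for entry in sample['usproxy']:
    print(entry['ip'], entry['port'])  # the fields consumed by ProxyScraperProvider.parse
```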
genesis.go
/* Copyright [2019] - [2021], PERSISTENCE TECHNOLOGIES PTE. LTD. and the persistenceCore contributors SPDX-License-Identifier: Apache-2.0 */ package application import ( "encoding/json" applicationParams "github.com/persistenceOne/persistenceCore/application/params" ) // The genesis state of the blockchain is represented here as a map of raw json // messages key'd by a identifier string. // The identifier is used to determine which module genesis information belongs // to so it may be appropriately routed during init chain. // Within this application default genesis information is retrieved from // the ModuleBasicManager which populates json from each BasicModule // object provided to it during init. type GenesisState map[string]json.RawMessage // NewDefaultGenesisState generates the default state for the application. func NewDefaultGenesisState() GenesisState
{ encCfg := applicationParams.MakeEncodingConfig() return ModuleBasics.DefaultGenesis(encCfg.Marshaler) }
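The doc comment above captures the essential shape: the genesis state is a module-identifier-to-raw-JSON map, and during init chain each module's handler receives its own blob. A minimal Python sketch of that routing idea (module names and handlers are illustrative, not the actual SDK wiring):

```python
import json

# Genesis state: raw JSON blobs keyed by module identifier.
genesis_state = {
    "bank":    json.dumps({"balances": []}),
    "staking": json.dumps({"params": {"max_validators": 100}}),
}

def init_chain(handlers, genesis):
    # Route each raw blob to the module that owns the key.
    for module, raw in genesis.items():
        handlers[module](json.loads(raw))

init_chain(
    {"bank": lambda g: print("bank init:", g),
     "staking": lambda g: print("staking init:", g)},
    genesis_state,
)
```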
lobby.go
package game

import (
	"encoding/json"
	"fmt"
	"html"
	"log"
	"math"
	"math/rand"
	"strconv"
	"strings"
	"sync"
	"time"

	commands "github.com/Bios-Marcel/cmdp"
	"github.com/Bios-Marcel/discordemojimap"
	"github.com/agnivade/levenshtein"
	petname "github.com/dustinkirkland/golang-petname"
	"github.com/kennygrant/sanitize"
)

var (
	createDeleteMutex          = &sync.Mutex{}
	lobbies           []*Lobby = nil
)

var (
	LobbySettingBounds = &SettingBounds{
		MinDrawingTime:       60,
		MaxDrawingTime:       300,
		MinRounds:            1,
		MaxRounds:            20,
		MinMaxPlayers:        2,
		MaxMaxPlayers:        24,
		MinClientsPerIPLimit: 1,
		MaxClientsPerIPLimit: 24,
	}
	SupportedLanguages = map[string]string{
		"english": "English",
		"italian": "Italian",
		"german":  "German",
		"french":  "French",
		"dutch":   "Dutch",
	}
)

// SettingBounds defines the lower and upper bounds for the user-specified
// lobby creation input.
type SettingBounds struct {
	MinDrawingTime       int64
	MaxDrawingTime       int64
	MinRounds            int64
	MaxRounds            int64
	MinMaxPlayers        int64
	MaxMaxPlayers        int64
	MinClientsPerIPLimit int64
	MaxClientsPerIPLimit int64
}

// LineEvent is basically the same as JSEvent, but with a specific Data type.
// We use this for reparsing as soon as we know that the type is right. It's
// a bit inefficient, but will do for now.
type LineEvent struct {
	Type string `json:"type"`
	Data *Line  `json:"data"`
}

// FillEvent is basically the same as JSEvent, but with a specific Data type.
// We use this for reparsing as soon as we know that the type is right. It's
// a bit inefficient, but will do for now.
type FillEvent struct {
	Type string `json:"type"`
	Data *Fill  `json:"data"`
}

func HandleEvent(raw []byte, received *JSEvent, lobby *Lobby, player *Player) error {
	if received.Type == "message" {
		dataAsString, isString := (received.Data).(string)
		if !isString {
			return fmt.Errorf("invalid data received: '%s'", received.Data)
		}

		if strings.HasPrefix(dataAsString, "!") {
			handleCommand(dataAsString[1:], player, lobby)
		} else {
			handleMessage(dataAsString, player, lobby)
		}
	} else if received.Type == "line" {
		if lobby.canDraw(player) {
			line := &LineEvent{}
			jsonError := json.Unmarshal(raw, line)
			if jsonError != nil {
				return fmt.Errorf("error decoding data: %s", jsonError)
			}
			lobby.AppendLine(line)

			//We directly forward the event, as it seems to be valid.
			SendDataToConnectedPlayers(player, lobby, received)
		}
	} else if received.Type == "fill" {
		if lobby.canDraw(player) {
			fill := &FillEvent{}
			jsonError := json.Unmarshal(raw, fill)
			if jsonError != nil {
				return fmt.Errorf("error decoding data: %s", jsonError)
			}
			lobby.AppendFill(fill)

			//We directly forward the event, as it seems to be valid.
			SendDataToConnectedPlayers(player, lobby, received)
		}
	} else if received.Type == "clear-drawing-board" {
		if lobby.canDraw(player) && len(lobby.CurrentDrawing) > 0 {
			lobby.ClearDrawing()
			SendDataToConnectedPlayers(player, lobby, received)
		}
	} else if received.Type == "choose-word" {
		chosenIndex, isInt := (received.Data).(int)
		if !isInt {
			asFloat, isFloat := (received.Data).(float64)
			if isFloat && asFloat < 4 {
				chosenIndex = int(asFloat)
			} else {
				return fmt.Errorf("invalid data in choose-word event: %v", received.Data)
			}
		}

		drawer := lobby.Drawer
		if player == drawer && len(lobby.WordChoice) > 0 && chosenIndex >= 0 && chosenIndex <= 2 {
			lobby.CurrentWord = lobby.WordChoice[chosenIndex]
			lobby.WordChoice = nil
			lobby.WordHints = createWordHintFor(lobby.CurrentWord, false)
			lobby.WordHintsShown = createWordHintFor(lobby.CurrentWord, true)
			triggerWordHintUpdate(lobby)
		}
	} else if received.Type == "custom-word-event" {
		drawer := lobby.Drawer
		if player == drawer {
			lobby.CurrentWord = received.Data.(string)
			lobby.WordChoice = nil
			lobby.WordHints = createWordHintFor(lobby.CurrentWord, false)
			lobby.WordHintsShown = createWordHintFor(lobby.CurrentWord, true)
			triggerWordHintUpdate(lobby)
		}
	} else if received.Type == "kick-vote" {
		if !lobby.EnableVotekick {
			// Votekicking is disabled in the lobby.
			// We tell the user and do not continue with the event.
			WriteAsJSON(player, JSEvent{Type: "system-message", Data: "Votekick is disabled in this lobby!"})
		} else {
			toKickID, isString := (received.Data).(string)
			if !isString {
				return fmt.Errorf("invalid data in kick-vote event: %v", received.Data)
			}

			handleKickEvent(lobby, player, toKickID)
		}
	} else if received.Type == "start" {
		if lobby.Round == 0 && player == lobby.Owner {
			//We are resetting each player's score, since players could
			//technically be playing a second game after the last one
			//has already ended.
			for _, otherPlayer := range lobby.Players {
				otherPlayer.Score = 0
				otherPlayer.LastScore = 0
				//Since nobody has any points in the beginning, everyone has
				//practically the same rank, therefore they're all winners for now.
				otherPlayer.Rank = 1
			}

			advanceLobby(lobby)
		}
	} else if received.Type == "name-change" {
		newName, isString := (received.Data).(string)
		if !isString {
			return fmt.Errorf("invalid data in name-change event: %v", received.Data)
		}
		commandNick(player, lobby, newName)
	}

	return nil
}

func handleMessage(input string, sender *Player, lobby *Lobby) {
	trimmed := strings.TrimSpace(input)
	if trimmed == "" {
		return
	}

	if lobby.CurrentWord == "" {
		sendMessageToAll(trimmed, sender, lobby)
		return
	}

	if sender.State == Drawing || sender.State == Standby {
		sendMessageToAllNonGuessing(trimmed, sender, lobby)
	} else if sender.State == Guessing {
		lowerCasedInput := strings.ToLower(trimmed)
		lowerCasedSearched := strings.ToLower(lobby.CurrentWord)

		normInput := removeAccents(lowerCasedInput)
		normSearched := removeAccents(lowerCasedSearched)

		if normSearched == normInput {
			secondsLeft := lobby.RoundEndTime/1000 - time.Now().UTC().UnixNano()/1000000000

			sender.LastScore = int(math.Ceil(math.Pow(math.Max(float64(secondsLeft), 1), 1.3) * 2))
			sender.Score += sender.LastScore
			lobby.scoreEarnedByGuessers += sender.LastScore
			sender.State = Standby

			WriteAsJSON(sender, JSEvent{Type: "system-message", Data: "You have correctly guessed the word."})

			if !lobby.isAnyoneStillGuessing() {
				endTurn(lobby)
			} else {
				//Since the word has been guessed correctly, we reveal it.
WriteAsJSON(sender, JSEvent{Type: "update-wordhint", Data: lobby.WordHintsShown}) recalculateRanks(lobby) triggerCorrectGuessEvent(lobby) triggerPlayersUpdate(lobby) } return } else if levenshtein.ComputeDistance(normInput, normSearched) == 1 { WriteAsJSON(sender, JSEvent{Type: "system-message", Data: fmt.Sprintf("'%s' is very close.", trimmed)}) } sendMessageToAll(trimmed, sender, lobby) } } func (lobby *Lobby) isAnyoneStillGuessing() bool { for _, otherPlayer := range lobby.Players { if otherPlayer.State == Guessing && otherPlayer.Connected { return true } } return false } func sendMessageToAll(message string, sender *Player, lobby *Lobby) { escaped := html.EscapeString(discordemojimap.Replace(message)) for _, target := range lobby.Players { WriteAsJSON(target, JSEvent{Type: "message", Data: Message{ Author: html.EscapeString(sender.Name), Content: escaped, }}) } } func
(message string, sender *Player, lobby *Lobby) {
	escaped := html.EscapeString(discordemojimap.Replace(message))
	for _, target := range lobby.Players {
		if target.State != Guessing {
			WriteAsJSON(target, JSEvent{Type: "non-guessing-player-message", Data: Message{
				Author:  html.EscapeString(sender.Name),
				Content: escaped,
			}})
		}
	}
}

func handleKickEvent(lobby *Lobby, player *Player, toKickID string) {
	//Kicking yourself isn't allowed
	if toKickID == player.ID {
		return
	}

	//A player can't vote twice to kick someone
	if player.votedForKick[toKickID] {
		return
	}

	toKick := -1
	for index, otherPlayer := range lobby.Players {
		if otherPlayer.ID == toKickID {
			toKick = index
			break
		}
	}

	//If we haven't found the player, we can't kick them.
	if toKick != -1 {
		player.votedForKick[toKickID] = true
		playerToKick := lobby.Players[toKick]

		var voteKickCount int
		for _, otherPlayer := range lobby.Players {
			if otherPlayer.votedForKick[toKickID] {
				voteKickCount++
			}
		}

		votesNeeded := calculateVotesNeededToKick(len(lobby.Players))

		WritePublicSystemMessage(lobby, fmt.Sprintf("(%d/%d) players voted to kick %s", voteKickCount, votesNeeded, playerToKick.Name))

		if voteKickCount >= votesNeeded {
			//Since the player is being kicked, we first clean up the kicking
			//information related to that player from every voter.
			for _, otherPlayer := range lobby.Players {
				delete(otherPlayer.votedForKick, toKickID)
			}

			WritePublicSystemMessage(lobby, fmt.Sprintf("%s has been kicked from the lobby", playerToKick.Name))

			if lobby.Drawer == playerToKick {
				WritePublicSystemMessage(lobby, "Since the kicked player has been drawing, none of you will get any points this round.")
				//Since the drawing person has been kicked, that probably means
				//that they were trolling, therefore we redact everyone's last
				//earned score.
				for _, otherPlayer := range lobby.Players {
					otherPlayer.Score -= otherPlayer.LastScore
					otherPlayer.LastScore = 0
				}
				lobby.scoreEarnedByGuessers = 0

				//We must absolutely not set lobby.Drawer to nil, since this
				//would cause the drawing order to be ruined.
			}

			if playerToKick.ws != nil {
				playerToKick.ws.Close()
			}
			lobby.Players = append(lobby.Players[:toKick], lobby.Players[toKick+1:]...)

			recalculateRanks(lobby)

			//If the owner is kicked, we choose the next best person as the owner.
			if lobby.Owner == playerToKick {
				for _, otherPlayer := range lobby.Players {
					potentialOwner := otherPlayer
					if potentialOwner.Connected {
						lobby.Owner = potentialOwner
						WritePublicSystemMessage(lobby, fmt.Sprintf("%s is the new lobby owner.", potentialOwner.Name))
						break
					}
				}
			}

			triggerPlayersUpdate(lobby)

			if lobby.Drawer == playerToKick || !lobby.isAnyoneStillGuessing() {
				endTurn(lobby)
			}
		}
	}
}

func calculateVotesNeededToKick(amountOfPlayers int) int {
	//If the amount of players equals an even number, such as 6, we will always
	//need half of that. If the amount is uneven, we'll get a floored result.
	//Therefore we always add one to the amount.
	//examples:
	//    (6+1)/2 = 3
	//    (5+1)/2 = 3
	//Therefore it'll never be possible for a minority to kick a player.
	return (amountOfPlayers + 1) / 2
}

func handleCommand(commandString string, caller *Player, lobby *Lobby) {
	command := commands.ParseCommand(commandString)
	if len(command) >= 1 {
		switch strings.ToLower(command[0]) {
		case "setmp":
			commandSetMP(caller, lobby, command)
		case "help":
			//TODO
		}
	}
}

func commandNick(caller *Player, lobby *Lobby, name string) {
	newName := html.EscapeString(strings.TrimSpace(name))

	//We don't want super-long names
	if len(newName) > 30 {
		newName = newName[:30]
	}

	oldName := caller.Name
	if newName == "" {
		caller.Name = GeneratePlayerName()
	} else {
		caller.Name = newName
	}

	fmt.Printf("%s is now %s\n", oldName, caller.Name)

	triggerPlayersUpdate(lobby)
}

func commandSetMP(caller *Player, lobby *Lobby, args []string) {
	if caller == lobby.Owner {
		if len(args) < 2 {
			return
		}

		newMaxPlayersValue := strings.TrimSpace(args[1])
		newMaxPlayersValueInt, err := strconv.ParseInt(newMaxPlayersValue, 10, 64)
		if err == nil {
			if int(newMaxPlayersValueInt) >= len(lobby.Players) &&
				newMaxPlayersValueInt <= LobbySettingBounds.MaxMaxPlayers &&
				newMaxPlayersValueInt >= LobbySettingBounds.MinMaxPlayers {
				lobby.MaxPlayers = int(newMaxPlayersValueInt)

				WritePublicSystemMessage(lobby, fmt.Sprintf("MaxPlayers value has been changed to %d", lobby.MaxPlayers))
			} else {
				if len(lobby.Players) > int(LobbySettingBounds.MinMaxPlayers) {
					WriteAsJSON(caller, JSEvent{Type: "system-message", Data: fmt.Sprintf("MaxPlayers value should be between %d and %d.", len(lobby.Players), LobbySettingBounds.MaxMaxPlayers)})
				} else {
					WriteAsJSON(caller, JSEvent{Type: "system-message", Data: fmt.Sprintf("MaxPlayers value should be between %d and %d.", LobbySettingBounds.MinMaxPlayers, LobbySettingBounds.MaxMaxPlayers)})
				}
			}
		} else {
			WriteAsJSON(caller, JSEvent{Type: "system-message", Data: "MaxPlayers value must be numeric."})
		}
	} else {
		WriteAsJSON(caller, JSEvent{Type: "system-message", Data: "Only the lobby owner can change the MaxPlayers setting."})
	}
}

func endTurn(lobby *Lobby) {
	if lobby.timeLeftTicker != nil {
		lobby.timeLeftTicker.Stop()
		lobby.timeLeftTicker = nil
	}

	var roundOverMessage string
	if lobby.CurrentWord == "" {
		roundOverMessage = "Round over. No word was chosen."
	} else {
		roundOverMessage = fmt.Sprintf("Round over. The word was '%s'", lobby.CurrentWord)
	}

	//The drawer can potentially be nil if they were kicked; in that case we
	//proceed with the round if anyone has already scored.
	drawer := lobby.Drawer
	if drawer != nil && lobby.scoreEarnedByGuessers > 0 {
		averageScore := float64(lobby.scoreEarnedByGuessers) / float64(len(lobby.Players)-1)
		if averageScore > 0 {
			drawer.LastScore = int(averageScore * 1.1)
			drawer.Score += drawer.LastScore
		}
	}

	lobby.scoreEarnedByGuessers = 0
	lobby.alreadyUsedWords = append(lobby.alreadyUsedWords, lobby.CurrentWord)
	lobby.CurrentWord = ""
	lobby.WordHints = nil

	//If the round ends while people are still guessing, the "last score"
	//for the next turn has to be "no score earned".
	for _, otherPlayer := range lobby.Players {
		if otherPlayer.State == Guessing {
			otherPlayer.LastScore = 0
		}
	}

	WritePublicSystemMessage(lobby, roundOverMessage)

	advanceLobby(lobby)
}

// advanceLobby will either start the game or jump over to the next turn.
func advanceLobby(lobby *Lobby) { for _, otherPlayer := range lobby.Players { otherPlayer.State = Guessing otherPlayer.votedForKick = make(map[string]bool) } lobby.ClearDrawing() newDrawer, roundOver := selectNextDrawer(lobby) if roundOver { if lobby.Round == lobby.MaxRounds { endGame(lobby) return } lobby.Round++ } lobby.Drawer = newDrawer lobby.Drawer.State = Drawing lobby.WordChoice = GetRandomWords(lobby) recalculateRanks(lobby) //We use milliseconds for higher accuracy lobby.RoundEndTime = time.Now().UTC().UnixNano()/1000000 + int64(lobby.DrawingTime)*1000 lobby.timeLeftTicker = time.NewTicker(1 * time.Second) go roundTimerTicker(lobby) TriggerComplexUpdateEvent("next-turn", &NextTurn{ Round: lobby.Round, Players: lobby.Players, RoundEndTime: int(lobby.RoundEndTime - getTimeAsMillis()), }, lobby) WriteAsJSON(lobby.Drawer, &JSEvent{Type: "your-turn", Data: lobby.WordChoice}) } func endGame(lobby *Lobby) { lobby.Drawer = nil lobby.Round = 0 recalculateRanks(lobby) triggerPlayersUpdate(lobby) WritePublicSystemMessage(lobby, "Game over. Type !start again to start a new round.") } // selectNextDrawer returns the next person that's supposed to be drawing, but // doesn't tell the lobby yet. The boolean signals whether the current round is // over. func selectNextDrawer(lobby *Lobby) (*Player, bool) { for index, otherPlayer := range lobby.Players { if otherPlayer == lobby.Drawer { //If we have someone that's drawing, take the next one for i := index + 1; i < len(lobby.Players); i++ { player := lobby.Players[i] if player.Connected { return player, false } } } } return lobby.Players[0], true } func roundTimerTicker(lobby *Lobby) { hintsLeft := 2 revealHintAtMillisecondsLeft := lobby.DrawingTime * 1000 / 3 for { ticker := lobby.timeLeftTicker if ticker == nil { return } select { case <-ticker.C: currentTime := getTimeAsMillis() if currentTime >= lobby.RoundEndTime { go endTurn(lobby) } if hintsLeft > 0 && lobby.WordHints != nil { timeLeft := lobby.RoundEndTime - currentTime if timeLeft <= int64(revealHintAtMillisecondsLeft*hintsLeft) { hintsLeft-- for { randomIndex := rand.Int() % len(lobby.WordHints) if lobby.WordHints[randomIndex].Character == 0 { lobby.WordHints[randomIndex].Character = []rune(lobby.CurrentWord)[randomIndex] triggerWordHintUpdate(lobby) break } } } } } } } func getTimeAsMillis() int64 { return time.Now().UTC().UnixNano() / 1000000 } // NextTurn represents the data necessary for displaying the lobby state right // after a new turn started. Meaning that no word has been chosen yet and // therefore there are no wordhints and no current drawing instructions. type NextTurn struct { Round int `json:"round"` Players []*Player `json:"players"` RoundEndTime int `json:"roundEndTime"` } // recalculateRanks will assign each player his respective rank in the lobby // according to everyones current score. This will not trigger any events. 
func recalculateRanks(lobby *Lobby) {
	for _, a := range lobby.Players {
		if !a.Connected {
			continue
		}
		playersThatAreHigher := 0
		for _, b := range lobby.Players {
			if !b.Connected {
				continue
			}
			if b.Score > a.Score {
				playersThatAreHigher++
			}
		}

		a.Rank = playersThatAreHigher + 1
	}
}

func createWordHintFor(word string, showAll bool) []*WordHint {
	wordHints := make([]*WordHint, 0, len(word))
	for _, char := range word {
		irrelevantChar := char == ' ' || char == '_' || char == '-'
		//Separators are always revealed; everything else is only revealed when showAll is set.
		if showAll || irrelevantChar {
			wordHints = append(wordHints, &WordHint{
				Character: char,
				Underline: !irrelevantChar,
			})
		} else {
			wordHints = append(wordHints, &WordHint{
				Underline: !irrelevantChar,
			})
		}
	}
	return wordHints
}

var TriggerSimpleUpdateEvent func(eventType string, lobby *Lobby)
var TriggerComplexUpdatePerPlayerEvent func(eventType string, data func(*Player) interface{}, lobby *Lobby)
var TriggerComplexUpdateEvent func(eventType string, data interface{}, lobby *Lobby)
var SendDataToConnectedPlayers func(sender *Player, lobby *Lobby, data interface{})
var WriteAsJSON func(player *Player, object interface{}) error
var WritePublicSystemMessage func(lobby *Lobby, text string)

func triggerPlayersUpdate(lobby *Lobby) {
	TriggerComplexUpdateEvent("update-players", lobby.Players, lobby)
}

func triggerCorrectGuessEvent(lobby *Lobby) {
	TriggerSimpleUpdateEvent("correct-guess", lobby)
}

func triggerWordHintUpdate(lobby *Lobby) {
	if lobby.CurrentWord == "" {
		return
	}

	TriggerComplexUpdatePerPlayerEvent("update-wordhint", func(player *Player) interface{} {
		return lobby.GetAvailableWordHints(player)
	}, lobby)
}

type Rounds struct {
	Round     int `json:"round"`
	MaxRounds int `json:"maxRounds"`
}

// CreateLobby creates a new lobby, returning an error if creation fails.
func CreateLobby(playerName, language string, drawingTime, rounds, maxPlayers, customWordChance, clientsPerIPLimit int, customWords []string, enableVotekick bool) (*Player, *Lobby, error) {
	lobby := createLobby(drawingTime, rounds, maxPlayers, customWords, customWordChance, clientsPerIPLimit, enableVotekick)
	player := createPlayer(playerName)

	lobby.Players = append(lobby.Players, player)
	lobby.Owner = player

	// Read wordlist according to the chosen language
	words, err := readWordList(language)
	if err != nil {
		RemoveLobby(lobby.ID)
		return nil, nil, err
	}
	lobby.Words = words

	return player, lobby, nil
}

// GeneratePlayerName creates a new player name, a so-called petname. It consists
// of an adverb, an adjective and an animal name. The result can generally be
// trusted to be sane.
func GeneratePlayerName() string {
	adjective := strings.Title(petname.Adjective())
	adverb := strings.Title(petname.Adverb())
	name := strings.Title(petname.Name())
	return adverb + adjective + name
}

// Message represents a message in the chatroom.
type Message struct {
	// Author is the player / thing that wrote the message
	Author string `json:"author"`
	// Content is the actual message text.
	Content string `json:"content"`
}

// Ready represents the initial state that a user needs upon connection.
// This includes all the necessary things for properly running a client
// without receiving any more data.
type Ready struct {
	PlayerID   string `json:"playerId"`
	PlayerName string `json:"playerName"`
	Drawing    bool   `json:"drawing"`

	OwnerID        string        `json:"ownerId"`
	Round          int           `json:"round"`
	MaxRound       int           `json:"maxRounds"`
	RoundEndTime   int           `json:"roundEndTime"`
	WordHints      []*WordHint   `json:"wordHints"`
	Players        []*Player     `json:"players"`
	CurrentDrawing []interface{} `json:"currentDrawing"`
}

func OnConnected(lobby *Lobby, player *Player) {
	player.Connected = true
	WriteAsJSON(player, JSEvent{Type: "ready", Data: &Ready{
		PlayerID:   player.ID,
		Drawing:    player.State == Drawing,
		PlayerName: player.Name,

		OwnerID:        lobby.Owner.ID,
		Round:          lobby.Round,
		MaxRound:       lobby.MaxRounds,
		RoundEndTime:   int(lobby.RoundEndTime - getTimeAsMillis()),
		WordHints:      lobby.GetAvailableWordHints(player),
		Players:        lobby.Players,
		CurrentDrawing: lobby.CurrentDrawing,
	}})

	//This state is reached when the player refreshes before having chosen a word.
	if lobby.Drawer == player && lobby.CurrentWord == "" {
		WriteAsJSON(lobby.Drawer, &JSEvent{Type: "your-turn", Data: lobby.WordChoice})
	}

	updateRocketChat(lobby, player)

	//TODO Only send to everyone except for the new player, since it's part of the ready event.
	triggerPlayersUpdate(lobby)
}

func OnDisconnected(lobby *Lobby, player *Player) {
	//We want to avoid calling the handler twice.
	if player.ws == nil {
		return
	}

	player.Connected = false
	player.ws = nil
	updateRocketChat(lobby, player)

	if !lobby.HasConnectedPlayers() {
		RemoveLobby(lobby.ID)
		log.Printf("Closing lobby %s. There are currently %d open lobbies left.\n", lobby.ID, len(lobbies))
	} else {
		triggerPlayersUpdate(lobby)
	}
}

func (lobby *Lobby) GetAvailableWordHints(player *Player) []*WordHint {
	//The drawer simply gets every character as a word-hint. We basically abuse
	//the hints for displaying the word, instead of having yet another GUI
	//element that wastes space.
	if player.State == Drawing || player.State == Standby {
		return lobby.WordHintsShown
	} else {
		return lobby.WordHints
	}
}

func (lobby *Lobby) JoinPlayer(playerName string) *Player {
	player := createPlayer(playerName)

	//FIXME Make a dedicated method that uses a mutex?
	lobby.Players = append(lobby.Players, player)
	recalculateRanks(lobby)
	triggerPlayersUpdate(lobby)

	return player
}

func (lobby *Lobby) canDraw(player *Player) bool {
	return lobby.Drawer == player && lobby.CurrentWord != ""
}

func removeAccents(s string) string {
	return strings.
		NewReplacer(" ", "", "-", "", "_", "").
		Replace(sanitize.Accents(s))
}
sendMessageToAllNonGuessing
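The threshold rule spelled out in calculateVotesNeededToKick is plain integer arithmetic: adding one before halving makes the floored division behave like a ceiling of n/2, so a minority can never carry a kick vote. A minimal Python sketch of that rule (illustrative only, not part of the Go source):

def votes_needed_to_kick(amount_of_players: int) -> int:
    # (n + 1) // 2 floors, but the +1 makes it act like ceil(n / 2)
    return (amount_of_players + 1) // 2

assert votes_needed_to_kick(6) == 3
assert votes_needed_to_kick(5) == 3
# at least half of the lobby always has to agree
assert all(2 * votes_needed_to_kick(n) >= n for n in range(1, 100))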
__main__.py
import sys from argparse import ArgumentParser from . import search def parse_args(): parser = ArgumentParser(prog="googlelyrics") parser.add_argument( "--no-header", action="store_true", help="don't print the info header" ) parser.add_argument("query", type=str, nargs="+", help="search query") return parser.parse_args() # pylint: disable=broad-except def main():
if __name__ == "__main__": main()
args = parse_args() try: lyrics = search(" ".join(args.query)) except BaseException: sys.exit(sys.exc_info()[1]) if not lyrics: sys.exit("No lyrics found") if not args.no_header: print(f"{lyrics.artist} - {lyrics.title}\n") print("\n".join(lyrics.lines))
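main() above joins every positional argument into a single query string and prints an `artist - title` header unless --no-header was given. A small Python sketch of that flow with search stubbed out (the stub's attribute names are assumptions taken from what main() accesses; the real search lives in the package):

from types import SimpleNamespace

def fake_search(query):
    # stand-in for the package's search(); shape assumed from main()'s usage
    return SimpleNamespace(artist="Artist", title=query.title(), lines=["line 1", "line 2"])

def render(lyrics, no_header=False):
    # mirrors main(): optional header line plus a blank line, then the lyrics
    out = [] if no_header else [f"{lyrics.artist} - {lyrics.title}", ""]
    out.extend(lyrics.lines)
    return "\n".join(out)

print(render(fake_search("some song")))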
term.entity.ts
import { Column, Entity, OneToMany, OneToOne } from 'typeorm';
import { DeepPartial } from '@app/common/shared-types';
import { BaseEntity } from '@app/common/base.entity';
import { TermMeta } from './term-meta.entity';
import { TermTaxonomy } from './term-taxonomy.entity';

// @Index(['name'], { unique: true })
/**
 * Taxonomy term
 */
@Entity('terms')
export class Term e
ds BaseEntity {
  constructor(input?: DeepPartial<Term>) {
    super(input);
  }

  @Column('varchar', {
    length: 200,
    comment: 'name',
  })
  name: string;

  @Column('varchar', {
    length: 200,
    comment: 'slug (URL identifier)',
    unique: true,
  })
  slug: string;

  @Column({
    type: 'int',
    comment: 'sort group',
    default: 0,
  })
  group: number;

  @OneToMany(type => TermMeta, termMeta => termMeta.term, {
    cascade: true,
  })
  metas?: TermMeta[];

  @OneToOne(type => TermTaxonomy, termTaxonomy => termTaxonomy.term, {
    cascade: true,
  })
  taxonomy: TermTaxonomy;

  // @OneToOne(type => TermRelationships, termRelationships => termRelationships.taxonomy, {
  //   cascade: true,
  // })
  // termRelationships: TermRelationships;
}
xten
18.ts
import { Card } from '../../../interfaces' import Set from '../Chilling Reign' const card: Card = { set: Set, name: { en: "Rillaboom", fr: "Gorythmic", es: "Rillaboom", it: "Rillaboom", pt: "Rillaboom", de: "Gortrom" }, illustrator: "Hitoshi Ariga", rarity: "Rare", category: "Pokemon", hp: 180, types: ["Grass"], evolveFrom: { en: "Thwackey", fr: "Badabouin" }, attacks: [{ name: { en: "Wood Drain", fr: "Regain Sylvestre", es: "Drenaje del Bosque", it: "Assorbilegno", pt: "Dreno de Madeira", de: "Holzsauger" },
effect: { en: "Heal 30 damage from this Pokémon.", fr: "Soignez 30 dégâts de ce Pokémon.", es: "Cura 30 puntos de daño a este Pokémon.", it: "Cura questo Pokémon da 30 danni.", pt: "Cure 30 pontos de dano deste Pokémon.", de: "Heile 30 Schadenspunkte bei diesem Pokémon." }, damage: 60, cost: ["Grass", "Colorless"] }, { name: { en: "Raging Repeated Strike", fr: "Frappes Effrénées", es: "Golpe Furioso Incesante", it: "Colpo Raffica Furente", pt: "Golpes Ferozes Múltiplos", de: "Tobender Wiederholungsschlag" }, effect: { en: "Discard any amount of Energy from your Pokémon. This attack does 30 more damage for each card you discarded in this way.", fr: "Défaussez autant d’Énergies que vous le voulez de vos Pokémon. Cette attaque inflige 30 dégâts supplémentaires pour chaque carte défaussée de cette façon.", es: "Descarta cualquier cantidad de Energías de tus Pokémon. Este ataque hace 30 puntos de daño más por cada carta que hayas descartado de esta manera.", it: "Scarta tutte le Energie che vuoi dai tuoi Pokémon. Questo attacco infligge 30 danni in più per ogni carta che hai scartato in questo modo.", pt: "Descarte qualquer quantidade de Energia dos seus Pokémon. Este ataque causa 30 pontos de dano a mais para cada carta descartada desta forma.", de: "Lege beliebig viele Energien von deinen Pokémon auf deinen Ablagestapel. Diese Attacke fügt für jede auf diese Weise abgelegte Karte 30 Schadenspunkte mehr zu." }, damage: "120+", cost: ["Grass", "Grass", "Colorless"] }], weaknesses: [{ type: "Fire", value: "×2" }], retreat: 3, regulationMark: "E", variants: { normal: false, reverse: true, holo: true, firstEdition: false }, stage: "Stage2", description: { en: "The one with the best drumming techniques becomes the boss of the troop. It has a gentle disposition and values harmony among its group." } } export default card
lib.rs
//! Derive traits for Ketos scripting language //! //! Provides a set of custom `#[derive(...)]` macros for convenience when using Ketos. //! //! One or more of the following names can be added to the `derive` attribute of //! any struct or enum value. For example: //! //! ```ignore //! extern crate ketos; //! #[macro_use] extern crate ketos_derive; //! //! #[derive(Clone, Debug, ForeignValue, FromValue, IntoValue)] //! struct Foo { //! // ... //! } //! ``` //! //! ## `derive(ForeignValue)` //! //! Implements [`ForeignValue`](https://docs.rs/ketos/*/ketos/value/trait.ForeignValue.html) //! for the given type. The only method implemented by this macro is `type_name`. //! All other methods retain their default implementations. //! //! The `ForeignValue` trait must be implemented (either manually or using this `derive`) //! for any of the other `derive` implementations to succeed. //! //! ## `derive(FromValue)` //! //! Implements [`FromValue`](https://docs.rs/ketos/*/ketos/value/trait.FromValue.html) //! for the given type. //! //! The generated implementation requires that the instance of the type held by the //! Ketos `Value` is unique, i.e. the contained `Rc` has a reference count of `1`. //! //! If your type implements `Clone`, `derive(FromValueClone)` will instead generate //! an implementation of `FromValue` that clones the contained value, if necessary. //! //! ## `derive(FromValueClone)` //! //! Implements [`FromValue`](https://docs.rs/ketos/*/ketos/value/trait.FromValue.html) //! for the given type, provided that the type implements the `Clone` trait. //! //! If the value contained in the Ketos `Value` is not unique, the result will be //! a clone of the contained value. //! //! ## `derive(FromValueRef)` //! //! Implements [`FromValueRef`](https://docs.rs/ketos/*/ketos/value/trait.FromValueRef.html) //! for the given type. //! //! ## `derive(IntoValue)` //! //! Implements `Into<Value>` for the given type. //! //! ## `derive(StructValue)` //! //! Implements [`StructValue`](https://docs.rs/ketos/*/ketos/structs/trait.StructValue.html) //! for the given type, provided that the type implements `Clone` and all fields //! implement `Clone`, `FromValue`, and `Into<Value>`. //! //! Types implementing `StructValue` can be constructed with `new` in Ketos code //! and have their fields accessed and modified with the `.` and `.=` functions. 
#![recursion_limit = "256"] extern crate proc_macro; extern crate proc_macro2; #[macro_use] extern crate quote; extern crate syn; use proc_macro::TokenStream; use proc_macro2::Span; use quote::{ToTokens, Tokens}; use syn::{ AttrStyle, Attribute, Data, DataStruct, DeriveInput, Fields, GenericParam, Generics, Ident, Lifetime, LifetimeDef, Lit, Meta, NestedMeta, Path, PathArguments, TypeGenerics, WhereClause, }; #[proc_macro_derive(ForeignValue)] pub fn derive_foreign_value(input: TokenStream) -> TokenStream { let ast: DeriveInput = syn::parse(input).expect("syn::parse"); let name = ast.ident; let name_str: &str = name.as_ref(); let (impl_generics, ty_generics, where_clause) = ast.generics.split_for_impl(); let expr = quote!{ impl #impl_generics ::ketos::ForeignValue for #name #ty_generics #where_clause { fn type_name(&self) -> &'static str { #name_str } } }; expr.to_string().parse().expect("parse quote!") } #[proc_macro_derive(FromValue)] pub fn derive_from_value(input: TokenStream) -> TokenStream { let ast: DeriveInput = syn::parse(input).expect("syn::parse"); let name = ast.ident; let name_str: &str = name.as_ref(); let (impl_generics, ty_generics, where_clause) = ast.generics.split_for_impl(); let expr = quote!{ impl #impl_generics ::ketos::FromValue for #name #ty_generics #where_clause { fn from_value(v: ::ketos::Value) -> ::std::result::Result<Self, ::ketos::ExecError> { match v { ::ketos::Value::Foreign(fv) => { match ::ketos::ForeignValue::downcast_rc(fv) { ::std::result::Result::Ok(v) => { match ::std::rc::Rc::try_unwrap(v) { ::std::result::Result::Ok(v) => ::std::result::Result::Ok(v), ::std::result::Result::Err(_) => ::std::result::Result::Err( ::ketos::panic(concat!(#name_str, " value is not unique"))) } } ::std::result::Result::Err(rc) => { ::std::result::Result::Err( ::ketos::ExecError::expected(#name_str, &::ketos::Value::Foreign(rc))) } } } ref v => ::std::result::Result::Err( ::ketos::ExecError::expected(#name_str, v)) } } } }; expr.to_string().parse().expect("parse quote!") } #[proc_macro_derive(FromValueClone)] pub fn derive_from_value_clone(input: TokenStream) -> TokenStream { let ast: DeriveInput = syn::parse(input).expect("syn::parse"); let name = ast.ident; let name_str: &str = name.as_ref(); let (impl_generics, ty_generics, where_clause) = ast.generics.split_for_impl(); let expr = quote!{ impl #impl_generics ::ketos::FromValue for #name #ty_generics #where_clause { fn from_value(v: ::ketos::Value) -> ::std::result::Result<Self, ::ketos::ExecError> { match v { ::ketos::Value::Foreign(fv) => { match ::ketos::ForeignValue::downcast_rc(fv) { ::std::result::Result::Ok(v) => { match ::std::rc::Rc::try_unwrap(v) { ::std::result::Result::Ok(v) => ::std::result::Result::Ok(v), ::std::result::Result::Err(rc) => ::std::result::Result::Ok((*rc).clone()) } } ::std::result::Result::Err(rc) => { ::std::result::Result::Err( ::ketos::ExecError::expected(#name_str, &::ketos::Value::Foreign(rc))) } } } ref v => ::std::result::Result::Err( ::ketos::ExecError::expected(#name_str, v)) } } } }; expr.to_string().parse().expect("parse quote!") } #[proc_macro_derive(FromValueRef)] pub fn derive_from_value_ref(input: TokenStream) -> TokenStream { let ast: DeriveInput = syn::parse(input).expect("syn::parse"); let name = ast.ident; let name_str: &str = name.as_ref(); let (impl_generics, ty_generics, where_clause) = split_with_lifetime(&ast.generics); let expr = quote!{ impl #impl_generics ::ketos::FromValueRef<'value> for &'value #name #ty_generics #where_clause { fn from_value_ref(v: &'value 
::ketos::Value) -> ::std::result::Result<Self, ::ketos::ExecError> { if let ::ketos::Value::Foreign(ref fv) = *v { if let ::std::option::Option::Some(v) = fv.downcast_ref() { return ::std::result::Result::Ok(v); } } ::std::result::Result::Err( ::ketos::ExecError::expected(#name_str, v)) } } }; expr.to_string().parse().expect("parse quote!") } #[proc_macro_derive(IntoValue)] pub fn derive_into_value(input: TokenStream) -> TokenStream { let ast: DeriveInput = syn::parse(input).expect("syn::parse"); let name = ast.ident; let (impl_generics, ty_generics, where_clause) = ast.generics.split_for_impl(); let expr = quote!{ impl #impl_generics Into<::ketos::Value> for #name #ty_generics #where_clause { fn into(self) -> ::ketos::Value { ::ketos::Value::new_foreign(self) } } }; expr.to_string().parse().expect("parse quote!") } #[proc_macro_derive(StructValue, attributes(ketos))] pub fn derive_struct_value(input: TokenStream) -> TokenStream { let ast: DeriveInput = syn::parse(input).expect("syn::parse"); let name = ast.ident; let (impl_generics, ty_generics, where_clause) = ast.generics.split_for_impl(); let fields = match ast.data { Data::Enum(_) => panic!("cannot derive StructValue for enum types"), Data::Struct(DataStruct{fields: Fields::Unit, ..}) => panic!("cannot derive StructValue for unit struct types"), Data::Struct(DataStruct{fields: Fields::Unnamed(..), ..}) => panic!("cannot derive StructValue for tuple struct types"), Data::Struct(DataStruct{ref fields, ..}) => fields, Data::Union(_) => panic!("cannot derive StructValue for union types"), }; let name_str: &str = name.as_ref(); let mut local = Vec::new(); let mut field_name = Vec::new(); let mut field_str = Vec::new(); let mut handle_field = Vec::new(); let mut handle_set_field = Vec::new(); for field in fields { let opts = parse_attrs(&field.attrs); let ident = field.ident.as_ref().unwrap(); let ty = &field.ty; let field_s = opts.rename.unwrap_or_else( || make_field_name(ident.as_ref())); // A local binding is created for each field name. // It must not conflict with any other bindings in method implementations. let local_ident = Ident::from(format!("__{}", ident)); local.push(local_ident.clone()); field_name.push(ident.clone()); field_str.push(field_s);
}); handle_set_field.push(quote!{ self.#ident = <#ty as ::ketos::FromValue>::from_value(value)?; }); } // Explicitly borrow these so they may be used in multiple quote! expressions let field_name = &field_name; let local = &local; let field_str = &field_str; let expr = quote!{ impl #impl_generics ::ketos::StructValue for #name #ty_generics #where_clause { fn struct_name() -> &'static str { #name_str } fn from_fields(scope: &::ketos::Scope, def: &::std::rc::Rc<::ketos::StructDef>, fields: &mut [(::ketos::Name, ::ketos::Value)]) -> ::std::result::Result<Self, ::ketos::Error> { #( let mut #local = None; )* let mut iter = fields.iter_mut(); while let ::std::option::Option::Some( &mut (name, ref mut field)) = iter.next() { let value = field.take(); scope.with_name(name, |name_str| { match name_str { #( #field_str => { #handle_field } , )* _ => return ::std::result::Result::Err(::ketos::Error::ExecError( ::ketos::ExecError::MissingField{ struct_name: def.name(), field: name, })) } ::std::result::Result::Ok(()) })?; } ::std::result::Result::Ok(#name{ #( #field_name : #local.ok_or_else( || ::ketos::Error::ExecError(::ketos::ExecError::MissingField{ struct_name: def.name(), field: scope.add_name(#field_str), }))? ),* }) } fn field_names() -> &'static [&'static str] { static FIELDS: &'static [&'static str] = &[ #( #field_str ),* ]; FIELDS } fn get_field(&self, scope: &::ketos::Scope, def: &::std::rc::Rc<::ketos::StructDef>, name: ::ketos::Name) -> ::std::result::Result<::ketos::Value, ::ketos::Error> { scope.with_name(name, |name_str| { match name_str { #( #field_str => { ::std::result::Result::Ok(self.#field_name.clone().into()) } , )* _ => ::std::result::Result::Err(::ketos::Error::ExecError( ::ketos::ExecError::FieldError{ struct_name: def.name(), field: name, })) } }) } fn replace_fields(&mut self, scope: &::ketos::Scope, def: &::std::rc::Rc<::ketos::StructDef>, fields: &mut [(::ketos::Name, ::ketos::Value)]) -> ::std::result::Result<(), ::ketos::Error> { for &mut (name, ref mut value) in fields { let value = value.take(); scope.with_name(name, |name_str| { match name_str { #( #field_str => { #handle_set_field } , )* _ => return ::std::result::Result::Err(::ketos::Error::ExecError( ::ketos::ExecError::FieldError{ struct_name: def.name(), field: name, })) } ::std::result::Result::Ok(()) })?; } ::std::result::Result::Ok(()) } } }; expr.to_string().parse().expect("parse quote!") } #[derive(Default)] struct AttrOpts { rename: Option<String>, } fn parse_attrs(attrs: &[Attribute]) -> AttrOpts { let mut opts = AttrOpts::default(); for attr in attrs { if is_outer(attr.style) && path_eq(&attr.path, "ketos") { let meta = attr.interpret_meta().unwrap_or_else( || panic!("invalid attribute: {}", tokens_str(attr))); match meta { Meta::Word(_) => panic!("#[ketos] is not a valid attribute"), Meta::NameValue(..) => panic!("#[ketos = ...] 
is not a valid attribute"), Meta::List(ref items) => { for item in &items.nested { match *item { NestedMeta::Literal(_) => panic!("unexpected meta item `{}`", tokens_str(item)), NestedMeta::Meta(ref item) => { match *item { Meta::NameValue(ref nv) => { match nv.ident.as_ref() { "rename" => opts.rename = Some(lit_str(&nv.lit)), _ => panic!("unexpected meta item `{}`", tokens_str(item)) } } _ => panic!("unexpected meta item `{}`", tokens_str(item)) } } } } } } } } opts } fn path_eq(path: &Path, s: &str) -> bool { path.segments.len() == 1 && { let seg = path.segments.first().unwrap().into_value(); match seg.arguments { PathArguments::None => seg.ident.as_ref() == s, _ => false } } } fn is_outer(style: AttrStyle) -> bool { match style { AttrStyle::Outer => true, _ => false } } fn lit_str(lit: &Lit) -> String { match *lit { Lit::Str(ref s) => s.value(), _ => panic!("unexpected literal `{}`", tokens_str(lit)) } } fn make_field_name(name: &str) -> String { name.replace("_", "-") } fn tokens_str<T: ToTokens>(t: &T) -> String { let mut tok = Tokens::new(); t.to_tokens(&mut tok); tok.to_string() } fn split_with_lifetime(generics: &Generics) -> (LtImplGenerics, TypeGenerics, Option<&WhereClause>) { let (_, ty_generics, where_clause) = generics.split_for_impl(); (LtImplGenerics(generics), ty_generics, where_clause) } struct LtImplGenerics<'a>(&'a Generics); impl<'a> ToTokens for LtImplGenerics<'a> { fn to_tokens(&self, tokens: &mut Tokens) { let mut generics = self.0.clone(); let lt = LifetimeDef::new(Lifetime::new("'value", Span::call_site())); generics.params.insert(0, GenericParam::Lifetime(lt)); let (impl_generics, _, _) = generics.split_for_impl(); impl_generics.to_tokens(tokens); } }
handle_field.push(quote!{ let v = <#ty as ::ketos::FromValue>::from_value(value)?; #local_ident = ::std::option::Option::Some(v);
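derive(StructValue) above decides the Ketos-visible name of each field with make_field_name, replacing underscores with hyphens, unless a #[ketos(rename = "...")] attribute supplies an explicit name. A tiny Python sketch of that naming rule (illustrative; the authoritative logic is the Rust make_field_name above):

from typing import Optional

def make_field_name(rust_ident: str, rename: Optional[str] = None) -> str:
    # rename from #[ketos(rename = "...")] wins; otherwise snake_case -> kebab-case
    return rename if rename is not None else rust_ident.replace("_", "-")

assert make_field_name("max_players") == "max-players"
assert make_field_name("max_players", rename="cap") == "cap"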
build.rs
use std::env;

fn main() {
    if cfg!(feature = "pre-generated-bindings") {
        // only invoke pkgconf, and use the pre-generated bindings in srtp.rs
        find_libsrtp2("");
        return;
    }

    let out_dir = &env::var("OUT_DIR").unwrap();

    println!("cargo:rerun-if-changed=wrapper.h");
    let mut bindgen_builder = bindgen::Builder::default()
        .clang_args(&["-I./libsrtp/include"])
        .header("wrapper.h")
        .whitelist_function("(srtp|SRTP|srtcp|SRTCP)_.*")
        .whitelist_type("(srtp|SRTP|srtcp|SRTCP)_.*")
        .whitelist_var("(srtp|SRTP|srtcp|SRTCP)_.*");
    if !cfg!(feature = "enable-openssl") {
        bindgen_builder = bindgen_builder.blacklist_item(".*(192|gcm|GCM).*")
    }
    bindgen_builder
        .generate()
        .expect("Failed to generate libsrtp2 binding")
        .write_to_file(format!("{}/bindings.rs", out_dir))
        .expect("Failed to write libsrtp2 binding");

    // Check for conflicting features before the skip-linking early return,
    // otherwise the dynamic-linking + skip-linking combination could never be reported.
    if cfg!(all(feature = "dynamic-linking", feature = "skip-linking")) {
        panic!("dynamic-linking feature cannot be used alongside skip-linking feature!");
    }
    if cfg!(feature = "skip-linking") {
        return;
    }
    if cfg!(all(feature = "dynamic-linking", target_env = "msvc")) {
        panic!("dynamic-linking feature is not currently supported for MSVC!");
    }

    find_libsrtp2(out_dir);
}

#[cfg(all(target_env = "msvc", feature = "build"))]
fn find_libsrtp2(_out_dir: &str) {
    compile_error!("building libsrtp2 from source is not supported on windows");
}

#[cfg(all(target_env = "msvc", not(feature = "build")))]
fn find_libsrtp2(_out_dir: &str) {
    vcpkg::find_package("libsrtp")
        .expect("Failed to find libsrtp via vcpkg");
}

#[cfg(all(not(target_env = "msvc"), not(feature = "build")))]
fn
(_out_dir: &str) { pkg_config::Config::new() .atleast_version("2.3.0") .statik(cfg!(not(feature = "dynamic-linking"))) .probe("libsrtp2") .expect("Failed to find libsrtp2 via pkg-config"); } #[cfg(all(not(target_env = "msvc"), feature = "build"))] fn find_libsrtp2(out_dir: &str) { use std::process::Command; let crate_dir = &env::var("CARGO_MANIFEST_DIR").unwrap(); let mut configure = Command::new("/bin/sh"); configure.arg(format!("{}/libsrtp/configure", crate_dir)); if std::env::var_os("SRTP2_SYS_DEBUG_LOGGING").is_some() { configure.arg("--enable-debug-logging"); match std::env::var("SRTP2_SYS_DEBUG_LOG_FILE") { Ok(path) => configure.arg(format!("--with-log-file={}", path)), Err(_) => configure.arg("--enable-log-stdout"), }; } #[cfg(feature = "enable-openssl")] { let openssl_include = env::var("DEP_OPENSSL_INCLUDE").unwrap(); configure .arg("--enable-openssl") .env("crypto_CFLAGS", format!("-I{}", openssl_include)) // Below are to fake the libsrtp build system // so it believes we have proper openssl library. // The library itself will be provided by the `openssl-sys` crate // but at this point we can't know where it is. .env("crypto_LIBS", " ") .env("ac_cv_search_EVP_EncryptInit", " ") .env("ac_cv_search_EVP_aes_128_ctr", " ") .env("ac_cv_search_EVP_aes_128_gcm", " "); } let out = configure .current_dir(out_dir) .output() .expect("Failed to execute `./configure` on libsrtp"); assert!( out.status.success(), "`./configure` executed unsuccessfully on libsrtp\nSTDOUT: {}\nSTDERR: {}", String::from_utf8_lossy(&out.stdout), String::from_utf8_lossy(&out.stderr), ); let out = make_cmd::make() .arg("libsrtp2.a") .current_dir(out_dir) .output() .expect("Failed to execute `make` on libsrtp"); assert!( out.status.success(), "`make` executed unsuccessfully on libsrtp\nSTDOUT: {}\nSTDERR: {}", String::from_utf8_lossy(&out.stdout), String::from_utf8_lossy(&out.stderr), ); println!("cargo:rerun-if-changed=libsrtp"); println!("cargo:rustc-link-lib=static=srtp2"); println!("cargo:rustc-link-search={}", out_dir); }
find_libsrtp2
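The source-build branch of find_libsrtp2 drives ./configure from two environment variables: SRTP2_SYS_DEBUG_LOGGING enables debug logging, and SRTP2_SYS_DEBUG_LOG_FILE redirects it to a file instead of stdout. A small Python sketch of that decision table (illustrative only):

def configure_args(env):
    # mirrors the env handling in find_libsrtp2's build path
    args = []
    if "SRTP2_SYS_DEBUG_LOGGING" in env:
        args.append("--enable-debug-logging")
        log_file = env.get("SRTP2_SYS_DEBUG_LOG_FILE")
        args.append("--with-log-file=" + log_file if log_file else "--enable-log-stdout")
    return args

assert configure_args({}) == []
assert configure_args({"SRTP2_SYS_DEBUG_LOGGING": "1"}) == [
    "--enable-debug-logging", "--enable-log-stdout"]
assert configure_args({"SRTP2_SYS_DEBUG_LOGGING": "1", "SRTP2_SYS_DEBUG_LOG_FILE": "/tmp/srtp.log"}) == [
    "--enable-debug-logging", "--with-log-file=/tmp/srtp.log"]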
data_voc.py
import numpy as np
import random
import xml.dom.minidom
import cv2
import os

def read_file(file_name):
    """
    Read the full contents of file_name.
    return: list of the file's lines
    """
    if not os.path.isfile(file_name):
        return None
    result = []
    with open(file_name, 'r') as f:
        for line in f.readlines():
            # strip the newline character and surrounding whitespace
            line = line.strip('\n').strip()
            if len(line) == 0:
                continue
            result.append(line)
    return result

def word2id(names_file):
    """
    Build the name-to-id conversion dictionary.
    return {}
    """
    id_dict = {}
    contents = read_file(names_file)
    for i in range(len(contents)):
        id_dict[str(contents[i])] = i
    return id_dict

def parse_voc_xml(file_name, names_dict):
    """
    Parse an xml file of the VOC dataset; the list holds all the labels of one image.
    return [ [id1
ti_scale_img=True, width=608, height=608):
        self.data_dirs = [os.path.join(os.path.join(voc_root_dir, voc_dir), "JPEGImages") for voc_dir in voc_dir_ls]    # image directories
        self.class_num = class_num      # number of classes
        self.batch_size = batch_size
        self.anchors = np.asarray(anchors).astype(np.float32).reshape([-1, 2]) / [width, height]    # [9,2]
        print("anchors:\n", self.anchors)
        self.multi_scale_img = multi_scale_img  # multi-scale image resizing
        self.imgs_path = []
        self.labels_path = []
        self.num_batch = 0      # number of batches served so far
        self.num_imgs = 0       # total number of images
        self.width = width
        self.height = height
        self.names_dict = word2id(voc_names)    # name-to-id dictionary

        # initialize the remaining parameters
        self.__init_args()

    # initialize the remaining parameters
    def __init_args(self):
        print("message: initializing paths")

        # init imgs path
        for voc_dir in self.data_dirs:
            for img_name in os.listdir(voc_dir):
                img_path = os.path.join(voc_dir, img_name)
                label_path = img_path.replace("JPEGImages", "Annotations")
                label_path = label_path.replace(img_name.split('.')[-1], "xml")
                if not os.path.isfile(img_path):
                    print("warning: VOC image file '"+str(img_path)+"' does not exist")
                    continue
                if not os.path.isfile(label_path):
                    print("warning: VOC label file '"+str(label_path)+"' does not exist")
                    continue
                self.imgs_path.append(img_path)
                self.labels_path.append(label_path)
                self.num_imgs += 1

        print("message: VOC data initialized, "+str(self.num_imgs)+" images in total")

        if self.num_imgs <= 0:
            raise ValueError("no trainable images, exiting")
        return

    # read an image
    def read_img(self, img_file):
        """
        Read img_file and resize it.
        return: img, RGB & float
        """
        if not os.path.exists(img_file):
            return None
        img = cv2.imread(img_file)
        img = cv2.resize(img, (self.width, self.height))
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = img.astype(np.float32)
        img = img/255.0
        return img

    # read a label file
    def read_label(self, label_file, names_dict):
        """
        Read label_file and build label_y1, label_y2, label_y3.
        return: label_y1, label_y2, label_y3
        """
        contents = parse_voc_xml(label_file, names_dict)
        if not contents:
            return None, None, None

        label_y1 = np.zeros((self.height // 32, self.width // 32, 3, 5 + self.class_num), np.float32)
        label_y2 = np.zeros((self.height // 16, self.width // 16, 3, 5 + self.class_num), np.float32)
        label_y3 = np.zeros((self.height // 8, self.width // 8, 3, 5 + self.class_num), np.float32)

        y_true = [label_y3, label_y2, label_y1]
        ratio = {0: 8, 1: 16, 2: 32}

        for label in contents:
            label_id = int(label[0])
            box = np.asarray(label[1: 5]).astype(np.float32)    # the label already stores x,y,w,h

            best_giou = 0
            best_index = 0
            for i in range(len(self.anchors)):
                min_wh = np.minimum(box[2:4], self.anchors[i])
                max_wh = np.maximum(box[2:4], self.anchors[i])
                giou = (min_wh[0] * min_wh[1]) / (max_wh[0] * max_wh[1])
                if giou > best_giou:
                    best_giou = giou
                    best_index = i

            # 012->0, 345->1, 678->2
            x = int(np.floor(box[0] * self.width / ratio[best_index // 3]))
            y = int(np.floor(box[1] * self.height / ratio[best_index // 3]))
            k = best_index % 3
            y_true[best_index // 3][y, x, k, 0:4] = box
            y_true[best_index // 3][y, x, k, 4:5] = 1.0
            y_true[best_index // 3][y, x, k, 5 + label_id] = 1.0

        return label_y1, label_y2, label_y3

    # load one batch of data
    def __get_data(self):
        """
        Load the labels and images for one batch.
        return: imgs, label_y1, label_y2, label_y3
        """
        # re-randomize the size every ten batches
        if self.multi_scale_img and (self.num_batch % 10 == 0):
            random_size = random.randint(10, 19) * 32
            self.width = self.height = random_size

        imgs = []
        labels_y1, labels_y2, labels_y3 = [], [], []

        count = 0
        while count < self.batch_size:
            curr_index = random.randint(0, self.num_imgs - 1)
            img_name = self.imgs_path[curr_index]
            label_name = self.labels_path[curr_index]
            img = self.read_img(img_name)
            label_y1, label_y2, label_y3 = self.read_label(label_name,
self.names_dict)
            if img is None:
                print("VOC file '" + img_name + "' could not be read")
                continue
            if label_y1 is None:
                print("VOC file '" + label_name + "' could not be read")
                continue
            imgs.append(img)
            labels_y1.append(label_y1)
            labels_y2.append(label_y2)
            labels_y3.append(label_y3)
            count += 1

        self.num_batch += 1
        imgs = np.asarray(imgs)
        labels_y1 = np.asarray(labels_y1)
        labels_y2 = np.asarray(labels_y2)
        labels_y3 = np.asarray(labels_y3)

        return imgs, labels_y1, labels_y2, labels_y3

    # iterator
    def __next__(self):
        """
        Fetch one batch of data per iteration.
        """
        return self.__get_data()
, x1, y1, w1, h1], [id2, x2, y2, w2, h2], ... ] """ # print(file_name) # print(names_dict) result = [] if not os.path.isfile(file_name): return None doc = xml.dom.minidom.parse(file_name) root = doc.documentElement size = root.getElementsByTagName('size')[0] width = int(size.getElementsByTagName('width')[0].childNodes[0].data) height = int(size.getElementsByTagName('height')[0].childNodes[0].data) objs = root.getElementsByTagName('object') for obj in objs: name = obj.getElementsByTagName('name')[0].childNodes[0].data name_id = names_dict[name] bndbox = obj.getElementsByTagName('bndbox')[0] xmin = int(float(bndbox.getElementsByTagName('xmin')[0].childNodes[0].data)) ymin = int(float(bndbox.getElementsByTagName('ymin')[0].childNodes[0].data)) xmax = int(float(bndbox.getElementsByTagName('xmax')[0].childNodes[0].data)) ymax = int(float(bndbox.getElementsByTagName('ymax')[0].childNodes[0].data)) x = (xmax + xmin) / 2.0 / width w = (xmax - xmin) / width y = (ymax + ymin) / 2.0 / height h = (ymax - ymin) / height result.append([name_id, x, y, w, h]) return result class Data: def __init__(self, voc_root_dir, voc_dir_ls, voc_names, class_num, batch_size, anchors, mul
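read_label above picks, for each ground-truth box, the anchor with the highest width/height overlap ratio (the variable is named giou, but it is a plain overlap-of-wh-rectangles score), then writes the box into the grid cell its centre falls in at the matching stride. A Python sketch of the same selection with made-up anchor values (illustrative only):

import numpy as np

def best_anchor(box_wh, anchors):
    # overlap of the wh rectangles as if both were centred at the origin
    min_wh = np.minimum(box_wh, anchors)
    max_wh = np.maximum(box_wh, anchors)
    overlap = (min_wh[:, 0] * min_wh[:, 1]) / (max_wh[:, 0] * max_wh[:, 1])
    return int(np.argmax(overlap))

anchors = np.array([[0.05, 0.07], [0.12, 0.10], [0.30, 0.25]])  # normalized, made up
i = best_anchor(np.array([0.11, 0.09]), anchors)                # -> 1
stride = {0: 8, 1: 16, 2: 32}[i // 3]                           # anchors 0-2 map to stride 8, etc.
gx, gy = int(0.5 * 608 / stride), int(0.4 * 608 / stride)       # grid cell of the box centre
print(i, stride, (gx, gy))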
flac.py
""" FLAC (audio) parser Documentation: * http://flac.sourceforge.net/format.html Author: Esteban Loiseau <baal AT tuxfamily.org> Creation date: 2008-04-09 """ from hachoir_parser import Parser from hachoir_core.field import FieldSet, String, Bit, Bits, UInt16, UInt24, RawBytes, Enum, NullBytes from hachoir_core.stream import BIG_ENDIAN, LITTLE_ENDIAN from hachoir_core.tools import createDict from hachoir_parser.container.ogg import parseVorbisComment class VorbisComment(FieldSet): endian = LITTLE_ENDIAN createFields = parseVorbisComment class StreamInfo(FieldSet): static_size = 34*8 def createFields(self): yield UInt16(self, "min_block_size", "The minimum block size (in samples) used in the stream") yield UInt16(self, "max_block_size", "The maximum block size (in samples) used in the stream") yield UInt24(self, "min_frame_size", "The minimum frame size (in bytes) used in the stream") yield UInt24(self, "max_frame_size", "The maximum frame size (in bytes) used in the stream")
yield Bits(self, "total_samples", 36, "Total samples in stream") yield RawBytes(self, "md5sum", 16, "MD5 signature of the unencoded audio data") class SeekPoint(FieldSet): def createFields(self): yield Bits(self, "sample_number", 64, "Sample number") yield Bits(self, "offset", 64, "Offset in bytes") yield Bits(self, "nb_sample", 16) class SeekTable(FieldSet): def createFields(self): while not self.eof: yield SeekPoint(self, "point[]") class MetadataBlock(FieldSet): "Metadata block field: http://flac.sourceforge.net/format.html#metadata_block" BLOCK_TYPES = { 0: ("stream_info", u"Stream info", StreamInfo), 1: ("padding[]", u"Padding", None), 2: ("application[]", u"Application", None), 3: ("seek_table", u"Seek table", SeekTable), 4: ("comment", u"Vorbis comment", VorbisComment), 5: ("cue_sheet[]", u"Cue sheet", None), 6: ("picture[]", u"Picture", None), } BLOCK_TYPE_DESC = createDict(BLOCK_TYPES, 1) def __init__(self, *args, **kw): FieldSet.__init__(self, *args, **kw) self._size = 32 + self["metadata_length"].value * 8 try: key = self["block_type"].value self._name, self._description, self.handler = self.BLOCK_TYPES[key] except KeyError: self.handler = None def createFields(self): yield Bit(self, "last_metadata_block", "True if this is the last metadata block") yield Enum(Bits(self, "block_type", 7, "Metadata block header type"), self.BLOCK_TYPE_DESC) yield UInt24(self, "metadata_length", "Length of following metadata in bytes (doesn't include this header)") block_type = self["block_type"].value size = self["metadata_length"].value if not size: return try: handler = self.BLOCK_TYPES[block_type][2] except KeyError: handler = None if handler: yield handler(self, "content", size=size*8) elif self["block_type"].value == 1: yield NullBytes(self, "padding", size) else: yield RawBytes(self, "rawdata", size) class Metadata(FieldSet): def createFields(self): while not self.eof: field = MetadataBlock(self,"metadata_block[]") yield field if field["last_metadata_block"].value: break class Frame(FieldSet): SAMPLE_RATES = { 0: "get from STREAMINFO metadata block", 1: "88.2kHz", 2: "176.4kHz", 3: "192kHz", 4: "8kHz", 5: "16kHz", 6: "22.05kHz", 7: "24kHz", 8: "32kHz", 9: "44.1kHz", 10: "48kHz", 11: "96kHz", 12: "get 8 bit sample rate (in kHz) from end of header", 13: "get 16 bit sample rate (in Hz) from end of header", 14: "get 16 bit sample rate (in tens of Hz) from end of header", } def createFields(self): yield Bits(self, "sync", 14, "Sync code: 11111111111110") yield Bit(self, "reserved[]") yield Bit(self, "blocking_strategy") yield Bits(self, "block_size", 4) yield Enum(Bits(self, "sample_rate", 4), self.SAMPLE_RATES) yield Bits(self, "channel_assign", 4) yield Bits(self, "sample_size", 3) yield Bit(self, "reserved[]") # FIXME: Finish frame header parser class Frames(FieldSet): def createFields(self): while not self.eof: yield Frame(self, "frame[]") # FIXME: Parse all frames return class FlacParser(Parser): "Parse FLAC audio files: FLAC is a lossless audio codec" MAGIC = "fLaC\x00" PARSER_TAGS = { "id": "flac", "category": "audio", "file_ext": ("flac",), "mime": (u"audio/x-flac",), "magic": ((MAGIC, 0),), "min_size": 4*8, "description": "FLAC audio", } endian = BIG_ENDIAN def validate(self): if self.stream.readBytes(0, len(self.MAGIC)) != self.MAGIC: return u"Invalid magic string" return True def createFields(self): yield String(self, "signature", 4,charset="ASCII", description="FLAC signature: fLaC string") yield Metadata(self,"metadata") yield Frames(self,"frames")
yield Bits(self, "sample_hertz", 20, "Sample rate in Hertz") yield Bits(self, "nb_channel", 3, "Number of channels minus one") yield Bits(self, "bits_per_sample", 5, "Bits per sample minus one")
resources.go
/*
Copyright 2018 The Rook Authors. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

	http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1

import (
	rook "github.com/rook/rook/pkg/apis/rook.io/v1"
	v1 "k8s.io/api/core/v1"
)

const (
	// ResourcesKeyMon represents the name of the resource in the CR for a mon
	ResourcesKeyMon = "mon"
	// ResourcesKeyMgr represents the name of the resource in the CR for a mgr
	ResourcesKeyMgr = "mgr"
	// ResourcesKeyOSD represents the name of the resource in the CR for an osd
	ResourcesKeyOSD = "osd"
	// ResourcesKeyPrepareOSD represents the name of the resource in the CR for the osd prepare job
	ResourcesKeyPrepareOSD = "prepareosd"
	// ResourcesKeyMDS represents the name of the resource in the CR for the mds
	ResourcesKeyMDS = "mds"
	// ResourcesKeyCrashCollector represents the name of the resource in the CR for the crash collector
	ResourcesKeyCrashCollector = "crashcollector"
	// ResourcesKeyRBDMirror represents the name of the resource in the CR for the rbd mirror
	ResourcesKeyRBDMirror = "rbdmirror"
	// ResourcesKeyCleanup represents the name of the resource in the CR for the cleanup job
	ResourcesKeyCleanup = "cleanup"
)

// GetMgrResources returns the resource requirements for the MGR service
func GetMgrResources(p rook.ResourceSpec) v1.ResourceRequirements
// GetMonResources returns the resource requirements for the monitors
func GetMonResources(p rook.ResourceSpec) v1.ResourceRequirements {
	return p[ResourcesKeyMon]
}

// GetOSDResources returns the resource requirements for the OSDs
func GetOSDResources(p rook.ResourceSpec) v1.ResourceRequirements {
	return p[ResourcesKeyOSD]
}

// GetPrepareOSDResources returns the resource requirements for the OSD prepare job
func GetPrepareOSDResources(p rook.ResourceSpec) v1.ResourceRequirements {
	return p[ResourcesKeyPrepareOSD]
}

// GetCrashCollectorResources returns the resource requirements for the crash collector daemon
func GetCrashCollectorResources(p rook.ResourceSpec) v1.ResourceRequirements {
	return p[ResourcesKeyCrashCollector]
}

// GetCleanupResources returns the resource requirements for the cleanup job
func GetCleanupResources(p rook.ResourceSpec) v1.ResourceRequirements {
	return p[ResourcesKeyCleanup]
}
{ return p[ResourcesKeyMgr] }
bitcoin_pt_PT.ts
<?xml version="1.0" ?><!DOCTYPE TS><TS language="pt_PT" version="2.0"> <defaultcodec>UTF-8</defaultcodec> <context> <name>AboutDialog</name> <message> <location filename="../forms/aboutdialog.ui" line="+14"/> <source>About pokemoncoin</source> <translation>Sobre pokemoncoin</translation> </message> <message> <location line="+39"/> <source>&lt;b&gt;pokemoncoin&lt;/b&gt; version</source> <translation>Versão do &lt;b&gt;pokemoncoin&lt;/b&gt;</translation> </message> <message> <location line="+57"/> <source> This is experimental software. Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/) and cryptographic software written by Eric Young ([email protected]) and UPnP software written by Thomas Bernard.</source> <translation> Este é um programa experimental. Distribuído sob uma licença de software MIT/X11, por favor verifique o ficheiro anexo license.txt ou http://www.opensource.org/licenses/mit-license.php. Este produto inclui software desenvolvido pelo Projecto OpenSSL para uso no OpenSSL Toolkit (http://www.openssl.org/), software criptográfico escrito por Eric Young ([email protected]) e software UPnP escrito por Thomas Bernard.</translation> </message> <message> <location filename="../aboutdialog.cpp" line="+14"/> <source>Copyright</source> <translation>Copyright</translation> </message> <message> <location line="+0"/> <source>The pokemoncoin developers</source> <translation>Os programadores pokemoncoin</translation> </message> </context> <context> <name>AddressBookPage</name> <message> <location filename="../forms/addressbookpage.ui" line="+14"/> <source>Address Book</source> <translation>Livro de endereços</translation> </message> <message> <location line="+19"/> <source>Double-click to edit address or label</source> <translation>Clique duas vezes para editar o endereço ou o rótulo</translation> </message> <message> <location line="+27"/> <source>Create a new address</source> <translation>Criar um novo endereço</translation> </message> <message> <location line="+14"/> <source>Copy the currently selected address to the system clipboard</source> <translation>Copie o endereço selecionado para a área de transferência</translation> </message> <message> <location line="-11"/> <source>&amp;New Address</source> <translation>&amp;Novo Endereço</translation> </message> <message> <location filename="../addressbookpage.cpp" line="+63"/> <source>These are your pokemoncoin addresses for receiving payments. You may want to give a different one to each sender so you can keep track of who is paying you.</source> <translation>Estes são os seus endereços pokemoncoin para receber pagamentos. 
Poderá enviar um endereço diferente para cada remetente para poder identificar os pagamentos.</translation> </message> <message> <location filename="../forms/addressbookpage.ui" line="+14"/> <source>&amp;Copy Address</source> <translation>&amp;Copiar Endereço</translation> </message> <message> <location line="+11"/> <source>Show &amp;QR Code</source> <translation>Mostrar Código &amp;QR</translation> </message> <message> <location line="+11"/> <source>Sign a message to prove you own a pokemoncoin address</source> <translation>Assine uma mensagem para provar que é dono de um endereço pokemoncoin</translation> </message> <message> <location line="+3"/> <source>Sign &amp;Message</source> <translation>Assinar &amp;Mensagem</translation> </message> <message> <location line="+25"/> <source>Delete the currently selected address from the list</source> <translation>Apagar o endereço selecionado da lista</translation> </message> <message> <location line="+27"/> <source>Export the data in the current tab to a file</source> <translation>Exportar os dados no separador actual para um ficheiro</translation> </message> <message> <location line="+3"/> <source>&amp;Export</source> <translation>&amp;Exportar</translation> </message> <message> <location line="-44"/> <source>Verify a message to ensure it was signed with a specified pokemoncoin address</source> <translation>Verifique a mensagem para assegurar que foi assinada com o endereço pokemoncoin especificado</translation> </message> <message> <location line="+3"/> <source>&amp;Verify Message</source> <translation>&amp;Verificar Mensagem</translation> </message> <message> <location line="+14"/> <source>&amp;Delete</source> <translation>E&amp;liminar</translation> </message> <message> <location filename="../addressbookpage.cpp" line="-5"/> <source>These are your pokemoncoin addresses for sending payments. Always check the amount and the receiving address before sending coins.</source> <translation>Estes são os seus endereços pokemoncoin para enviar pagamentos. 
Verifique sempre o valor e a morada de envio antes de enviar moedas.</translation> </message> <message> <location line="+13"/> <source>Copy &amp;Label</source> <translation>Copiar &amp;Rótulo</translation> </message> <message> <location line="+1"/> <source>&amp;Edit</source> <translation>&amp;Editar</translation> </message> <message> <location line="+1"/> <source>Send &amp;Coins</source> <translation>Enviar &amp;Moedas</translation> </message> <message> <location line="+260"/> <source>Export Address Book Data</source> <translation>Exportar dados do Livro de Endereços</translation> </message> <message> <location line="+1"/> <source>Comma separated file (*.csv)</source> <translation>Ficheiro separado por vírgulas (*.csv)</translation> </message> <message> <location line="+13"/> <source>Error exporting</source> <translation>Erro ao exportar</translation> </message> <message> <location line="+0"/> <source>Could not write to file %1.</source> <translation>Não foi possível escrever para o ficheiro %1.</translation> </message> </context> <context> <name>AddressTableModel</name> <message> <location filename="../addresstablemodel.cpp" line="+144"/> <source>Label</source> <translation>Rótulo</translation> </message> <message> <location line="+0"/> <source>Address</source> <translation>Endereço</translation> </message> <message> <location line="+36"/> <source>(no label)</source> <translation>(Sem rótulo)</translation> </message> </context> <context> <name>AskPassphraseDialog</name> <message> <location filename="../forms/askpassphrasedialog.ui" line="+26"/> <source>Passphrase Dialog</source> <translation>Diálogo de Frase-Passe</translation> </message> <message> <location line="+21"/> <source>Enter passphrase</source> <translation>Escreva a frase de segurança</translation> </message> <message> <location line="+14"/> <source>New passphrase</source> <translation>Nova frase de segurança</translation> </message> <message> <location line="+14"/> <source>Repeat new passphrase</source> <translation>Repita a nova frase de segurança</translation> </message> <message> <location filename="../askpassphrasedialog.cpp" line="+33"/> <source>Enter the new passphrase to the wallet.&lt;br/&gt;Please use a passphrase of &lt;b&gt;10 or more random characters&lt;/b&gt;, or &lt;b&gt;eight or more words&lt;/b&gt;.</source> <translation>Escreva a nova frase de seguraça da sua carteira. 
&lt;br/&gt; Por favor, use uma frase de &lt;b&gt;10 ou mais caracteres aleatórios,&lt;/b&gt; ou &lt;b&gt;oito ou mais palavras&lt;/b&gt;.</translation> </message> <message> <location line="+1"/> <source>Encrypt wallet</source> <translation>Encriptar carteira</translation> </message> <message> <location line="+3"/> <source>This operation needs your wallet passphrase to unlock the wallet.</source> <translation>A sua frase de segurança é necessária para desbloquear a carteira.</translation> </message> <message> <location line="+5"/> <source>Unlock wallet</source> <translation>Desbloquear carteira</translation> </message> <message> <location line="+3"/> <source>This operation needs your wallet passphrase to decrypt the wallet.</source> <translation>A sua frase de segurança é necessária para desencriptar a carteira.</translation> </message> <message> <location line="+5"/> <source>Decrypt wallet</source> <translation>Desencriptar carteira</translation> </message> <message> <location line="+3"/> <source>Change passphrase</source> <translation>Alterar frase de segurança</translation> </message> <message> <location line="+1"/> <source>Enter the old and new passphrase to the wallet.</source> <translation>Escreva a frase de segurança antiga seguida da nova para a carteira.</translation> </message> <message> <location line="+46"/> <source>Confirm wallet encryption</source> <translation>Confirmar encriptação da carteira</translation> </message> <message> <location line="+1"/> <source>Warning: If you encrypt your wallet and lose your passphrase, you will &lt;b&gt;LOSE ALL OF YOUR LITECOINS&lt;/b&gt;!</source> <translation>Atenção: Se encriptar a carteira e perder a sua senha irá &lt;b&gt;PERDER TODOS OS SEUS LITECOINS&lt;/b&gt;!</translation> </message> <message> <location line="+0"/> <source>Are you sure you wish to encrypt your wallet?</source> <translation>Tem a certeza que deseja encriptar a carteira?</translation> </message> <message> <location line="+15"/> <source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source> <translation>IMPORTANTE: Qualquer cópia de segurança anterior da carteira deverá ser substituída com o novo, actualmente encriptado, ficheiro de carteira. Por razões de segurança, cópias de segurança não encriptadas efectuadas anteriormente do ficheiro da carteira tornar-se-ão inúteis assim que começar a usar a nova carteira encriptada.</translation> </message> <message> <location line="+100"/> <location line="+24"/> <source>Warning: The Caps Lock key is on!</source> <translation>Atenção: A tecla Caps Lock está activa!</translation> </message> <message> <location line="-130"/> <location line="+58"/> <source>Wallet encrypted</source> <translation>Carteira encriptada</translation> </message> <message> <location line="-56"/> <source>pokemoncoin will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your pokemoncoins from being stolen by malware infecting your computer.</source> <translation>O cliente pokemoncoin irá agora ser fechado para terminar o processo de encriptação. 
Recorde que a encriptação da sua carteira não protegerá totalmente os seus pokemoncoins de serem roubados por programas maliciosos que infectem o seu computador.</translation> </message> <message> <location line="+13"/> <location line="+7"/> <location line="+42"/> <location line="+6"/> <source>Wallet encryption failed</source> <translation>A encriptação da carteira falhou</translation> </message> <message> <location line="-54"/> <source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source> <translation>A encriptação da carteira falhou devido a um erro interno. A carteira não foi encriptada.</translation> </message> <message> <location line="+7"/> <location line="+48"/> <source>The supplied passphrases do not match.</source> <translation>As frases de segurança fornecidas não coincidem.</translation> </message> <message> <location line="-37"/> <source>Wallet unlock failed</source> <translation>O desbloqueio da carteira falhou</translation> </message> <message> <location line="+1"/> <location line="+11"/> <location line="+19"/> <source>The passphrase entered for the wallet decryption was incorrect.</source> <translation>A frase de segurança introduzida para a desencriptação da carteira estava incorreta.</translation> </message> <message> <location line="-20"/> <source>Wallet decryption failed</source> <translation>A desencriptação da carteira falhou</translation> </message> <message> <location line="+14"/> <source>Wallet passphrase was successfully changed.</source> <translation>A frase de segurança da carteira foi alterada com êxito.</translation> </message> </context> <context> <name>BitcoinGUI</name> <message> <location filename="../bitcoingui.cpp" line="+233"/> <source>Sign &amp;message...</source> <translation>Assinar &amp;mensagem...</translation> </message> <message> <location line="+280"/> <source>Synchronizing with network...</source> <translation>Sincronizando com a rede...</translation> </message> <message> <location line="-349"/> <source>&amp;Overview</source> <translation>Visã&amp;o geral</translation> </message> <message> <location line="+1"/> <source>Show general overview of wallet</source> <translation>Mostrar visão geral da carteira</translation> </message> <message> <location line="+20"/> <source>&amp;Transactions</source> <translation>&amp;Transações</translation> </message> <message> <location line="+1"/> <source>Browse transaction history</source> <translation>Navegar pelo histórico de transações</translation> </message> <message> <location line="+7"/> <source>Edit the list of stored addresses and labels</source> <translation>Editar a lista de endereços e rótulos</translation> </message> <message> <location line="-14"/> <source>Show the list of addresses for receiving payments</source> <translation>Mostrar a lista de endereços para receber pagamentos</translation> </message> <message> <location line="+31"/> <source>E&amp;xit</source> <translation>Fec&amp;har</translation> </message> <message> <location line="+1"/> <source>Quit application</source> <translation>Sair da aplicação</translation> </message> <message> <location line="+4"/> <source>Show information about pokemoncoin</source> <translation>Mostrar informação sobre pokemoncoin</translation> </message> <message> <location line="+2"/> <source>About &amp;Qt</source> <translation>Sobre &amp;Qt</translation> </message> <message> <location line="+1"/> <source>Show information about Qt</source> <translation>Mostrar informação sobre Qt</translation> </message> <message> <location 
line="+2"/> <source>&amp;Options...</source> <translation>&amp;Opções...</translation> </message> <message> <location line="+6"/> <source>&amp;Encrypt Wallet...</source> <translation>E&amp;ncriptar Carteira...</translation> </message> <message> <location line="+3"/> <source>&amp;Backup Wallet...</source> <translation>&amp;Guardar Carteira...</translation> </message> <message> <location line="+2"/> <source>&amp;Change Passphrase...</source> <translation>Mudar &amp;Palavra-passe...</translation> </message> <message> <location line="+285"/> <source>Importing blocks from disk...</source> <translation>Importando blocos do disco...</translation> </message> <message> <location line="+3"/> <source>Reindexing blocks on disk...</source> <translation>Reindexando blocos no disco...</translation> </message> <message> <location line="-347"/> <source>Send coins to a pokemoncoin address</source> <translation>Enviar moedas para um endereço pokemoncoin</translation> </message> <message> <location line="+49"/> <source>Modify configuration options for pokemoncoin</source> <translation>Modificar opções de configuração para pokemoncoin</translation> </message> <message> <location line="+9"/> <source>Backup wallet to another location</source> <translation>Faça uma cópia de segurança da carteira para outra localização</translation> </message> <message> <location line="+2"/> <source>Change the passphrase used for wallet encryption</source> <translation>Mudar a frase de segurança utilizada na encriptação da carteira</translation> </message> <message> <location line="+6"/> <source>&amp;Debug window</source> <translation>Janela de &amp;depuração</translation> </message> <message> <location line="+1"/> <source>Open debugging and diagnostic console</source> <translation>Abrir consola de diagnóstico e depuração</translation> </message> <message> <location line="-4"/> <source>&amp;Verify message...</source> <translation>&amp;Verificar mensagem...</translation> </message> <message> <location line="-165"/> <location line="+530"/> <source>pokemoncoin</source> <translation>pokemoncoin</translation> </message> <message> <location line="-530"/> <source>Wallet</source> <translation>Carteira</translation> </message> <message> <location line="+101"/> <source>&amp;Send</source> <translation>&amp;Enviar</translation> </message> <message> <location line="+7"/> <source>&amp;Receive</source> <translation>&amp;Receber</translation> </message> <message> <location line="+14"/> <source>&amp;Addresses</source> <translation>E&amp;ndereços</translation> </message> <message> <location line="+22"/> <source>&amp;About pokemoncoin</source> <translation>&amp;Sobre o pokemoncoin</translation> </message> <message> <location line="+9"/> <source>&amp;Show / Hide</source> <translation>Mo&amp;strar / Ocultar</translation> </message> <message> <location line="+1"/> <source>Show or hide the main Window</source> <translation>Mostrar ou esconder a Janela principal</translation> </message> <message> <location line="+3"/> <source>Encrypt the private keys that belong to your wallet</source> <translation>Encriptar as chaves privadas que pertencem à sua carteira</translation> </message> <message> <location line="+7"/> <source>Sign messages with your pokemoncoin addresses to prove you own them</source> <translation>Assine mensagens com os seus endereços pokemoncoin para provar que os controla</translation> </message> <message> <location line="+2"/> <source>Verify messages to ensure they were signed with specified pokemoncoin addresses</source> 
<translation>Verifique mensagens para assegurar que foram assinadas com os endereços pokemoncoin especificados</translation> </message> <message> <location line="+28"/> <source>&amp;File</source> <translation>&amp;Ficheiro</translation> </message> <message> <location line="+7"/> <source>&amp;Settings</source> <translation>Con&amp;figurações</translation> </message> <message> <location line="+6"/> <source>&amp;Help</source> <translation>A&amp;juda</translation> </message> <message> <location line="+9"/> <source>Tabs toolbar</source> <translation>Barra de separadores</translation> </message> <message> <location line="+17"/> <location line="+10"/> <source>[testnet]</source> <translation>[rede de testes]</translation> </message> <message> <location line="+47"/> <source>pokemoncoin client</source> <translation>Cliente pokemoncoin</translation> </message> <message numerus="yes"> <location line="+141"/> <source>%n active connection(s) to pokemoncoin network</source> <translation><numerusform>%n ligação ativa à rede pokemoncoin</numerusform><numerusform>%n ligações ativas à rede pokemoncoin</numerusform></translation> </message> <message> <location line="+22"/> <source>No block source available...</source> <translation>Nenhuma fonte de blocos disponível...</translation> </message> <message> <location line="+12"/> <source>Processed %1 of %2 (estimated) blocks of transaction history.</source> <translation>Processados %1 dos %2 blocos (estimados) do histórico de transações.</translation> </message> <message> <location line="+4"/> <source>Processed %1 blocks of transaction history.</source> <translation>Processados %1 blocos do histórico de transações.</translation> </message> <message numerus="yes"> <location line="+20"/> <source>%n hour(s)</source> <translation><numerusform>%n hora</numerusform><numerusform>%n horas</numerusform></translation> </message> <message numerus="yes"> <location line="+4"/> <source>%n day(s)</source> <translation><numerusform>%n dia</numerusform><numerusform>%n dias</numerusform></translation> </message> <message numerus="yes"> <location line="+4"/> <source>%n week(s)</source> <translation><numerusform>%n semana</numerusform><numerusform>%n semanas</numerusform></translation> </message> <message> <location line="+4"/> <source>%1 behind</source> <translation>%1 em atraso</translation> </message> <message> <location line="+14"/> <source>Last received block was generated %1 ago.</source> <translation>O último bloco recebido foi gerado há %1.</translation> </message> <message> <location line="+2"/> <source>Transactions after this will not yet be visible.</source> <translation>Transações posteriores a esta ainda não estarão visíveis.</translation> </message> <message> <location line="+22"/> <source>Error</source> <translation>Erro</translation> </message> <message> <location line="+3"/> <source>Warning</source> <translation>Aviso</translation> </message> <message> <location line="+3"/> <source>Information</source> <translation>Informação</translation> </message> <message> <location line="+70"/> <source>This transaction is over the size limit. You can still send it for a fee of %1, which goes to the nodes that process your transaction and helps to support the network. Do you want to pay the fee?</source> <translation>Esta transação tem um tamanho superior ao limite máximo. Poderá enviá-la pagando uma taxa de %1, que será entregue aos nós que processarem a sua transação e ajudará a suportar a rede.
Deseja pagar a taxa?</translation> </message> <message> <location line="-140"/> <source>Up to date</source> <translation>Atualizado</translation> </message> <message> <location line="+31"/> <source>Catching up...</source> <translation>Recuperando...</translation> </message> <message> <location line="+113"/> <source>Confirm transaction fee</source> <translation>Confirme a taxa de transação</translation> </message> <message> <location line="+8"/> <source>Sent transaction</source> <translation>Transação enviada</translation> </message> <message> <location line="+0"/> <source>Incoming transaction</source> <translation>Transação recebida</translation> </message> <message> <location line="+1"/> <source>Date: %1 Amount: %2 Type: %3 Address: %4 </source> <translation>Data: %1 Quantia: %2 Tipo: %3 Endereço: %4 </translation> </message> <message> <location line="+33"/> <location line="+23"/> <source>URI handling</source> <translation>Manuseamento de URI</translation> </message> <message> <location line="-23"/> <location line="+23"/> <source>URI can not be parsed! This can be caused by an invalid pokemoncoin address or malformed URI parameters.</source> <translation>O URI não pôde ser analisado! Isto pode ser causado por um endereço pokemoncoin inválido ou por parâmetros URI malformados.</translation> </message> <message> <location line="+17"/> <source>Wallet is &lt;b&gt;encrypted&lt;/b&gt; and currently &lt;b&gt;unlocked&lt;/b&gt;</source> <translation>A carteira está &lt;b&gt;encriptada&lt;/b&gt; e atualmente &lt;b&gt;desbloqueada&lt;/b&gt;</translation> </message> <message> <location line="+8"/> <source>Wallet is &lt;b&gt;encrypted&lt;/b&gt; and currently &lt;b&gt;locked&lt;/b&gt;</source> <translation>A carteira está &lt;b&gt;encriptada&lt;/b&gt; e atualmente &lt;b&gt;bloqueada&lt;/b&gt;</translation> </message> <message> <location filename="../bitcoin.cpp" line="+111"/> <source>A fatal error occurred. pokemoncoin can no longer continue safely and will quit.</source> <translation>Ocorreu um erro fatal. O pokemoncoin não pode continuar com segurança e irá fechar.</translation> </message> </context> <context> <name>ClientModel</name> <message> <location filename="../clientmodel.cpp" line="+104"/> <source>Network Alert</source> <translation>Alerta da Rede</translation> </message> </context> <context> <name>EditAddressDialog</name> <message> <location filename="../forms/editaddressdialog.ui" line="+14"/> <source>Edit Address</source> <translation>Editar Endereço</translation> </message> <message> <location line="+11"/> <source>&amp;Label</source> <translation>&amp;Rótulo</translation> </message> <message> <location line="+10"/> <source>The label associated with this address book entry</source> <translation>O rótulo a ser associado com esta entrada do livro de endereços</translation> </message> <message> <location line="+7"/> <source>&amp;Address</source> <translation>E&amp;ndereço</translation> </message> <message> <location line="+10"/> <source>The address associated with this address book entry. This can only be modified for sending addresses.</source> <translation>O endereço associado com esta entrada do livro de endereços.
Apenas poderá ser modificado para endereços de saída.</translation> </message> <message> <location filename="../editaddressdialog.cpp" line="+21"/> <source>New receiving address</source> <translation>Novo endereço de entrada</translation> </message> <message> <location line="+4"/> <source>New sending address</source> <translation>Novo endereço de saída</translation> </message> <message> <location line="+3"/> <source>Edit receiving address</source> <translation>Editar endereço de entrada</translation> </message> <message> <location line="+4"/> <source>Edit sending address</source> <translation>Editar endereço de saída</translation> </message> <message> <location line="+76"/> <source>The entered address &quot;%1&quot; is already in the address book.</source> <translation>O endereço introduzido &quot;%1&quot; já se encontra no livro de endereços.</translation> </message> <message> <location line="-5"/> <source>The entered address &quot;%1&quot; is not a valid pokemoncoin address.</source> <translation>O endereço introduzido &quot;%1&quot; não é um endereço pokemoncoin válido.</translation> </message> <message> <location line="+10"/> <source>Could not unlock wallet.</source> <translation>Impossível desbloquear carteira.</translation> </message> <message> <location line="+5"/> <source>New key generation failed.</source> <translation>Falha ao gerar nova chave.</translation> </message> </context> <context> <name>GUIUtil::HelpMessageBox</name> <message> <location filename="../guiutil.cpp" line="+424"/> <location line="+12"/> <source>pokemoncoin-Qt</source> <translation>pokemoncoin-Qt</translation> </message> <message> <location line="-12"/> <source>version</source> <translation>versão</translation> </message> <message> <location line="+2"/> <source>Usage:</source> <translation>Utilização:</translation> </message> <message> <location line="+1"/> <source>command-line options</source> <translation>opções da linha de comandos</translation> </message> <message> <location line="+4"/> <source>UI options</source> <translation>Opções de UI</translation> </message> <message> <location line="+1"/> <source>Set language, for example &quot;de_DE&quot; (default: system locale)</source> <translation>Definir linguagem, por exemplo &quot;pt_PT&quot; (por defeito: linguagem do sistema)</translation> </message> <message> <location line="+1"/> <source>Start minimized</source> <translation>Iniciar minimizado</translation> </message> <message> <location line="+1"/> <source>Show splash screen on startup (default: 1)</source> <translation>Mostrar animação ao iniciar (por defeito: 1)</translation> </message> </context> <context> <name>OptionsDialog</name> <message> <location filename="../forms/optionsdialog.ui" line="+14"/> <source>Options</source> <translation>Opções</translation> </message> <message> <location line="+16"/> <source>&amp;Main</source> <translation>&amp;Principal</translation> </message> <message> <location line="+6"/> <source>Optional transaction fee per kB that helps make sure your transactions are processed quickly. Most transactions are 1 kB.</source> <translation>Taxa de transação opcional por KB que ajuda a assegurar que as suas transações serão processadas rapidamente. 
A maioria das transações tem 1 kB.</translation> </message> <message> <location line="+15"/> <source>Pay transaction &amp;fee</source> <translation>Pagar &amp;taxa de transação</translation> </message> <message> <location line="+31"/> <source>Automatically start pokemoncoin after logging in to the system.</source> <translation>Começar o pokemoncoin automaticamente ao iniciar sessão no sistema.</translation> </message> <message> <location line="+3"/> <source>&amp;Start pokemoncoin on system login</source> <translation>&amp;Começar o pokemoncoin ao iniciar o sistema</translation> </message> <message> <location line="+35"/> <source>Reset all client options to default.</source> <translation>Repor todas as opções do cliente para os valores por defeito.</translation> </message> <message> <location line="+3"/> <source>&amp;Reset Options</source> <translation>&amp;Repor Opções</translation> </message> <message> <location line="+13"/> <source>&amp;Network</source> <translation>&amp;Rede</translation> </message> <message> <location line="+6"/> <source>Automatically open the pokemoncoin client port on the router. This only works when your router supports UPnP and it is enabled.</source> <translation>Abrir a porta do cliente pokemoncoin automaticamente no seu router. Isto apenas funciona se o seu router suportar UPnP e este se encontrar ligado.</translation> </message> <message> <location line="+3"/> <source>Map port using &amp;UPnP</source> <translation>Mapear porta usando &amp;UPnP</translation> </message> <message> <location line="+7"/> <source>Connect to the pokemoncoin network through a SOCKS proxy (e.g. when connecting through Tor).</source> <translation>Ligar à rede pokemoncoin através de um proxy SOCKS (p.ex. quando ligar através de Tor).</translation> </message> <message> <location line="+3"/> <source>&amp;Connect through SOCKS proxy:</source> <translation>Ligar através de proxy SO&amp;CKS:</translation> </message> <message> <location line="+9"/> <source>Proxy &amp;IP:</source> <translation>&amp;IP do proxy:</translation> </message> <message> <location line="+19"/> <source>IP address of the proxy (e.g. 127.0.0.1)</source> <translation>Endereço IP do proxy (p.ex. 127.0.0.1)</translation> </message> <message> <location line="+7"/> <source>&amp;Port:</source> <translation>&amp;Porta:</translation> </message> <message> <location line="+19"/> <source>Port of the proxy (e.g. 9050)</source> <translation>Porta do proxy (p.ex. 9050)</translation> </message> <message> <location line="+7"/> <source>SOCKS &amp;Version:</source> <translation>&amp;Versão SOCKS:</translation> </message> <message> <location line="+13"/> <source>SOCKS version of the proxy (e.g. 5)</source> <translation>Versão do proxy SOCKS (p.ex. 5)</translation> </message> <message> <location line="+36"/> <source>&amp;Window</source> <translation>&amp;Janela</translation> </message> <message> <location line="+6"/> <source>Show only a tray icon after minimizing the window.</source> <translation>Apenas mostrar o ícone da bandeja após minimizar a janela.</translation> </message> <message> <location line="+3"/> <source>&amp;Minimize to the tray instead of the taskbar</source> <translation>&amp;Minimizar para a bandeja e não para a barra de tarefas</translation> </message> <message> <location line="+7"/> <source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Quit in the menu.</source> <translation>Minimizar em vez de sair da aplicação quando a janela é fechada.
Com esta opção selecionada, a aplicação apenas será encerrada quando escolher Sair da aplicação no menu.</translation> </message> <message> <location line="+3"/> <source>M&amp;inimize on close</source> <translation>M&amp;inimizar ao fechar</translation> </message> <message> <location line="+21"/> <source>&amp;Display</source> <translation>Vis&amp;ualização</translation> </message> <message> <location line="+8"/> <source>User Interface &amp;language:</source> <translation>&amp;Linguagem da interface de utilizador:</translation> </message> <message> <location line="+13"/> <source>The user interface language can be set here. This setting will take effect after restarting pokemoncoin.</source> <translation>A linguagem da interface do utilizador pode ser definida aqui. Esta definição terá efeito após reiniciar o pokemoncoin.</translation> </message> <message> <location line="+11"/> <source>&amp;Unit to show amounts in:</source> <translation>&amp;Unidade a usar em quantias:</translation> </message> <message> <location line="+13"/> <source>Choose the default subdivision unit to show in the interface and when sending coins.</source> <translation>Escolha a subdivisão unitária a ser mostrada por defeito na aplicação e ao enviar moedas.</translation> </message> <message> <location line="+9"/> <source>Whether to show pokemoncoin addresses in the transaction list or not.</source> <translation>Se mostrar, ou não, os endereços pokemoncoin na lista de transações.</translation> </message> <message> <location line="+3"/> <source>&amp;Display addresses in transaction list</source> <translation>Mostrar en&amp;dereços na lista de transações</translation> </message> <message> <location line="+71"/> <source>&amp;OK</source> <translation>&amp;OK</translation> </message> <message> <location line="+7"/> <source>&amp;Cancel</source> <translation>&amp;Cancelar</translation> </message> <message> <location line="+10"/> <source>&amp;Apply</source> <translation>&amp;Aplicar</translation> </message> <message> <location filename="../optionsdialog.cpp" line="+53"/> <source>default</source> <translation>padrão</translation> </message> <message> <location line="+130"/> <source>Confirm options reset</source> <translation>Confirme a reposição de opções</translation> </message> <message> <location line="+1"/> <source>Some settings may require a client restart to take effect.</source> <translation>Algumas definições poderão requerer o reinício do cliente para terem efeito.</translation> </message> <message> <location line="+0"/> <source>Do you want to proceed?</source> <translation>Deseja continuar?</translation> </message> <message> <location line="+42"/> <location line="+9"/> <source>Warning</source> <translation>Aviso</translation> </message> <message> <location line="-9"/> <location line="+9"/> <source>This setting will take effect after restarting pokemoncoin.</source> <translation>Esta opção terá efeito após reiniciar o pokemoncoin.</translation> </message> <message> <location line="+29"/> <source>The supplied proxy address is invalid.</source> <translation>O endereço de proxy introduzido é inválido.</translation> </message> </context> <context> <name>OverviewPage</name> <message> <location filename="../forms/overviewpage.ui" line="+14"/> <source>Form</source> <translation>Formulário</translation> </message> <message> <location line="+50"/> <location line="+166"/> <source>The displayed information may be out of date.
Your wallet automatically synchronizes with the pokemoncoin network after a connection is established, but this process has not completed yet.</source> <translation>A informação mostrada poderá estar desatualizada. A sua carteira sincroniza automaticamente com a rede pokemoncoin depois de estabelecer ligação, mas este processo ainda não está completo.</translation> </message> <message> <location line="-124"/> <source>Balance:</source> <translation>Saldo:</translation> </message> <message> <location line="+29"/> <source>Unconfirmed:</source> <translation>Não confirmado:</translation> </message> <message> <location line="-78"/> <source>Wallet</source> <translation>Carteira</translation> </message> <message> <location line="+107"/> <source>Immature:</source> <translation>Imaturo:</translation> </message> <message> <location line="+13"/> <source>Mined balance that has not yet matured</source> <translation>Saldo minado que ainda não maturou</translation> </message> <message> <location line="+46"/> <source>&lt;b&gt;Recent transactions&lt;/b&gt;</source> <translation>&lt;b&gt;Transações recentes&lt;/b&gt;</translation> </message> <message> <location line="-101"/> <source>Your current balance</source> <translation>O seu saldo atual</translation> </message> <message> <location line="+29"/> <source>Total of transactions that have yet to be confirmed, and do not yet count toward the current balance</source> <translation>Total de transações ainda não confirmadas, e que não estão contabilizadas ainda no seu saldo atual</translation> </message> <message> <location filename="../overviewpage.cpp" line="+116"/> <location line="+1"/> <source>out of sync</source> <translation>fora de sincronia</translation> </message> </context> <context> <name>PaymentServer</name> <message> <location filename="../paymentserver.cpp" line="+107"/> <source>Cannot start pokemoncoin: click-to-pay handler</source> <translation>Impossível iniciar o pokemoncoin: controlador clicar-para-pagar</translation> </message> </context> <context> <name>QRCodeDialog</name> <message> <location filename="../forms/qrcodedialog.ui" line="+14"/> <source>QR Code Dialog</source> <translation>Diálogo de Código QR</translation> </message> <message> <location line="+59"/> <source>Request Payment</source> <translation>Requisitar Pagamento</translation> </message> <message> <location line="+56"/> <source>Amount:</source> <translation>Quantia:</translation> </message> <message> <location line="-44"/> <source>Label:</source> <translation>Rótulo:</translation> </message> <message> <location line="+19"/> <source>Message:</source> <translation>Mensagem:</translation> </message> <message> <location line="+71"/> <source>&amp;Save As...</source> <translation>&amp;Guardar Como...</translation> </message> <message> <location filename="../qrcodedialog.cpp" line="+62"/> <source>Error encoding URI into QR Code.</source> <translation>Erro ao codificar URI em Código QR.</translation> </message> <message> <location line="+40"/> <source>The entered amount is invalid, please check.</source> <translation>A quantia introduzida é inválida, por favor verifique.</translation> </message> <message> <location line="+23"/> <source>Resulting URI too long, try to reduce the text for label / message.</source> <translation>URI resultante muito longo.
Tente reduzir o texto do rótulo / mensagem.</translation> </message> <message> <location line="+25"/> <source>Save QR Code</source> <translation>Guardar Código QR</translation> </message> <message> <location line="+0"/> <source>PNG Images (*.png)</source> <translation>Imagens PNG (*.png)</translation> </message> </context> <context> <name>RPCConsole</name> <message> <location filename="../forms/rpcconsole.ui" line="+46"/> <source>Client name</source> <translation>Nome do Cliente</translation> </message> <message> <location line="+10"/> <location line="+23"/> <location line="+26"/> <location line="+23"/> <location line="+23"/> <location line="+36"/> <location line="+53"/> <location line="+23"/> <location line="+23"/> <location filename="../rpcconsole.cpp" line="+339"/> <source>N/A</source> <translation>N/D</translation> </message> <message> <location line="-217"/> <source>Client version</source> <translation>Versão do Cliente</translation> </message> <message> <location line="-45"/> <source>&amp;Information</source> <translation>&amp;Informação</translation> </message> <message> <location line="+68"/> <source>Using OpenSSL version</source> <translation>Usando versão OpenSSL</translation> </message> <message> <location line="+49"/> <source>Startup time</source> <translation>Tempo de início</translation> </message> <message> <location line="+29"/> <source>Network</source> <translation>Rede</translation> </message> <message> <location line="+7"/> <source>Number of connections</source> <translation>Número de ligações</translation> </message> <message> <location line="+23"/> <source>On testnet</source> <translation>Em rede de testes</translation> </message> <message> <location line="+23"/> <source>Block chain</source> <translation>Cadeia de blocos</translation> </message> <message> <location line="+7"/> <source>Current number of blocks</source> <translation>Número atual de blocos</translation> </message> <message> <location line="+23"/> <source>Estimated total blocks</source> <translation>Total estimado de blocos</translation> </message> <message> <location line="+23"/> <source>Last block time</source> <translation>Hora do último bloco</translation> </message> <message> <location line="+52"/> <source>&amp;Open</source> <translation>&amp;Abrir</translation> </message> <message> <location line="+16"/> <source>Command-line options</source> <translation>Opções de linha de comandos</translation> </message> <message> <location line="+7"/> <source>Show the pokemoncoin-Qt help message to get a list with possible pokemoncoin command-line options.</source> <translation>Mostrar a mensagem de ajuda do pokemoncoin-Qt para obter uma lista com possíveis opções a usar na linha de comandos.</translation> </message> <message> <location line="+3"/> <source>&amp;Show</source> <translation>Mo&amp;strar</translation> </message> <message> <location line="+24"/> <source>&amp;Console</source> <translation>&amp;Consola</translation> </message> <message> <location line="-260"/> <source>Build date</source> <translation>Data de compilação</translation> </message> <message> <location line="-104"/> <source>pokemoncoin - Debug window</source> <translation>pokemoncoin - Janela de depuração</translation> </message> <message> <location line="+25"/> <source>pokemoncoin Core</source> <translation>Núcleo pokemoncoin</translation> </message> <message> <location line="+279"/> <source>Debug log file</source> <translation>Ficheiro de registo de depuração</translation> </message> <message> <location line="+7"/> <source>Open the
pokemoncoin debug log file from the current data directory. This can take a few seconds for large log files.</source> <translation>Abrir o ficheiro de registo de depuração da pasta de dados atual. Isto pode demorar alguns segundos para ficheiros de registo maiores.</translation> </message> <message> <location line="+102"/> <source>Clear console</source> <translation>Limpar consola</translation> </message> <message> <location filename="../rpcconsole.cpp" line="-30"/> <source>Welcome to the pokemoncoin RPC console.</source> <translation>Bem-vindo à consola RPC pokemoncoin.</translation> </message> <message> <location line="+1"/> <source>Use up and down arrows to navigate history, and &lt;b&gt;Ctrl-L&lt;/b&gt; to clear screen.</source> <translation>Use as setas para cima e para baixo para navegar no histórico e &lt;b&gt;Ctrl-L&lt;/b&gt; para limpar o ecrã.</translation> </message> <message> <location line="+1"/> <source>Type &lt;b&gt;help&lt;/b&gt; for an overview of available commands.</source> <translation>Digite &lt;b&gt;help&lt;/b&gt; para visualizar os comandos disponíveis.</translation> </message> </context> <context> <name>SendCoinsDialog</name> <message> <location filename="../forms/sendcoinsdialog.ui" line="+14"/> <location filename="../sendcoinsdialog.cpp" line="+124"/> <location line="+5"/> <location line="+5"/> <location line="+5"/> <location line="+6"/> <location line="+5"/> <location line="+5"/> <source>Send Coins</source> <translation>Enviar Moedas</translation> </message> <message> <location line="+50"/> <source>Send to multiple recipients at once</source> <translation>Enviar para múltiplos destinatários de uma vez</translation> </message> <message> <location line="+3"/> <source>Add &amp;Recipient</source> <translation>Adicionar &amp;Destinatário</translation> </message> <message> <location line="+20"/> <source>Remove all transaction fields</source> <translation>Remover todos os campos da transação</translation> </message> <message> <location line="+3"/> <source>Clear &amp;All</source> <translation>&amp;Limpar Tudo</translation> </message> <message> <location line="+22"/> <source>Balance:</source> <translation>Saldo:</translation> </message> <message> <location line="+10"/> <source>123.456 BTC</source> <translation>123.456 BTC</translation> </message> <message> <location line="+31"/> <source>Confirm the send action</source> <translation>Confirme a ação de envio</translation> </message> <message> <location line="+3"/> <source>S&amp;end</source> <translation>&amp;Enviar</translation> </message> <message> <location filename="../sendcoinsdialog.cpp" line="-59"/> <source>&lt;b&gt;%1&lt;/b&gt; to %2 (%3)</source> <translation>&lt;b&gt;%1&lt;/b&gt; para %2 (%3)</translation> </message> <message> <location line="+5"/> <source>Confirm send coins</source> <translation>Confirme o envio de moedas</translation> </message> <message> <location line="+1"/> <source>Are you sure you want to send %1?</source> <translation>Tem a certeza que deseja enviar %1?</translation> </message> <message> <location line="+0"/> <source> and </source> <translation> e </translation> </message> <message> <location line="+23"/> <source>The recipient address is not valid, please recheck.</source> <translation>O endereço de destino não é válido, por favor verifique.</translation> </message> <message> <location line="+5"/> <source>The amount to pay must be larger than 0.</source> <translation>A quantia a pagar deverá ser maior que 0.</translation> </message> <message> <location line="+5"/> <source>The amount exceeds
your balance.</source> <translation>A quantia excede o seu saldo.</translation> </message> <message> <location line="+5"/> <source>The total exceeds your balance when the %1 transaction fee is included.</source> <translation>O total excede o seu saldo quando a taxa de transação de %1 for incluída.</translation> </message> <message> <location line="+6"/> <source>Duplicate address found, can only send to each address once per send operation.</source> <translation>Endereço duplicado encontrado, apenas poderá enviar uma vez para cada endereço por cada operação de envio.</translation> </message> <message> <location line="+5"/> <source>Error: Transaction creation failed!</source> <translation>Erro: A criação da transação falhou!</translation> </message> <message> <location line="+5"/> <source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source> <translation>Erro: A transação foi rejeitada. Isso poderá acontecer se algumas das moedas na sua carteira já tiverem sido gastas, se por exemplo tiver usado uma cópia do ficheiro wallet.dat e as moedas foram gastas na cópia mas não foram marcadas como gastas aqui.</translation> </message> </context> <context> <name>SendCoinsEntry</name> <message> <location filename="../forms/sendcoinsentry.ui" line="+14"/> <source>Form</source> <translation>Formulário</translation> </message> <message> <location line="+15"/> <source>A&amp;mount:</source> <translation>Qu&amp;antia:</translation> </message> <message> <location line="+13"/> <source>Pay &amp;To:</source> <translation>&amp;Pagar A:</translation> </message> <message> <location line="+34"/> <source>The address to send the payment to (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source> <translation>O endereço para onde enviar o pagamento (p.ex. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation> </message> <message> <location line="+60"/> <location filename="../sendcoinsentry.cpp" line="+26"/> <source>Enter a label for this address to add it to your address book</source> <translation>Escreva um rótulo para este endereço para o adicionar ao seu livro de endereços</translation> </message> <message> <location line="-78"/> <source>&amp;Label:</source> <translation>Rótu&amp;lo:</translation> </message> <message> <location line="+28"/> <source>Choose address from address book</source> <translation>Escolher endereço do livro de endereços</translation> </message> <message> <location line="+10"/> <source>Alt+A</source> <translation>Alt+A</translation> </message> <message> <location line="+7"/> <source>Paste address from clipboard</source> <translation>Cole endereço da área de transferência</translation> </message> <message> <location line="+10"/> <source>Alt+P</source> <translation>Alt+P</translation> </message> <message> <location line="+7"/> <source>Remove this recipient</source> <translation>Remover este destinatário</translation> </message> <message> <location filename="../sendcoinsentry.cpp" line="+1"/> <source>Enter a pokemoncoin address (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source> <translation>Introduza um endereço pokemoncoin (p.ex.
Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation> </message> </context> <context> <name>SignVerifyMessageDialog</name> <message> <location filename="../forms/signverifymessagedialog.ui" line="+14"/> <source>Signatures - Sign / Verify a Message</source> <translation>Assinaturas - Assinar / Verificar uma Mensagem</translation> </message> <message> <location line="+13"/> <source>&amp;Sign Message</source> <translation>A&amp;ssinar Mensagem</translation> </message> <message> <location line="+6"/> <source>You can sign messages with your addresses to prove you own them. Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source> <translation>Pode assinar mensagens com os seus endereços para provar que são seus. Tenha atenção ao assinar mensagens ambíguas, pois ataques de phishing podem tentar enganá-lo, de modo a assinar a sua identidade para os atacantes. Apenas assine declarações completamente detalhadas com as quais concorde.</translation> </message> <message> <location line="+18"/> <source>The address to sign the message with (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source> <translation>O endereço a utilizar para assinar a mensagem (p.ex. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation> </message> <message> <location line="+10"/> <location line="+213"/> <source>Choose an address from the address book</source> <translation>Escolher endereço do livro de endereços</translation> </message> <message> <location line="-203"/> <location line="+213"/> <source>Alt+A</source> <translation>Alt+A</translation> </message> <message> <location line="-203"/> <source>Paste address from clipboard</source> <translation>Cole endereço da área de transferência</translation> </message> <message> <location line="+10"/> <source>Alt+P</source> <translation>Alt+P</translation> </message> <message> <location line="+12"/> <source>Enter the message you want to sign here</source> <translation>Escreva aqui a mensagem que deseja assinar</translation> </message> <message> <location line="+7"/> <source>Signature</source> <translation>Assinatura</translation> </message> <message> <location line="+27"/> <source>Copy the current signature to the system clipboard</source> <translation>Copiar a assinatura atual para a área de transferência</translation> </message> <message> <location line="+21"/> <source>Sign the message to prove you own this pokemoncoin address</source> <translation>Assine a mensagem para provar que é dono deste endereço pokemoncoin</translation> </message> <message> <location line="+3"/> <source>Sign &amp;Message</source> <translation>Assinar &amp;Mensagem</translation> </message> <message> <location line="+14"/> <source>Reset all sign message fields</source> <translation>Repor todos os campos de assinatura de mensagem</translation> </message> <message> <location line="+3"/> <location line="+146"/> <source>Clear &amp;All</source> <translation>Limpar &amp;Tudo</translation> </message> <message> <location line="-87"/> <source>&amp;Verify Message</source> <translation>&amp;Verificar Mensagem</translation> </message> <message> <location line="+6"/> <source>Enter the signing address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message.
Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.</source> <translation>Introduza o endereço de assinatura, mensagem (assegure-se de copiar quebras de linha, espaços, tabuladores, etc. exatamente) e assinatura abaixo para verificar a mensagem. Tenha atenção para não ler mais na assinatura do que o que estiver na mensagem assinada, para evitar ser enganado por um atacante que se encontre entre si e quem assinou a mensagem.</translation> </message> <message> <location line="+21"/> <source>The address the message was signed with (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source> <translation>O endereço utilizado para assinar a mensagem (p.ex. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation> </message> <message> <location line="+40"/> <source>Verify the message to ensure it was signed with the specified pokemoncoin address</source> <translation>Verifique a mensagem para assegurar que foi assinada com o endereço pokemoncoin especificado</translation> </message> <message> <location line="+3"/> <source>Verify &amp;Message</source> <translation>Verificar &amp;Mensagem</translation> </message> <message> <location line="+14"/> <source>Reset all verify message fields</source> <translation>Repor todos os campos de verificação de mensagem</translation> </message> <message> <location filename="../signverifymessagedialog.cpp" line="+27"/> <location line="+3"/> <source>Enter a pokemoncoin address (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source> <translation>Introduza um endereço pokemoncoin (p.ex. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation> </message> <message> <location line="-2"/> <source>Click &quot;Sign Message&quot; to generate signature</source> <translation>Clique &quot;Assinar Mensagem&quot; para gerar a assinatura</translation> </message> <message> <location line="+3"/> <source>Enter pokemoncoin signature</source> <translation>Introduza assinatura pokemoncoin</translation> </message> <message> <location line="+82"/> <location line="+81"/> <source>The entered address is invalid.</source> <translation>O endereço introduzido é inválido.</translation>
</message> <message> <location line="-81"/> <location line="+8"/> <location line="+73"/> <location line="+8"/> <source>Please check the address and try again.</source> <translation>Por favor verifique o endereço e tente de novo.</translation> </message> <message> <location line="-81"/> <location line="+81"/> <source>The entered address does not refer to a key.</source> <translation>O endereço introduzido não se refere a nenhuma chave.</translation> </message> <message> <location line="-73"/> <source>Wallet unlock was cancelled.</source> <translation>O desbloqueio da carteira foi cancelado.</translation> </message> <message> <location line="+8"/> <source>Private key for the entered address is not available.</source> <translation>A chave privada para o endereço introduzido não está disponível.</translation> </message> <message> <location line="+12"/> <source>Message signing failed.</source> <translation>A assinatura da mensagem falhou.</translation> </message> <message> <location line="+5"/> <source>Message signed.</source> <translation>Mensagem assinada.</translation> </message> <message> <location line="+59"/> <source>The signature could not be decoded.</source> <translation>A assinatura não pôde ser descodificada.</translation> </message> <message> <location line="+0"/> <location line="+13"/> <source>Please check the signature and try again.</source> <translation>Por favor verifique a assinatura e tente de novo.</translation> </message> <message> <location line="+0"/> <source>The signature did not match the message digest.</source> <translation>A assinatura não corresponde ao resumo da mensagem.</translation> </message> <message> <location line="+7"/> <source>Message verification failed.</source> <translation>A verificação da mensagem falhou.</translation> </message> <message> <location line="+5"/> <source>Message verified.</source> <translation>Mensagem verificada.</translation> </message> </context> <context> <name>SplashScreen</name> <message> <location filename="../splashscreen.cpp" line="+22"/> <source>The pokemoncoin developers</source> <translation>Os programadores pokemoncoin</translation> </message> <message> <location line="+1"/> <source>[testnet]</source> <translation>[rede de testes]</translation> </message> </context> <context> <name>TransactionDesc</name> <message> <location filename="../transactiondesc.cpp" line="+20"/> <source>Open until %1</source> <translation>Aberto até %1</translation> </message> <message> <location line="+6"/> <source>%1/offline</source> <translation>%1/desligado</translation> </message> <message> <location line="+2"/> <source>%1/unconfirmed</source> <translation>%1/não confirmada</translation> </message> <message> <location line="+2"/> <source>%1 confirmations</source> <translation>%1 confirmações</translation> </message> <message> <location line="+18"/> <source>Status</source> <translation>Estado</translation> </message> <message numerus="yes"> <location line="+7"/> <source>, broadcast through %n node(s)</source> <translation><numerusform>, transmitida através de %n nó</numerusform><numerusform>, transmitida através de %n nós</numerusform></translation> </message> <message> <location line="+4"/> <source>Date</source> <translation>Data</translation> </message> <message> <location line="+7"/> <source>Source</source> <translation>Origem</translation> </message> <message> <location line="+0"/> <source>Generated</source> <translation>Gerado</translation> </message> <message> <location line="+5"/> <location line="+17"/> <source>From</source>
<translation>De</translation> </message> <message> <location line="+1"/> <location line="+22"/> <location line="+58"/> <source>To</source> <translation>Para</translation> </message> <message> <location line="-77"/> <location line="+2"/> <source>own address</source> <translation>endereço próprio</translation> </message> <message> <location line="-2"/> <source>label</source> <translation>rótulo</translation> </message> <message> <location line="+37"/> <location line="+12"/> <location line="+45"/> <location line="+17"/> <location line="+30"/> <source>Credit</source> <translation>Crédito</translation> </message> <message numerus="yes"> <location line="-102"/> <source>matures in %n more block(s)</source> <translation><numerusform>matura daqui por %n bloco</numerusform><numerusform>matura daqui por %n blocos</numerusform></translation> </message> <message> <location line="+2"/> <source>not accepted</source> <translation>não aceite</translation> </message> <message> <location line="+44"/> <location line="+8"/> <location line="+15"/> <location line="+30"/> <source>Debit</source> <translation>Débito</translation> </message> <message> <location line="-39"/> <source>Transaction fee</source> <translation>Taxa de transação</translation> </message> <message> <location line="+16"/> <source>Net amount</source> <translation>Valor líquido</translation> </message> <message> <location line="+6"/> <source>Message</source> <translation>Mensagem</translation> </message> <message> <location line="+2"/> <source>Comment</source> <translation>Comentário</translation> </message> <message> <location line="+2"/> <source>Transaction ID</source> <translation>ID da Transação</translation> </message> <message> <location line="+3"/> <source>Generated coins must mature 120 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to &quot;not accepted&quot; and it won&apos;t be spendable. This may occasionally happen if another node generates a block within a few seconds of yours.</source> <translation>Moedas geradas deverão maturar por 120 blocos antes de poderem ser gastas. Quando gerou este bloco, ele foi transmitido para a rede para ser incluído na cadeia de blocos. Se a inclusão na cadeia de blocos falhar, irá mudar o estado para &quot;não aceite&quot; e as moedas não poderão ser gastas. 
Isto poderá acontecer ocasionalmente se outro nó da rede gerar um bloco a poucos segundos de diferença do seu.</translation> </message> <message> <location line="+7"/> <source>Debug information</source> <translation>Informação de depuração</translation> </message> <message> <location line="+8"/> <source>Transaction</source> <translation>Transação</translation> </message> <message> <location line="+3"/> <source>Inputs</source> <translation>Entradas</translation> </message> <message> <location line="+23"/> <source>Amount</source> <translation>Quantia</translation> </message> <message> <location line="+1"/> <source>true</source> <translation>verdadeiro</translation> </message> <message> <location line="+0"/> <source>false</source> <translation>falso</translation> </message> <message> <location line="-209"/> <source>, has not been successfully broadcast yet</source> <translation>, ainda não foi transmitida com sucesso</translation> </message> <message numerus="yes"> <location line="-35"/> <source>Open for %n more block(s)</source> <translation><numerusform>Aberta por mais %n bloco</numerusform><numerusform>Aberta por mais %n blocos</numerusform></translation> </message> <message> <location line="+70"/> <source>unknown</source> <translation>desconhecido</translation> </message> </context> <context> <name>TransactionDescDialog</name> <message> <location filename="../forms/transactiondescdialog.ui" line="+14"/> <source>Transaction details</source> <translation>Detalhes da transação</translation> </message> <message> <location line="+6"/> <source>This pane shows a detailed description of the transaction</source> <translation>Esta janela mostra uma descrição detalhada da transação</translation> </message> </context> <context> <name>TransactionTableModel</name> <message> <location filename="../transactiontablemodel.cpp" line="+225"/> <source>Date</source> <translation>Data</translation> </message> <message> <location line="+0"/> <source>Type</source> <translation>Tipo</translation> </message> <message> <location line="+0"/> <source>Address</source> <translation>Endereço</translation> </message> <message> <location line="+0"/> <source>Amount</source> <translation>Quantia</translation> </message> <message numerus="yes"> <location line="+57"/> <source>Open for %n more block(s)</source> <translation><numerusform>Aberta por mais %n bloco</numerusform><numerusform>Aberta por mais %n blocos</numerusform></translation> </message> <message> <location line="+3"/> <source>Open until %1</source> <translation>Aberto até %1</translation> </message> <message> <location line="+3"/> <source>Offline (%1 confirmations)</source> <translation>Desligado (%1 confirmações)</translation> </message> <message> <location line="+3"/> <source>Unconfirmed (%1 of %2 confirmations)</source> <translation>Não confirmada (%1 de %2 confirmações)</translation> </message> <message> <location line="+3"/> <source>Confirmed (%1 confirmations)</source> <translation>Confirmada (%1 confirmações)</translation> </message> <message numerus="yes"> <location line="+8"/> <source>Mined balance will be available when it matures in %n more block(s)</source> <translation><numerusform>Saldo minado ficará disponível quando maturar, daqui por %n bloco</numerusform><numerusform>Saldo minado ficará disponível quando maturar, daqui por %n blocos</numerusform></translation> </message> <message> <location line="+5"/> <source>This block was not received by any other nodes and will probably not be accepted!</source> <translation>Este bloco não foi recebido por
outros nós e provavelmente não será aceite pela rede!</translation> </message> <message> <location line="+3"/> <source>Generated but not accepted</source> <translation>Gerado mas não aceite</translation> </message> <message> <location line="+43"/> <source>Received with</source> <translation>Recebido com</translation> </message> <message> <location line="+2"/> <source>Received from</source> <translation>Recebido de</translation> </message> <message> <location line="+3"/> <source>Sent to</source> <translation>Enviado para</translation> </message> <message> <location line="+2"/> <source>Payment to yourself</source> <translation>Pagamento ao próprio</translation> </message> <message> <location line="+2"/> <source>Mined</source> <translation>Minado</translation> </message> <message> <location line="+38"/> <source>(n/a)</source> <translation>(n/d)</translation> </message> <message> <location line="+199"/> <source>Transaction status. Hover over this field to show number of confirmations.</source> <translation>Estado da transação. Passe o cursor sobre este campo para mostrar o número de confirmações.</translation> </message> <message> <location line="+2"/> <source>Date and time that the transaction was received.</source> <translation>Data e hora a que esta transação foi recebida.</translation> </message> <message> <location line="+2"/> <source>Type of transaction.</source> <translation>Tipo de transação.</translation> </message> <message> <location line="+2"/> <source>Destination address of transaction.</source> <translation>Endereço de destino da transação.</translation> </message> <message> <location line="+2"/> <source>Amount removed from or added to balance.</source> <translation>Quantia retirada ou adicionada ao saldo.</translation> </message> </context> <context> <name>TransactionView</name> <message> <location filename="../transactionview.cpp" line="+52"/> <location line="+16"/> <source>All</source> <translation>Todas</translation> </message> <message> <location line="-15"/> <source>Today</source> <translation>Hoje</translation> </message> <message> <location line="+1"/> <source>This week</source> <translation>Esta semana</translation> </message> <message> <location line="+1"/> <source>This month</source> <translation>Este mês</translation> </message> <message> <location line="+1"/> <source>Last month</source> <translation>Mês passado</translation> </message> <message> <location line="+1"/> <source>This year</source> <translation>Este ano</translation> </message> <message> <location line="+1"/> <source>Range...</source> <translation>Período...</translation> </message> <message> <location line="+11"/> <source>Received with</source> <translation>Recebida com</translation> </message> <message> <location line="+2"/> <source>Sent to</source> <translation>Enviada para</translation> </message> <message> <location line="+2"/> <source>To yourself</source> <translation>Para si</translation> </message> <message> <location line="+1"/> <source>Mined</source> <translation>Minadas</translation> </message> <message> <location line="+1"/> <source>Other</source> <translation>Outras</translation> </message> <message> <location line="+7"/> <source>Enter address or label to search</source> <translation>Escreva endereço ou rótulo a procurar</translation> </message> <message> <location line="+7"/> <source>Min amount</source> <translation>Quantia mínima</translation> </message> <message> <location line="+34"/> <source>Copy address</source> <translation>Copiar endereço</translation> </message> <message> <location
line="+1"/> <source>Copy label</source> <translation>Copiar rótulo</translation> </message> <message> <location line="+1"/> <source>Copy amount</source> <translation>Copiar quantia</translation> </message> <message> <location line="+1"/> <source>Copy transaction ID</source> <translation>Copiar ID da Transação</translation> </message> <message> <location line="+1"/> <source>Edit label</source> <translation>Editar rótulo</translation> </message> <message> <location line="+1"/>
<location line="+139"/> <source>Export Transaction Data</source> <translation>Exportar Dados das Transações</translation> </message> <message> <location line="+1"/> <source>Comma separated file (*.csv)</source> <translation>Ficheiro separado por vírgula (*.csv)</translation> </message> <message> <location line="+8"/> <source>Confirmed</source> <translation>Confirmada</translation> </message> <message> <location line="+1"/> <source>Date</source> <translation>Data</translation> </message> <message> <location line="+1"/> <source>Type</source> <translation>Tipo</translation> </message> <message> <location line="+1"/> <source>Label</source> <translation>Rótulo</translation> </message> <message> <location line="+1"/> <source>Address</source> <translation>Endereço</translation> </message> <message> <location line="+1"/> <source>Amount</source> <translation>Quantia</translation> </message> <message> <location line="+1"/> <source>ID</source> <translation>ID</translation> </message> <message> <location line="+4"/> <source>Error exporting</source> <translation>Erro ao exportar</translation> </message> <message> <location line="+0"/> <source>Could not write to file %1.</source> <translation>Impossível escrever para o ficheiro %1.</translation> </message> <message> <location line="+100"/> <source>Range:</source> <translation>Período:</translation> </message> <message> <location line="+8"/> <source>to</source> <translation>até</translation> </message> </context> <context> <name>WalletModel</name> <message> <location filename="../walletmodel.cpp" line="+193"/> <source>Send Coins</source> <translation>Enviar Moedas</translation> </message> </context> <context> <name>WalletView</name> <message> <location filename="../walletview.cpp" line="+42"/> <source>&amp;Export</source> <translation>&amp;Exportar</translation> </message> <message> <location line="+1"/> <source>Export the data in the current tab to a file</source> <translation>Exportar os dados no separador actual para um ficheiro</translation> </message> <message> <location line="+193"/> <source>Backup Wallet</source> <translation>Cópia de Segurança da Carteira</translation> </message> <message> <location line="+0"/> <source>Wallet Data (*.dat)</source> <translation>Dados da Carteira (*.dat)</translation> </message> <message> <location line="+3"/> <source>Backup Failed</source> <translation>Cópia de Segurança Falhou</translation> </message> <message> <location line="+0"/> <source>There was an error trying to save the wallet data to the new location.</source> <translation>Ocorreu um erro ao tentar guardar os dados da carteira na nova localização.</translation> </message> <message> <location line="+4"/> <source>Backup Successful</source> <translation>Cópia de Segurança Bem Sucedida</translation> </message> <message> <location line="+0"/> <source>The wallet data was successfully saved to the new location.</source> <translation>Os dados da carteira foram salvos com sucesso numa nova localização.</translation> </message> </context> <context> <name>bitcoin-core</name> <message> <location filename="../bitcoinstrings.cpp" line="+94"/> <source>pokemoncoin version</source> <translation>Versão pokemoncoin</translation> </message> <message> <location line="+102"/> <source>Usage:</source> <translation>Utilização:</translation> </message> <message> <location line="-29"/> <source>Send command to -server or pokemoncoind</source> <translation>Enviar comando para -server ou pokemoncoind</translation> </message> <message> <location line="-23"/> <source>List 
commands</source> <translation>Listar comandos</translation> </message> <message> <location line="-12"/> <source>Get help for a command</source> <translation>Obter ajuda para um comando</translation> </message> <message> <location line="+24"/> <source>Options:</source> <translation>Opções:</translation> </message> <message> <location line="+24"/> <source>Specify configuration file (default: pokemoncoin.conf)</source> <translation>Especificar ficheiro de configuração (por defeito: pokemoncoin.conf)</translation> </message> <message> <location line="+3"/> <source>Specify pid file (default: pokemoncoind.pid)</source> <translation>Especificar ficheiro pid (por defeito: pokemoncoind.pid)</translation> </message> <message> <location line="-1"/> <source>Specify data directory</source> <translation>Especificar pasta de dados</translation> </message> <message> <location line="-9"/> <source>Set database cache size in megabytes (default: 25)</source> <translation>Definir o tamanho da cache de base de dados em megabytes (por defeito: 25)</translation> </message> <message> <location line="-28"/> <source>Listen for connections on &lt;port&gt; (default: 9333 or testnet: 19333)</source> <translation>Escute por ligações em &lt;port&gt; (por defeito: 9333 ou testnet: 19333)</translation> </message> <message> <location line="+5"/> <source>Maintain at most &lt;n&gt; connections to peers (default: 125)</source> <translation>Manter no máximo &lt;n&gt; ligações a outros nós da rede (por defeito: 125)</translation> </message> <message> <location line="-48"/> <source>Connect to a node to retrieve peer addresses, and disconnect</source> <translation>Ligar a um nó para recuperar endereços de pares, e desligar</translation> </message> <message> <location line="+82"/> <source>Specify your own public address</source> <translation>Especifique o seu próprio endereço público</translation> </message> <message> <location line="+3"/> <source>Threshold for disconnecting misbehaving peers (default: 100)</source> <translation>Tolerância para desligar nós mal-comportados (por defeito: 100)</translation> </message> <message> <location line="-134"/> <source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source> <translation>Número de segundos a impedir que nós mal-comportados se liguem de novo (por defeito: 86400)</translation> </message> <message> <location line="-29"/> <source>An error occurred while setting up the RPC port %u for listening on IPv4: %s</source> <translation>Ocorreu um erro ao definir a porta %u do serviço RPC a escutar em IPv4: %s</translation> </message> <message> <location line="+27"/> <source>Listen for JSON-RPC connections on &lt;port&gt; (default: 9332 or testnet: 19332)</source> <translation>Escutar por ligações JSON-RPC em &lt;port&gt; (por defeito: 9332 ou rede de testes: 19332)</translation> </message> <message> <location line="+37"/> <source>Accept command line and JSON-RPC commands</source> <translation>Aceitar comandos da consola e JSON-RPC</translation> </message> <message> <location line="+76"/> <source>Run in the background as a daemon and accept commands</source> <translation>Correr o processo como um daemon e aceitar comandos</translation> </message> <message> <location line="+37"/> <source>Use the test network</source> <translation>Utilizar a rede de testes - testnet</translation> </message> <message> <location line="-112"/> <source>Accept connections from outside (default: 1 if no -proxy or -connect)</source> <translation>Aceitar ligações externas (padrão: 1 sem -proxy ou
-connect)</translation> </message> <message> <location line="-80"/> <source>%s, you must set a rpcpassword in the configuration file: %s It is recommended you use the following random password: rpcuser=pokemoncoinrpc rpcpassword=%s (you do not need to remember this password) The username and password MUST NOT be the same. If the file does not exist, create it with owner-readable-only file permissions. It is also recommended to set alertnotify so you are notified of problems; for example: alertnotify=echo %%s | mail -s &quot;pokemoncoin Alert&quot; [email protected] </source> <translation>%s, deverá definir rpcpassword no ficheiro de configuração: %s É recomendado que use a seguinte palavra-passe aleatória: rpcuser=pokemoncoinrpc rpcpassword=%s (não precisa de recordar esta palavra-passe) O nome de utilizador e a palavra-passe NÃO DEVEM ser iguais. Se o ficheiro não existir, crie-o com permissões de leitura apenas para o dono. Também é recomendado definir alertnotify para que seja alertado sobre problemas; por exemplo: alertnotify=echo %%s | mail -s &quot;Alerta pokemoncoin&quot; [email protected] </translation> </message> <message> <location line="+17"/> <source>An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s</source> <translation>Ocorreu um erro ao definir a porta %u do serviço RPC a escutar em IPv6, revertendo para IPv4: %s</translation> </message> <message> <location line="+3"/> <source>Bind to given address and always listen on it. Use [host]:port notation for IPv6</source> <translation>Vincular ao endereço dado e escutar sempre nele. Use a notação [anfitrião]:porta para IPv6</translation> </message> <message> <location line="+3"/> <source>Cannot obtain a lock on data directory %s. pokemoncoin is probably already running.</source> <translation>Impossível trancar a pasta de dados %s. Provavelmente o pokemoncoin já está a ser executado.</translation> </message> <message> <location line="+3"/> <source>Error: The transaction was rejected! This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source> <translation>Erro: A transação foi rejeitada! Isso poderá acontecer se algumas das moedas na sua carteira já tiverem sido gastas, se por exemplo tiver usado uma cópia do ficheiro wallet.dat e as moedas foram gastas na cópia mas não foram marcadas como gastas aqui.</translation> </message> <message> <location line="+4"/> <source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds!</source> <translation>Erro: Esta transação requer uma taxa de transação mínima de %s devido à sua quantia, complexidade, ou uso de fundos recebidos recentemente!</translation>
</translation> </message> <message> <location line="+3"/> <source>Execute command when a relevant alert is received (%s in cmd is replaced by message)</source> <translation>Executar comando quando um alerta relevante for recebido (no comando, %s é substituído pela mensagem)</translation> </message> <message> <location line="+3"/> <source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source> <translation>Executar comando quando uma das transações na carteira mudar (no comando, %s é substituído pelo ID da Transação)</translation> </message> <message> <location line="+11"/> <source>Set maximum size of high-priority/low-fee transactions in bytes (default: 27000)</source> <translation>Definir tamanho máximo de transações de alta-/baixa-prioridade em bytes (por defeito: 27000)</translation> </message> <message> <location line="+6"/> <source>This is a pre-release test build - use at your own risk - do not use for mining or merchant applications</source> <translation>Esta é uma versão de pré-lançamento - use à sua responsabilidade - não usar para minar ou aplicações comerciais</translation> </message> <message> <location line="+5"/> <source>Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source> <translation>Atenção: -paytxfee está definida com um valor muito alto! Esta é a taxa que irá pagar se enviar uma transação.</translation> </message> <message> <location line="+3"/> <source>Warning: Displayed transactions may not be correct! You may need to upgrade, or other nodes may need to upgrade.</source> <translation>Atenção: As transações mostradas poderão não estar correctas! Poderá ter que atualizar ou outros nós poderão ter que atualizar.</translation> </message> <message> <location line="+3"/> <source>Warning: Please check that your computer&apos;s date and time are correct! If your clock is wrong pokemoncoin will not work properly.</source> <translation>Atenção: Por favor verifique que a data e hora do seu computador estão correctas! Se o seu relógio não estiver certo o pokemoncoin não irá funcionar correctamente.</translation> </message> <message> <location line="+3"/> <source>Warning: error reading wallet.dat! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source> <translation>Atenção: erro ao ler wallet.dat! Todas as chaves foram lidas correctamente, mas dados de transação ou do livro de endereços podem estar em falta ou incorrectos.</translation> </message> <message> <location line="+3"/> <source>Warning: wallet.dat corrupt, data salvaged! Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</source> <translation>Atenção: wallet.dat corrupto, dados recuperados! 
wallet.dat original salvo como wallet.{timestamp}.bak em %s; se o seu saldo ou transações estiverem incorrectos deverá recuperar de uma cópia de segurança.</translation> </message> <message> <location line="+14"/> <source>Attempt to recover private keys from a corrupt wallet.dat</source> <translation>Tentar recuperar chaves privadas de um wallet.dat corrupto</translation> </message> <message> <location line="+2"/> <source>Block creation options:</source> <translation>Opções de criação de bloco:</translation> </message> <message> <location line="+5"/> <source>Connect only to the specified node(s)</source> <translation>Apenas ligar ao(s) nó(s) especificado(s)</translation> </message> <message> <location line="+3"/> <source>Corrupted block database detected</source> <translation>Cadeia de blocos corrompida detectada</translation> </message> <message> <location line="+1"/> <source>Discover own IP address (default: 1 when listening and no -externalip)</source> <translation>Descobrir endereço IP próprio (padrão: 1 ao escutar e sem -externalip)</translation> </message> <message> <location line="+1"/> <source>Do you want to rebuild the block database now?</source> <translation>Deseja reconstruir agora a cadeia de blocos?</translation> </message> <message> <location line="+2"/> <source>Error initializing block database</source> <translation>Erro ao inicializar a cadeia de blocos</translation> </message> <message> <location line="+1"/> <source>Error initializing wallet database environment %s!</source> <translation>Erro ao inicializar o ambiente de base de dados da carteira %s!</translation> </message> <message> <location line="+1"/> <source>Error loading block database</source> <translation>Erro ao carregar cadeia de blocos</translation> </message> <message> <location line="+4"/> <source>Error opening block database</source> <translation>Erro ao abrir a cadeia de blocos</translation> </message> <message> <location line="+2"/> <source>Error: Disk space is low!</source> <translation>Erro: Pouco espaço em disco!</translation> </message> <message> <location line="+1"/> <source>Error: Wallet locked, unable to create transaction!</source> <translation>Erro: Carteira bloqueada, incapaz de criar transação! </translation> </message> <message> <location line="+1"/> <source>Error: system error: </source> <translation>Erro: erro do sistema:</translation> </message> <message> <location line="+1"/> <source>Failed to listen on any port. Use -listen=0 if you want this.</source> <translation>Falhou a escutar em qualquer porta. 
Use -listen=0 se quer isto.</translation> </message> <message> <location line="+1"/> <source>Failed to read block info</source> <translation>Falha ao ler info do bloco</translation> </message> <message> <location line="+1"/> <source>Failed to read block</source> <translation>Falha ao ler bloco</translation> </message> <message> <location line="+1"/> <source>Failed to sync block index</source> <translation>Falha ao sincronizar índice do bloco</translation> </message> <message> <location line="+1"/> <source>Failed to write block index</source> <translation>Falha ao escrever índice do bloco</translation> </message> <message> <location line="+1"/> <source>Failed to write block info</source> <translation>Falha ao escrever info do bloco</translation> </message> <message> <location line="+1"/> <source>Failed to write block</source> <translation>Falha ao escrever o bloco</translation> </message> <message> <location line="+1"/> <source>Failed to write file info</source> <translation>Falha ao escrever info do ficheiro</translation> </message> <message> <location line="+1"/> <source>Failed to write to coin database</source> <translation>Falha ao escrever na base de dados de moedas</translation> </message> <message> <location line="+1"/> <source>Failed to write transaction index</source> <translation>Falha ao escrever índice de transações</translation> </message> <message> <location line="+1"/> <source>Failed to write undo data</source> <translation>Falha ao escrever histórico de modificações</translation> </message> <message> <location line="+2"/> <source>Find peers using DNS lookup (default: 1 unless -connect)</source> <translation>Encontrar pares usando procura DNS (por defeito: 1 excepto -connect)</translation> </message> <message> <location line="+1"/> <source>Generate coins (default: 0)</source> <translation>Gerar moedas (por defeito: 0)</translation> </message> <message> <location line="+2"/> <source>How many blocks to check at startup (default: 288, 0 = all)</source> <translation>Quantos blocos verificar ao começar (por defeito: 288, 0 = todos)</translation> </message> <message> <location line="+1"/> <source>How thorough the block verification is (0-4, default: 3)</source> <translation>Qual a minúcia na verificação de blocos (0-4, por defeito: 3)</translation> </message> <message> <location line="+19"/> <source>Not enough file descriptors available.</source> <translation>Descritores de ficheiros disponíveis são insuficientes.</translation> </message> <message> <location line="+8"/> <source>Rebuild block chain index from current blk000??.dat files</source> <translation>Reconstruir a cadeia de blocos dos ficheiros blk000??.dat actuais</translation> </message> <message> <location line="+16"/> <source>Set the number of threads to service RPC calls (default: 4)</source> <translation>Defina o número de processos para servir as chamadas RPC (por defeito: 4)</translation> </message> <message> <location line="+26"/> <source>Verifying blocks...</source> <translation>Verificando blocos...</translation> </message> <message> <location line="+1"/> <source>Verifying wallet...</source> <translation>Verificando a carteira...</translation> </message> <message> <location line="-69"/> <source>Imports blocks from external blk000??.dat file</source> <translation>Importar blocos de um ficheiro blk000??.dat externo</translation> </message> <message> <location line="-76"/> <source>Set the number of script verification threads (up to 16, 0 = auto, &lt;0 = leave that many cores free, default: 0)</source> 
<translation>Defina o número de processos de verificação (até 16, 0 = automático, &lt;0 = disponibiliza esse número de núcleos livres, por defeito: 0)</translation> </message> <message> <location line="+77"/> <source>Information</source> <translation>Informação</translation> </message> <message> <location line="+3"/> <source>Invalid -tor address: &apos;%s&apos;</source> <translation>Endereço -tor inválido: &apos;%s&apos;</translation> </message> <message> <location line="+1"/> <source>Invalid amount for -minrelaytxfee=&lt;amount&gt;: &apos;%s&apos;</source> <translation>Quantia inválida para -minrelaytxfee=&lt;amount&gt;: &apos;%s&apos;</translation> </message> <message> <location line="+1"/> <source>Invalid amount for -mintxfee=&lt;amount&gt;: &apos;%s&apos;</source> <translation>Quantia inválida para -mintxfee=&lt;amount&gt;: &apos;%s&apos;</translation> </message> <message> <location line="+8"/> <source>Maintain a full transaction index (default: 0)</source> <translation>Manter índice de transações completo (por defeito: 0)</translation> </message> <message> <location line="+2"/> <source>Maximum per-connection receive buffer, &lt;n&gt;*1000 bytes (default: 5000)</source> <translation>Armazenamento intermédio de recepção por ligação, &lt;n&gt;*1000 bytes (por defeito: 5000)</translation> </message> <message> <location line="+1"/> <source>Maximum per-connection send buffer, &lt;n&gt;*1000 bytes (default: 1000)</source> <translation>Armazenamento intermédio de envio por ligação, &lt;n&gt;*1000 bytes (por defeito: 1000)</translation> </message> <message> <location line="+2"/> <source>Only accept block chain matching built-in checkpoints (default: 1)</source> <translation>Apenas aceitar cadeia de blocos coincidente com marcas de verificação internas (por defeito: 1)</translation> </message> <message> <location line="+1"/> <source>Only connect to nodes in network &lt;net&gt; (IPv4, IPv6 or Tor)</source> <translation>Apenas ligar a nós na rede &lt;net&gt; (IPv4, IPv6 ou Tor)</translation> </message> <message> <location line="+2"/> <source>Output extra debugging information. Implies all other -debug* options</source> <translation>Produzir informação de depuração extra. 
Implica todas as outras opções -debug*</translation> </message> <message> <location line="+1"/> <source>Output extra network debugging information</source> <translation>Produzir informação de depuração extraordinária</translation> </message> <message> <location line="+2"/> <source>Prepend debug output with timestamp</source> <translation>Preceder informação de depuração com selo temporal</translation> </message> <message> <location line="+5"/> <source>SSL options: (see the pokemoncoin Wiki for SSL setup instructions)</source> <translation>Opções SSL: (ver a Wiki pokemoncoin para instruções de configuração SSL)</translation> </message> <message> <location line="+1"/> <source>Select the version of socks proxy to use (4-5, default: 5)</source> <translation>Selecione a versão do proxy socks a usar (4-5, padrão: 5)</translation> </message> <message> <location line="+3"/> <source>Send trace/debug info to console instead of debug.log file</source> <translation>Enviar informação de rastreio/depuração para a consola e não para o ficheiro debug.log</translation> </message> <message> <location line="+1"/> <source>Send trace/debug info to debugger</source> <translation>Enviar informação de rastreio/depuração para o depurador</translation> </message> <message> <location line="+5"/> <source>Set maximum block size in bytes (default: 250000)</source> <translation>Definir tamanho máximo de um bloco em bytes (por defeito: 250000)</translation> </message> <message> <location line="+1"/> <source>Set minimum block size in bytes (default: 0)</source> <translation>Definir tamanho minímo de um bloco em bytes (por defeito: 0)</translation> </message> <message> <location line="+2"/> <source>Shrink debug.log file on client startup (default: 1 when no -debug)</source> <translation>Encolher ficheiro debug.log ao iniciar o cliente (por defeito: 1 sem -debug definido)</translation> </message> <message> <location line="+1"/> <source>Signing transaction failed</source> <translation>Falhou assinatura da transação</translation> </message> <message> <location line="+2"/> <source>Specify connection timeout in milliseconds (default: 5000)</source> <translation>Especificar tempo de espera da ligação em millisegundos (por defeito: 5000)</translation> </message> <message> <location line="+4"/> <source>System error: </source> <translation>Erro de sistema:</translation> </message> <message> <location line="+4"/> <source>Transaction amount too small</source> <translation>Quantia da transação é muito baixa</translation> </message> <message> <location line="+1"/> <source>Transaction amounts must be positive</source> <translation>Quantia da transação deverá ser positiva</translation> </message> <message> <location line="+1"/> <source>Transaction too large</source> <translation>Transação grande demais</translation> </message> <message> <location line="+7"/> <source>Use UPnP to map the listening port (default: 0)</source> <translation>Usar UPnP para mapear a porta de escuta (padrão: 0)</translation> </message> <message> <location line="+1"/> <source>Use UPnP to map the listening port (default: 1 when listening)</source> <translation>Usar UPnP para mapear a porta de escuta (padrão: 1 ao escutar)</translation> </message> <message> <location line="+1"/> <source>Use proxy to reach tor hidden services (default: same as -proxy)</source> <translation>Utilizar proxy para aceder a serviços escondidos Tor (por defeito: mesmo que -proxy)</translation> </message> <message> <location line="+2"/> <source>Username for JSON-RPC connections</source> 
<translation>Nome de utilizador para ligações JSON-RPC</translation> </message> <message> <location line="+4"/> <source>Warning</source> <translation>Aviso</translation> </message> <message> <location line="+1"/> <source>Warning: This version is obsolete, upgrade required!</source> <translation>Atenção: Esta versão está obsoleta, é necessário actualizar!</translation> </message> <message> <location line="+1"/> <source>You need to rebuild the databases using -reindex to change -txindex</source> <translation>Necessita reconstruir as bases de dados usando -reindex para mudar -txindex</translation> </message> <message> <location line="+1"/> <source>wallet.dat corrupt, salvage failed</source> <translation>wallet.dat corrupta, recuperação falhou</translation> </message> <message> <location line="-50"/> <source>Password for JSON-RPC connections</source> <translation>Palavra-passe para ligações JSON-RPC</translation> </message> <message> <location line="-67"/> <source>Allow JSON-RPC connections from specified IP address</source> <translation>Permitir ligações JSON-RPC do endereço IP especificado</translation> </message> <message> <location line="+76"/> <source>Send commands to node running on &lt;ip&gt; (default: 127.0.0.1)</source> <translation>Enviar comandos para o nó a correr em &lt;ip&gt; (por defeito: 127.0.0.1)</translation> </message> <message> <location line="-120"/> <source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source> <translation>Executar comando quando mudar o melhor bloco (no comando, %s é substituído pela hash do bloco)</translation> </message> <message> <location line="+147"/> <source>Upgrade wallet to latest format</source> <translation>Atualize a carteira para o formato mais recente</translation> </message> <message> <location line="-21"/> <source>Set key pool size to &lt;n&gt; (default: 100)</source> <translation>Definir o tamanho da memória de chaves para &lt;n&gt; (por defeito: 100)</translation> </message> <message> <location line="-12"/> <source>Rescan the block chain for missing wallet transactions</source> <translation>Reexaminar a cadeia de blocos para transações em falta na carteira</translation> </message> <message> <location line="+35"/> <source>Use OpenSSL (https) for JSON-RPC connections</source> <translation>Usar OpenSSL (https) para ligações JSON-RPC</translation> </message> <message> <location line="-26"/> <source>Server certificate file (default: server.cert)</source> <translation>Ficheiro de certificado do servidor (por defeito: server.cert)</translation> </message> <message> <location line="+1"/> <source>Server private key (default: server.pem)</source> <translation>Chave privada do servidor (por defeito: server.pem)</translation> </message> <message> <location line="-151"/> <source>Acceptable ciphers (default: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</source> <translation>Cifras aceitáveis (por defeito: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</translation> </message> <message> <location line="+165"/> <source>This help message</source> <translation>Esta mensagem de ajuda</translation> </message> <message> <location line="+6"/> <source>Unable to bind to %s on this computer (bind returned error %d, %s)</source> <translation>Incapaz de vincular a %s neste computador (vínculo retornou erro %d, %s)</translation> </message> <message> <location line="-91"/> <source>Connect through socks proxy</source> <translation>Ligar através de um proxy socks</translation> </message> <message> <location 
line="-10"/> <source>Allow DNS lookups for -addnode, -seednode and -connect</source> <translation>Permitir procuras DNS para -addnode, -seednode e -connect</translation> </message> <message> <location line="+55"/> <source>Loading addresses...</source> <translation>Carregar endereços...</translation> </message> <message> <location line="-35"/> <source>Error loading wallet.dat: Wallet corrupted</source> <translation>Erro ao carregar wallet.dat: Carteira danificada</translation> </message> <message> <location line="+1"/> <source>Error loading wallet.dat: Wallet requires newer version of pokemoncoin</source> <translation>Erro ao carregar wallet.dat: A Carteira requer uma versão mais recente do pokemoncoin</translation> </message> <message> <location line="+93"/> <source>Wallet needed to be rewritten: restart pokemoncoin to complete</source> <translation>A Carteira precisou ser reescrita: reinicie o pokemoncoin para completar</translation> </message> <message> <location line="-95"/> <source>Error loading wallet.dat</source> <translation>Erro ao carregar wallet.dat</translation> </message> <message> <location line="+28"/> <source>Invalid -proxy address: &apos;%s&apos;</source> <translation>Endereço -proxy inválido: &apos;%s&apos;</translation> </message> <message> <location line="+56"/> <source>Unknown network specified in -onlynet: &apos;%s&apos;</source> <translation>Rede desconhecida especificada em -onlynet: &apos;%s&apos;</translation> </message> <message> <location line="-1"/> <source>Unknown -socks proxy version requested: %i</source> <translation>Versão desconhecida de proxy -socks requisitada: %i</translation> </message> <message> <location line="-96"/> <source>Cannot resolve -bind address: &apos;%s&apos;</source> <translation>Não conseguiu resolver endereço -bind: &apos;%s&apos;</translation> </message> <message> <location line="+1"/> <source>Cannot resolve -externalip address: &apos;%s&apos;</source> <translation>Não conseguiu resolver endereço -externalip: &apos;%s&apos;</translation> </message> <message> <location line="+44"/> <source>Invalid amount for -paytxfee=&lt;amount&gt;: &apos;%s&apos;</source> <translation>Quantia inválida para -paytxfee=&lt;amount&gt;: &apos;%s&apos;</translation> </message> <message> <location line="+1"/> <source>Invalid amount</source> <translation>Quantia inválida</translation> </message> <message> <location line="-6"/> <source>Insufficient funds</source> <translation>Fundos insuficientes</translation> </message> <message> <location line="+10"/> <source>Loading block index...</source> <translation>Carregar índice de blocos...</translation> </message> <message> <location line="-57"/> <source>Add a node to connect to and attempt to keep the connection open</source> <translation>Adicione um nó ao qual se ligar e tentar manter a ligação aberta</translation> </message> <message> <location line="-25"/> <source>Unable to bind to %s on this computer. pokemoncoin is probably already running.</source> <translation>Incapaz de vincular à porta %s neste computador. 
Provavelmente o pokemoncoin já está a funcionar.</translation> </message> <message> <location line="+64"/> <source>Fee per KB to add to transactions you send</source> <translation>Taxa por KB a adicionar a transações enviadas</translation> </message> <message> <location line="+19"/> <source>Loading wallet...</source> <translation>Carregar carteira...</translation> </message> <message> <location line="-52"/> <source>Cannot downgrade wallet</source> <translation>Impossível mudar a carteira para uma versão anterior</translation> </message> <message> <location line="+3"/> <source>Cannot write default address</source> <translation>Impossível escrever endereço por defeito</translation> </message> <message> <location line="+64"/> <source>Rescanning...</source> <translation>Reexaminando...</translation> </message> <message> <location line="-57"/> <source>Done loading</source> <translation>Carregamento completo</translation> </message> <message> <location line="+82"/> <source>To use the %s option</source> <translation>Para usar a opção %s</translation> </message> <message> <location line="-74"/> <source>Error</source> <translation>Erro</translation> </message> <message> <location line="-31"/> <source>You must set rpcpassword=&lt;password&gt; in the configuration file: %s If the file does not exist, create it with owner-readable-only file permissions.</source> <translation>Deverá definir rpcpassword=&lt;password&gt; no ficheiro de configuração: %s Se o ficheiro não existir, crie-o com permissões de leitura apenas para o dono.</translation> </message> </context> </TS>
test.clients.ts
// Copyright 2013-2016, Google, Inc. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. import * as async from 'async'; import * as fs from 'fs'; import * as nock from 'nock'; import * as path from 'path'; import * as assert from 'power-assert'; import {Utils} from './utils'; const googleapis = require('../src/lib/googleapis'); describe('Clients', () => { let localPlus, remotePlus; let localOauth2, remoteOauth2; before((done) => { nock.cleanAll(); const google = new googleapis.GoogleApis(); nock.enableNetConnect(); async.parallel( [ (cb) => { Utils.loadApi(google, 'plus', 'v1', {}, cb); }, (cb) => { Utils.loadApi(google, 'oauth2', 'v2', {}, cb); } ], (err, apis) => { if (err) { return done(err); } remotePlus = apis[0]; remoteOauth2 = apis[1]; nock.disableNetConnect(); done(); }); }); beforeEach(() => { nock.cleanAll(); nock.disableNetConnect(); const google = new googleapis.GoogleApis(); localPlus = google.plus('v1'); localOauth2 = google.oauth2('v2'); }); it('should create request helpers according to resource on discovery API response', () => { let plus = localPlus; assert.equal(typeof plus.people.get, 'function'); assert.equal(typeof plus.activities.search, 'function'); assert.equal(typeof plus.comments.list, 'function'); plus = remotePlus; assert.equal(typeof plus.people.get, 'function'); assert.equal(typeof plus.activities.search, 'function'); assert.equal(typeof plus.comments.list, 'function'); }); it('should be able to gen top level methods', () => { assert.equal(typeof localOauth2.tokeninfo, 'function'); assert.equal(typeof remoteOauth2.tokeninfo, 'function'); }); it('should be able to gen top level methods and resources', () => { let oauth2 = localOauth2; assert.equal(typeof oauth2.tokeninfo, 'function'); assert.equal(typeof oauth2.userinfo, 'object'); oauth2 = remoteOauth2; assert.equal(typeof oauth2.tokeninfo, 'function'); assert.equal(typeof oauth2.userinfo, 'object'); }); it('should be able to gen nested resources and methods', () => { let oauth2 = localOauth2; assert.equal(typeof oauth2.userinfo, 'object'); assert.equal(typeof oauth2.userinfo.v2, 'object'); assert.equal(typeof oauth2.userinfo.v2.me, 'object'); assert.equal(typeof oauth2.userinfo.v2.me.get, 'function'); oauth2 = remoteOauth2; assert.equal(typeof oauth2.userinfo, 'object'); assert.equal(typeof oauth2.userinfo.v2, 'object'); assert.equal(typeof oauth2.userinfo.v2.me, 'object'); assert.equal(typeof oauth2.userinfo.v2.me.get, 'function'); }); it('should be able to require all api files without error', () => { function
getFiles
(dir: string, files?: string[]) { files = files || []; if (typeof files === 'undefined') { files = []; } const files2 = fs.readdirSync(dir); for (const i in files2) { if (!files2.hasOwnProperty(i)) { continue; } const name = dir + '/' + files2[i]; if (fs.statSync(name).isDirectory()) { getFiles(name, files); } else { if (path.extname(name) === '.js') { files.push(name); } } } return files; } const apiFiles = getFiles(path.join(__dirname, '/../src/apis')); assert.doesNotThrow(() => { for (const i in apiFiles) { if (apiFiles.hasOwnProperty(i)) { try { require(apiFiles[i]); } catch (err) { console.error(err); throw err; } } } }); }); it('should support default params', (done) => { const google = new googleapis.GoogleApis(); const datastore = google.datastore({version: 'v1beta3', params: {myParam: '123'}}); const req = datastore.projects.lookup({projectId: 'test-project-id'}, Utils.noop); // If the default param handling is broken, query might be undefined, thus // concealing the assertion message with some generic "cannot call .indexOf // of undefined" const query = req.uri.query || ''; assert.notEqual(query.indexOf('myParam=123'), -1, 'Default param in query'); nock.enableNetConnect(); Utils.loadApi( google, 'datastore', 'v1beta3', {params: {myParam: '123'}}, (err, datastore2) => { nock.disableNetConnect(); if (err) { return done(err); } const req2 = datastore2.projects.lookup( {projectId: 'test-project-id'}, Utils.noop); const query2 = req2.uri.query || ''; assert.notEqual( query2.indexOf('myParam=123'), -1, 'Default param in query'); done(); }); }); it('should allow default params to be overriden per-request', (done) => { const google = new googleapis.GoogleApis(); const datastore = google.datastore({version: 'v1beta3', params: {myParam: '123'}}); // Override the default datasetId param for this particular API call const req = datastore.projects.lookup( {projectId: 'test-project-id', myParam: '456'}, Utils.noop); // If the default param handling is broken, query might be undefined, thus // concealing the assertion message with some generic "cannot call .indexOf // of undefined" const query = req.uri.query || ''; assert.notEqual( query.indexOf('myParam=456'), -1, 'Default param not found in query'); nock.enableNetConnect(); Utils.loadApi( google, 'datastore', 'v1beta3', {params: {myParam: '123'}}, (err, datastore2) => { nock.disableNetConnect(); if (err) { return done(err); } // Override the default datasetId param for this particular API call const req2 = datastore2.projects.lookup( {projectId: 'test-project-id', myParam: '456'}, Utils.noop); // If the default param handling is broken, query might be undefined, // thus concealing the assertion message with some generic "cannot // call .indexOf of undefined" const query2 = req2.uri.query || ''; assert.notEqual( query2.indexOf('myParam=456'), -1, 'Default param not found in query'); done(); }); }); it('should include default params when only callback is provided to API call', (done) => { const google = new googleapis.GoogleApis(); const datastore = google.datastore({ version: 'v1beta3', params: { projectId: 'test-project-id', // We must set this here - it is a // required param myParam: '123' } }); // No params given - only callback const req = datastore.projects.lookup(Utils.noop); // If the default param handling is broken, req or query might be // undefined, thus concealing the assertion message with some generic // "cannot call .indexOf of undefined" const query = (req && req.uri.query) || ''; assert.notEqual( 
query.indexOf('myParam=123'), -1, 'Default param not found in query'); nock.enableNetConnect(); Utils.loadApi( google, 'datastore', 'v1beta3', { params: { projectId: 'test-project-id', // We must set this here - it is a // required param myParam: '123' } }, (err, datastore2) => { nock.disableNetConnect(); if (err) { return done(err); } // No params given - only callback const req2 = datastore2.projects.lookup(Utils.noop); // If the default param handling is broken, req or query might be // undefined, thus concealing the assertion message with some // generic "cannot call .indexOf of undefined" const query2 = (req2 && req2.uri.query) || ''; assert.notEqual( query2.indexOf('myParam=123'), -1, 'Default param not found in query'); done(); }); }); after(() => { nock.cleanAll(); nock.enableNetConnect(); }); });
fake_analyticsoutputservicebusqueue.go
/* Copyright AppsCode Inc. and Contributors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Code generated by client-gen. DO NOT EDIT. package fake import ( "context" v1alpha1 "kubeform.dev/provider-azurerm-api/apis/stream/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" ) // FakeAnalyticsOutputServicebusQueues implements AnalyticsOutputServicebusQueueInterface type FakeAnalyticsOutputServicebusQueues struct { Fake *FakeStreamV1alpha1 ns string } var analyticsoutputservicebusqueuesResource = schema.GroupVersionResource{Group: "stream.azurerm.kubeform.com", Version: "v1alpha1", Resource: "analyticsoutputservicebusqueues"} var analyticsoutputservicebusqueuesKind = schema.GroupVersionKind{Group: "stream.azurerm.kubeform.com", Version: "v1alpha1", Kind: "AnalyticsOutputServicebusQueue"} // Get takes name of the analyticsOutputServicebusQueue, and returns the corresponding analyticsOutputServicebusQueue object, and an error if there is any. func (c *FakeAnalyticsOutputServicebusQueues) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.AnalyticsOutputServicebusQueue, err error) { obj, err := c.Fake. Invokes(testing.NewGetAction(analyticsoutputservicebusqueuesResource, c.ns, name), &v1alpha1.AnalyticsOutputServicebusQueue{}) if obj == nil { return nil, err } return obj.(*v1alpha1.AnalyticsOutputServicebusQueue), err } // List takes label and field selectors, and returns the list of AnalyticsOutputServicebusQueues that match those selectors. func (c *FakeAnalyticsOutputServicebusQueues) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.AnalyticsOutputServicebusQueueList, err error) { obj, err := c.Fake. Invokes(testing.NewListAction(analyticsoutputservicebusqueuesResource, analyticsoutputservicebusqueuesKind, c.ns, opts), &v1alpha1.AnalyticsOutputServicebusQueueList{}) if obj == nil { return nil, err } label, _, _ := testing.ExtractFromListOptions(opts) if label == nil { label = labels.Everything() } list := &v1alpha1.AnalyticsOutputServicebusQueueList{ListMeta: obj.(*v1alpha1.AnalyticsOutputServicebusQueueList).ListMeta} for _, item := range obj.(*v1alpha1.AnalyticsOutputServicebusQueueList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } } return list, err } // Watch returns a watch.Interface that watches the requested analyticsOutputServicebusQueues. func (c *FakeAnalyticsOutputServicebusQueues) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. InvokesWatch(testing.NewWatchAction(analyticsoutputservicebusqueuesResource, c.ns, opts)) } // Create takes the representation of a analyticsOutputServicebusQueue and creates it. Returns the server's representation of the analyticsOutputServicebusQueue, and an error, if there is any. 
func (c *FakeAnalyticsOutputServicebusQueues) Create(ctx context.Context, analyticsOutputServicebusQueue *v1alpha1.AnalyticsOutputServicebusQueue, opts v1.CreateOptions) (result *v1alpha1.AnalyticsOutputServicebusQueue, err error) { obj, err := c.Fake. Invokes(testing.NewCreateAction(analyticsoutputservicebusqueuesResource, c.ns, analyticsOutputServicebusQueue), &v1alpha1.AnalyticsOutputServicebusQueue{}) if obj == nil { return nil, err } return obj.(*v1alpha1.AnalyticsOutputServicebusQueue), err } // Update takes the representation of a analyticsOutputServicebusQueue and updates it. Returns the server's representation of the analyticsOutputServicebusQueue, and an error, if there is any. func (c *FakeAnalyticsOutputServicebusQueues) Update(ctx context.Context, analyticsOutputServicebusQueue *v1alpha1.AnalyticsOutputServicebusQueue, opts v1.UpdateOptions) (result *v1alpha1.AnalyticsOutputServicebusQueue, err error) { obj, err := c.Fake. Invokes(testing.NewUpdateAction(analyticsoutputservicebusqueuesResource, c.ns, analyticsOutputServicebusQueue), &v1alpha1.AnalyticsOutputServicebusQueue{}) if obj == nil { return nil, err } return obj.(*v1alpha1.AnalyticsOutputServicebusQueue), err
} // UpdateStatus was generated because the type contains a Status member. // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). func (c *FakeAnalyticsOutputServicebusQueues) UpdateStatus(ctx context.Context, analyticsOutputServicebusQueue *v1alpha1.AnalyticsOutputServicebusQueue, opts v1.UpdateOptions) (*v1alpha1.AnalyticsOutputServicebusQueue, error) { obj, err := c.Fake. Invokes(testing.NewUpdateSubresourceAction(analyticsoutputservicebusqueuesResource, "status", c.ns, analyticsOutputServicebusQueue), &v1alpha1.AnalyticsOutputServicebusQueue{}) if obj == nil { return nil, err } return obj.(*v1alpha1.AnalyticsOutputServicebusQueue), err } // Delete takes name of the analyticsOutputServicebusQueue and deletes it. Returns an error if one occurs. func (c *FakeAnalyticsOutputServicebusQueues) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. Invokes(testing.NewDeleteAction(analyticsoutputservicebusqueuesResource, c.ns, name), &v1alpha1.AnalyticsOutputServicebusQueue{}) return err } // DeleteCollection deletes a collection of objects. func (c *FakeAnalyticsOutputServicebusQueues) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { action := testing.NewDeleteCollectionAction(analyticsoutputservicebusqueuesResource, c.ns, listOpts) _, err := c.Fake.Invokes(action, &v1alpha1.AnalyticsOutputServicebusQueueList{}) return err } // Patch applies the patch and returns the patched analyticsOutputServicebusQueue. func (c *FakeAnalyticsOutputServicebusQueues) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.AnalyticsOutputServicebusQueue, err error) { obj, err := c.Fake. Invokes(testing.NewPatchSubresourceAction(analyticsoutputservicebusqueuesResource, c.ns, name, pt, data, subresources...), &v1alpha1.AnalyticsOutputServicebusQueue{}) if obj == nil { return nil, err } return obj.(*v1alpha1.AnalyticsOutputServicebusQueue), err }
mod.rs
pub mod ic880a_eu868; pub mod ic880a_in865; pub mod ic880a_ru864;
dpagg_test.go
// // Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // package dpagg import ( "math" "github.com/google/differential-privacy/go/noise" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" ) // This file contains structs, functions, and values used to test DP aggregations. var ( ln3 = math.Log(3) tenten = math.Pow10(-10) tenfive = math.Pow10(-5) alphaLevel = 0.05 ) // noNoise is a Noise instance that doesn't add noise to the data, and has a // threshold of 5. type noNoise struct { noise.Noise } func (noNoise) AddNoiseInt64(x, _, _ int64, _, _ float64) int64 { return x } func (noNoise) AddNoiseFloat64(x float64, _ int64, _, _, _ float64) float64 { return x } func ApproxEqual(x, y float64) bool
{ return cmp.Equal(x, y, cmpopts.EquateApprox(0, tenten)) }
func (noNoise) Threshold(_ int64, _, _, _, _ float64) float64 { return 5 } func (nN noNoise) ComputeConfidenceIntervalInt64(noisedX, l0, lInf int64, eps, del, alpha float64) (noise.ConfidenceInterval, error) { confInt, err := nN.Noise.ComputeConfidenceIntervalInt64(noisedX, l0, lInf, eps, del, alpha) return confInt, err } func (nN noNoise) ComputeConfidenceIntervalFloat64(noisedX float64, l0 int64, lInf, eps, del, alpha float64) (noise.ConfidenceInterval, error) { confInt, err := nN.Noise.ComputeConfidenceIntervalFloat64(noisedX, l0, lInf, eps, del, alpha) return confInt, err } func getNoiselessConfInt(noise noise.Noise) noise.Noise { return noNoise{noise} } // mockConfInt is a Noise instance that returns a pre-set confidence interval. // Useful for testing post-processing in confidence intervals. type mockConfInt struct { noNoise confInt noise.ConfidenceInterval } func (mCI mockConfInt) ComputeConfidenceIntervalInt64(_, _, _ int64, _, _, _ float64) (noise.ConfidenceInterval, error) { return mCI.confInt, nil } func (mCI mockConfInt) ComputeConfidenceIntervalFloat64(_ float64, _ int64, _, _, _, _ float64) (noise.ConfidenceInterval, error) { return mCI.confInt, nil } func getMockConfInt(confInt noise.ConfidenceInterval) noise.Noise { return mockConfInt{confInt: confInt} }
commands.py
import click

from strava.commands import get_activity, get_constrain_activity, get_weekly_activity, get_lap_activity


@click.group(name='activity', help='[GROUP] Get the summary of one or multiple activities.')
def cli_activity():
    pass


cli_activity.add_command(get_activity)
cli_activity.add_command(get_constrain_activity)
cli_activity.add_command(get_weekly_activity)
cli_activity.add_command(get_lap_activity)
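# Usage sketch (illustrative, not from this repo): a group defined like
# `cli_activity` is normally attached to a root click group and dispatched by
# name. The root `cli` group below is an assumed name for illustration only.
#
#   @click.group()
#   def cli():
#       pass
#
#   cli.add_command(cli_activity)
#
# Running `cli activity <subcommand>` then routes to one of the commands
# registered above.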
template.py
import re
from typing import Tuple

from duty.utils import att_parse, format_response
from duty.objects import MySignalEvent, dp


def delete_template(name: str, templates: list) -> Tuple[list, bool]:
    for template in templates:
        if template['name'].lower() == name:
            templates.remove(template)
            return templates, True
    return templates, False


def get_template_list(event: MySignalEvent, templates: list):
    # Default the page up front: the original only assigned `page` inside the
    # argument-parsing branch, which raised NameError for calls without a page
    # argument (e.g. a bare category).
    page = 0
    if len(event.args) > 1:
        if event.args[-1].isdigit() or (event.args[-1].startswith('-') and event.args[-1][1:].isdigit()):
            page = int(event.args.pop(-1))
            if page > 0:
                page -= 1
    category = ' '.join(event.args).lower()
    if not category:
        cats = {}
        for t in templates:
            cats[t['cat']] = cats.get(t['cat'], 0) + 1
        message = "📚 Категории {name_genitive}:"
        for cat in cats:
            message += f"\n-- {cat} ({cats[cat]})"
    else:
        if category == 'все':
            message = '📃 Список всех {name_genitive}:'
            category = None
        else:
            message = f'📖 {{name_accusative_cap}} категории "{category}":'
        message += list_by_page(templates, page, category)
    if '\n' not in message:
        if templates == []:
            message = '{no_templates}'
        else:
            message = '⚠️ {name_accusative_cap} по указанному запросу не найдены'
    return message


def list_by_page(templates, page, category) -> str:
    if len(templates) > 40:
        if page >= 0:
            message = f'(страница #{page+1})'
        else:
            message = f'(страница #{abs(page)} с конца)'
    else:
        message = ''
    shift = page*40
    sliced_list = templates[shift:shift+40] if shift >= 0 else templates[shift-1:shift+39]
    if page < 0:
        try:
            sliced_list.append(templates[shift+39])
        except IndexError:
            pass
    offset = (shift+1) if shift >= 0 else (len(templates)+shift)
    for i, t in enumerate(sliced_list, offset):
        if category:
            if t['cat'] != category:
                continue
            message += f'\n-- {t["name"]}'
        else:
            message += f'\n{i}. {t["name"]} | {t["cat"]}'
    if '\n' not in message:
        return ''
    return '\n' + message


@dp.longpoll_event_register('+шаб')
@dp.my_signal_event_register('+шаб')
def template_create(event: MySignalEvent) -> str:
    name = re.findall(r"([^|]+)\|?([^|]*)", ' '.join(event.args))
    if not name:
        event.msg_op(2, "❗ Не указано название")
        return "ok"
    category = name[0][1].lower().strip() or 'без категории'
    name = name[0][0].lower().strip()
    if category == 'все':
        event.msg_op(2, '❗ Невозможно создать шаблон с категорией "все"')
        return "ok"
    if not (event.payload or event.attachments or event.reply_message):
        event.msg_op(2, "❗ Нет данных")
        return "ok"
    if event.reply_message:
        data = event.reply_message['text']
        event.attachments = att_parse(event.reply_message['attachments'])
        if event.attachments:
            if event.attachments[0].startswith('audio_message'):
                event.msg_op(2, '⚠️ Для сохранения ГС используй команду "+гс"')
                return "ok"
    else:
        data = event.payload
    # Re-saving under an existing name replaces the old template.
    event.db.templates, exist = delete_template(name, event.db.templates)
    event.db.templates.append({
        "name": name,
        "payload": data,
        "cat": category,
        "attachments": event.attachments
    })
    event.msg_op(2, f'✅ Шаблон "{name}" ' + ("перезаписан" if exist else "сохранен"), delete=2)
    return "ok"


@dp.longpoll_event_register('шабы')
@dp.my_signal_event_register('шабы')
def template_list(event: MySignalEvent) -> str:
    message = get_template_list(event, event.db.templates)
    event.msg_op(2, format_response(
        message,
        name_genitive='шаблонов',
        name_accusative='шаблоны',
        name_accusative_cap='Шаблоны',
        no_templates='👀 Нет ни одного шаблона... Для создания используй команду "+шаб"'
    ))
    return "ok"


def get_name(event: MySignalEvent) -> Tuple[MySignalEvent, str]:
    return event, ' '.join(event.args).lower()


@dp.longpoll_event_register('-шаб')
@dp.my_signal_event_register('-шаб')
@dp.wrap_handler(get_name)
def template_delete(event: MySignalEvent, name: str) -> str:
    event.db.templates, exist = delete_template(name, event.db.templates)
    if exist:
        msg = f'✅ Шаблон "{name}" удален'
    else:
        msg = f'⚠️ Шаблон "{name}" не найден'
    event.msg_op(2, msg, delete=1)
    return "ok"


@dp.longpoll_event_register('шаб')
@dp.my_signal_event_register('шаб')
@dp.wrap_handler(get_name)
def template_show(event: MySignalEvent, name: str) -> str:
    template = None
    for temp in event.db.templates:
        if temp['name'] == name:
            template = temp
            break
    if template:
        atts = template['attachments']
        atts.extend(event.attachments)
        event.msg_op(2, template['payload'] + '\n' + event.payload,
                     keep_forward_messages=1, attachment=','.join(atts))
    else:
        event.msg_op(2, f'❗ Шаблон "{name}" не найден')
    return "ok"
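# Worked example (illustrative, not part of the original module): how
# list_by_page() slices at 40 entries per page, including the negative
# "pages counted from the end" convention.
#
#   templates = [{'name': f't{i}', 'cat': 'demo'} for i in range(100)]
#   list_by_page(templates, 0, None)    # shift = 0:  templates[0:40]
#   list_by_page(templates, 1, None)    # shift = 40: templates[40:80]
#   list_by_page(templates, -1, None)   # shift = -40: a window taken from the
#                                       # end (templates[-41:-1]), with the
#                                       # final element appended by the
#                                       # try/except above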
Sort.test.ts
/** * @jest-environment jsdom */ import moment from 'moment'; window.moment = moment; import { Sort } from '../src/Sort'; import { getSettings, updateSettings } from '../src/Settings'; import { fromLine } from './TestHelpers'; describe('Sort', () => { it('sorts correctly by default order', () => { const one = fromLine({ line: '- [ ] a 📅 1970-01-01', path: '3' }); const two = fromLine({ line: '- [ ] c 📅 1970-01-02', path: '3' }); const three = fromLine({ line: '- [ ] d 📅 1970-01-03', path: '2' }); const four = fromLine({ line: '- [x] d 📅 1970-01-02', path: '2' }); const five = fromLine({ line: '- [x] b 📅 1970-01-02', path: '3' }); const six = fromLine({ line: '- [x] d 📅 1970-01-03', path: '2' }); const expectedOrder = [one, two, three, four, five, six]; expect( Sort.by({ sorting: [] }, [six, five, one, four, two, three]), ).toEqual(expectedOrder); }); it('sorts correctly by due', () => { const one = fromLine({ line: '- [x] bring out the trash 📅 2021-09-12', path: '', }); const two = fromLine({ line: '- [ ] pet the cat 📅 2021-09-15', path: '', }); const three = fromLine({ line: '- [ ] pet the cat 📅 2021-09-18', path: '', }); expect( Sort.by( { sorting: [ { property: 'due', reverse: false, propertyInstance: 1, }, ], }, [one, two, three], ), ).toEqual([one, two, three]); expect( Sort.by( { sorting: [ { property: 'due', reverse: false, propertyInstance: 1, }, ], }, [two, three, one], ), ).toEqual([one, two, three]); }); it('sorts correctly by done', () => { const one = fromLine({ line: '- [x] pet the cat 📅 2021-09-15 ✅ 2021-09-15', path: '', }); const two = fromLine({ line: '- [x] pet the cat 📅 2021-09-16 ✅ 2021-09-16', path: '', }); const three = fromLine({ line: '- [ ] bring out the trash 📅 2021-09-12', path: '', }); expect( Sort.by( { sorting: [ { property: 'done', reverse: false, propertyInstance: 1, }, ], }, [three, two, one], ), ).toEqual([one, two, three]); expect( Sort.by( { sorting: [ { property: 'done', reverse: false, propertyInstance: 1, }, ], }, [two, one, three], ), ).toEqual([one, two, three]); }); it('sorts correctly by due, path, status', () => { const one = fromLine({ line: '- [ ] a 📅 1970-01-01', path: '1' }); const two = fromLine({ line: '- [ ] c 📅 1970-01-02', path: '1' }); const three = fromLine({ line: '- [ ] d 📅 1970-01-02', path: '2' }); const four = fromLine({ line: '- [x] b 📅 1970-01-02', path: '2' }); const expectedOrder = [ one, // Sort by due date first. two, // Same due as the rest, but lower path. three, // Same as b, but not done. four, // Done tasks are sorted after open tasks for status. ]; expect( Sort.by( { sorting: [ { property: 'due', reverse: false, propertyInstance: 1, }, { property: 'path', reverse: false, propertyInstance: 1, }, { property: 'status', reverse: false, propertyInstance: 1, }, ], }, [one, four, two, three], ), ).toEqual(expectedOrder); }); it('sorts correctly by description, done', () => { const one = fromLine({ line: '- [ ] a 📅 1970-01-02 ✅ 1971-01-01', path: '', }); const two = fromLine({ line: '- [ ] a 📅 1970-01-02 ✅ 1971-01-03', path: '', }); const three = fromLine({ line: '- [ ] b 📅 1970-01-01 ✅ 1971-01-01', path: '', }); const four = fromLine({ line: '- [ ] b 📅 1970-01-02 ✅ 1971-01-02', path: '', }); const expectedOrder = [one, two, three, four]; expect( Sort.by( { sorting: [ { property: 'description', reverse: false, propertyInstance: 1, }, { property: 'done', reverse: false, propertyInstance: 1, }, ], },
[three, one, two, four], ),
).toEqual(expectedOrder); }); it('sorts correctly by description reverse, done', () => { const one = fromLine({ line: '- [ ] b 📅 1970-01-01 ✅ 1971-01-01', path: '', }); const two = fromLine({ line: '- [ ] b 📅 1970-01-02 ✅ 1971-01-02', path: '', }); const three = fromLine({ line: '- [ ] a 📅 1970-01-02 ✅ 1971-01-01', path: '', }); const four = fromLine({ line: '- [ ] a 📅 1970-01-02 ✅ 1971-01-03', path: '', }); const expectedOrder = [one, two, three, four]; expect( Sort.by( { sorting: [ { property: 'description', reverse: true, propertyInstance: 1, }, { property: 'done', reverse: false, propertyInstance: 1, }, ], }, [two, four, three, one], ), ).toEqual(expectedOrder); }); it('sorts correctly by complex sorting incl. reverse', () => { const one = fromLine({ line: '- [x] a 📅 1970-01-03', path: '3' }); const two = fromLine({ line: '- [x] c 📅 1970-01-02', path: '2' }); const three = fromLine({ line: '- [x] d 📅 1970-01-02', path: '3' }); const four = fromLine({ line: '- [ ] d 📅 1970-01-02', path: '2' }); const five = fromLine({ line: '- [ ] b 📅 1970-01-02', path: '3' }); const six = fromLine({ line: '- [ ] d 📅 1970-01-01', path: '2' }); const expectedOrder = [one, two, three, four, five, six]; expect( Sort.by( { sorting: [ { property: 'status', reverse: true, propertyInstance: 1, }, { property: 'due', reverse: true, propertyInstance: 1 }, { property: 'path', reverse: false, propertyInstance: 1, }, ], }, [six, five, one, four, three, two], ), ).toEqual(expectedOrder); }); it('sorts correctly by the link name and not the markdown', () => { const one = fromLine({ line: '- [ ] *ZZZ An early task that starts with an A; actually not italic since only one asterisk', }); const two = fromLine({ line: '- [ ] [[Better be second]] with bla bla behind it', }); const three = fromLine({ line: '- [ ] [[Another|Third it should be]] and not [last|ZZZ]', }); const four = fromLine({ line: '- [ ] *Very italic text*', }); const five = fromLine({ line: '- [ ] [@Zebra|Zebra] should be last for Zebra', }); const expectedOrder = [one, two, three, four, five]; expect( Sort.by( { sorting: [ { property: 'description', reverse: false, propertyInstance: 1, }, ], }, [two, one, five, four, three], ), ).toEqual(expectedOrder); }); }); /* * All the test cases below have tasks with 0 or more tags against them. This is to * ensure that the sorting can handle the ordering correctly when there are no tags or * if one of th tasks has less tags than the other. * * There is also a task with additional characters in the name to ensure it is seen * as bigger that one with the same initial characters. 
*/ describe('Sort by tags', () => { it('should sort correctly by tag defaulting to first with no global filter', () => { // Arrange const t1 = fromLine({ line: '- [ ] a #aaa #jjj' }); const t2 = fromLine({ line: '- [ ] a #bbb #iii' }); const t3 = fromLine({ line: '- [ ] a #ccc #bbb' }); const t4 = fromLine({ line: '- [ ] a #ddd #ggg' }); const t5 = fromLine({ line: '- [ ] a #eee #fff' }); const t6 = fromLine({ line: '- [ ] a #fff #aaa' }); const t7 = fromLine({ line: '- [ ] a #ggg #ccc' }); const t8 = fromLine({ line: '- [ ] a #hhh #eee' }); const t9 = fromLine({ line: '- [ ] a #iii #ddd' }); const t10 = fromLine({ line: '- [ ] a #jjj #hhh' }); const expectedOrder = [t1, t2, t3, t4, t5, t6, t7, t8, t9, t10]; // Act / Assert expect( Sort.by( { sorting: [ { property: 'tag', reverse: false, propertyInstance: 1, }, ], }, [t1, t3, t5, t7, t6, t4, t2, t8, t9, t10], ), ).toEqual(expectedOrder); }); it('should sort correctly reversed by tag defaulting to first with no global filter', () => { // Arrange const t1 = fromLine({ line: '- [ ] a #aaa #jjj' }); const t2 = fromLine({ line: '- [ ] a #bbb #iii' }); const t3 = fromLine({ line: '- [ ] a #ccc #bbb' }); const t4 = fromLine({ line: '- [ ] a #ddd #ggg' }); const t5 = fromLine({ line: '- [ ] a #eee #fff' }); const t6 = fromLine({ line: '- [ ] a #fff #aaa' }); const t7 = fromLine({ line: '- [ ] a #ggg #ccc' }); const t8 = fromLine({ line: '- [ ] a #hhh #eee' }); const t9 = fromLine({ line: '- [ ] a #iii #ddd' }); const t10 = fromLine({ line: '- [ ] a #jjj #hhh' }); const expectedOrder = [t10, t9, t8, t7, t6, t5, t4, t3, t2, t1]; // Act / Assert expect( Sort.by( { sorting: [ { property: 'tag', reverse: true, propertyInstance: 1, }, ], }, [t1, t3, t5, t7, t6, t4, t2, t8, t9, t10], ), ).toEqual(expectedOrder); }); it('should sort correctly by second tag with no global filter', () => { const t1 = fromLine({ line: '- [ ] a #fff #aaa' }); const t2 = fromLine({ line: '- [ ] a #ccc #bbb' }); const t3 = fromLine({ line: '- [ ] a #ggg #ccc' }); const t4 = fromLine({ line: '- [ ] a #iii #ddd' }); const t5 = fromLine({ line: '- [ ] a #hhh #eee' }); const expectedOrder = [t1, t2, t3, t4, t5]; expect( Sort.by( { sorting: [ { property: 'tag', reverse: false, propertyInstance: 2, }, ], }, [t4, t3, t2, t1, t5], ), ).toEqual(expectedOrder); }); it('should sort correctly reversed by second tag with no global filter', () => { const t1 = fromLine({ line: '- [ ] a #fff #aaa' }); const t2 = fromLine({ line: '- [ ] a #ccc #bbb' }); const t3 = fromLine({ line: '- [ ] a #ggg #ccc' }); const t4 = fromLine({ line: '- [ ] a #iii #ddd' }); const t5 = fromLine({ line: '- [ ] a #hhh #eee' }); const expectedOrder = [t5, t4, t3, t2, t1]; expect( Sort.by( { sorting: [ { property: 'tag', reverse: true, propertyInstance: 2, }, ], }, [t4, t3, t2, t1, t5], ), ).toEqual(expectedOrder); }); it('should sort correctly by tag defaulting to first with global filter', () => { // Arrange const originalSettings = getSettings(); updateSettings({ globalFilter: '#task' }); const t1 = fromLine({ line: '- [ ] #task a #aaa #jjj' }); const t2 = fromLine({ line: '- [ ] #task a #aaaa #aaaa' }); const t3 = fromLine({ line: '- [ ] #task a #bbb #iii' }); const t4 = fromLine({ line: '- [ ] #task a #bbbb ' }); const t5 = fromLine({ line: '- [ ] #task a #ccc #bbb' }); const t6 = fromLine({ line: '- [ ] #task a #ddd #ggg' }); const t7 = fromLine({ line: '- [ ] #task a #eee #fff' }); const t8 = fromLine({ line: '- [ ] #task a #fff #aaa' }); const t9 = fromLine({ line: '- [ ] #task a #ggg #ccc' }); const t10 = 
fromLine({ line: '- [ ] #task a #hhh #eee' }); const t11 = fromLine({ line: '- [ ] #task a #iii #ddd' }); const t12 = fromLine({ line: '- [ ] #task a #jjj #hhh' }); const t13 = fromLine({ line: '- [ ] #task a' }); const expectedOrder = [ t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, ]; // Act expect( Sort.by( { sorting: [ { property: 'tag', reverse: false, propertyInstance: 1, }, ], }, [t1, t12, t3, t13, t5, t7, t6, t4, t2, t8, t9, t10, t11], ), ).toEqual(expectedOrder); // Cleanup updateSettings(originalSettings); }); it('should sort correctly reversed by tag defaulting to first with global filter', () => { // Arrange const originalSettings = getSettings(); updateSettings({ globalFilter: '#task' }); const t1 = fromLine({ line: '- [ ] #task a #aaa #jjj' }); const t2 = fromLine({ line: '- [ ] #task a #aaaa #aaaa' }); const t3 = fromLine({ line: '- [ ] #task a #bbb #iii' }); const t4 = fromLine({ line: '- [ ] #task a #bbbb ' }); const t5 = fromLine({ line: '- [ ] #task a #ccc #bbb' }); const t6 = fromLine({ line: '- [ ] #task a #ddd #ggg' }); const t7 = fromLine({ line: '- [ ] #task a #eee #fff' }); const t8 = fromLine({ line: '- [ ] #task a #fff #aaa' }); const t9 = fromLine({ line: '- [ ] #task a #ggg #ccc' }); const t10 = fromLine({ line: '- [ ] #task a #hhh #eee' }); const t11 = fromLine({ line: '- [ ] #task a #iii #ddd' }); const t12 = fromLine({ line: '- [ ] #task a #jjj #hhh' }); const t13 = fromLine({ line: '- [ ] #task a' }); const expectedOrder = [ t13, t12, t11, t10, t9, t8, t7, t6, t5, t4, t3, t2, t1, ]; // Act expect( Sort.by( { sorting: [ { property: 'tag', reverse: true, propertyInstance: 1, }, ], }, [t1, t12, t3, t13, t5, t7, t6, t4, t2, t8, t9, t10, t11], ), ).toEqual(expectedOrder); // Cleanup updateSettings(originalSettings); }); it('should sort correctly by second tag with global filter', () => { // Arrange const originalSettings = getSettings(); updateSettings({ globalFilter: '#task' }); const t1 = fromLine({ line: '- [ ] #task a #fff #aaa' }); const t2 = fromLine({ line: '- [ ] #task a #aaaa #aaaa' }); const t3 = fromLine({ line: '- [ ] #task a #ccc #bbb' }); const t4 = fromLine({ line: '- [ ] #task a #ggg #ccc' }); const t5 = fromLine({ line: '- [ ] #task a #bbb #iii' }); const t6 = fromLine({ line: '- [ ] #task a #aaa #jjj' }); const t7 = fromLine({ line: '- [ ] #task a #bbbb' }); const t8 = fromLine({ line: '- [ ] #task a' }); const expectedOrder = [t1, t2, t3, t4, t5, t6, t7, t8]; // Act const result = Sort.by( { sorting: [ { property: 'tag', reverse: false, propertyInstance: 2, }, ], }, [t4, t7, t5, t2, t3, t1, t8, t6], ); // Assert expect(result).toEqual(expectedOrder); // Cleanup updateSettings(originalSettings); }); it('should sort correctly reversed by second tag with global filter', () => { // Arrange const originalSettings = getSettings(); updateSettings({ globalFilter: '#task' }); const t1 = fromLine({ line: '- [ ] #task a #fff #aaa' }); const t2 = fromLine({ line: '- [ ] #task a #aaaa #aaaa' }); const t3 = fromLine({ line: '- [ ] #task a #ccc #bbb' }); const t4 = fromLine({ line: '- [ ] #task a #ggg #ccc' }); const t5 = fromLine({ line: '- [ ] #task a #bbb #iii' }); const t6 = fromLine({ line: '- [ ] #task a #aaa #jjj' }); const t7 = fromLine({ line: '- [ ] #task a #bbbb' }); const t8 = fromLine({ line: '- [ ] #task a' }); const expectedOrder = [t8, t7, t6, t5, t4, t3, t2, t1]; // Act const result = Sort.by( { sorting: [ { property: 'tag', reverse: true, propertyInstance: 2, }, ], }, [t4, t7, t5, t2, t3, t1, t8, t6], ); // Assert 
expect(result).toEqual(expectedOrder); // Cleanup updateSettings(originalSettings); }); });
dump_sqlite.py
import os, sys, time, sqlite3 from collections import defaultdict as dd from lxml import etree import argparse ################################################################################ # TO DO ################################################################################ # - warn if requested docs do not exist, only get data for them # - get chunks # - get sentiment chunks # - rename DTD to user_stamp time_stamp # - ################################################################################ def create_output(corpus):
    output = """<?xml version="1.0" encoding="UTF-8"?>\n<!DOCTYPE Corpus SYSTEM "ntumc.dtd">"""
    output += etree.tostring(corpus, pretty_print=True).decode()
    return output
def dump_tree_to_file(output, filename='dump.xml'): with open(filename, 'w') as so: so.write(output) print('{} successfully created'.format(filename)) def inf_dd(): return dd(inf_dd) def valid_xml(c): codepoint = ord(c) # conditions ordered by presumed frequency return ( 0x20 <= codepoint <= 0xD7FF or codepoint in (0x9, 0xA, 0xD) or 0xE000 <= codepoint <= 0xFFFD or 0x10000 <= codepoint <= 0x10FFFF ) def main(db_path,docs): cwd = os.path.dirname(os.path.abspath(__file__)) db_path = os.path.join(cwd, db_path) ################################################################################ # CONNECT TO DB ################################################################################ with sqlite3.connect(db_path) as con: c = con.cursor() ################################################################################ c.execute("""SELECT docid,doc,title,url, subtitle, corpusID FROM doc """) sql_docs = c.fetchall() c.execute("""SELECT sid, docID, pid, sent, comment, usrname FROM sent """) sql_sents = c.fetchall() sents_by_doc_dd = dd(list) for s in sql_sents: sents_by_doc_dd[s[1]].append(s) c.execute("""SELECT sid, wid, word, pos, lemma, cfrom, cto, comment, usrname FROM word """) sql_words = c.fetchall() words_by_sent_dd = dd(list) for w in sql_words: words_by_sent_dd[w[0]].append(w) c.execute("""SELECT sid, cid, clemma, tag, tags, comment, ntag, usrname FROM concept """) sql_concepts = c.fetchall() concepts_by_sent_dd = dd(list) for cc in sql_concepts: concepts_by_sent_dd[cc[0]].append(cc) c.execute("""SELECT sid, wid, cid, usrname FROM cwl """) sql_cwl = c.fetchall() cwl_by_sent_dd = dd(lambda: dd(list)) for cwl in sql_cwl: sid, wid, cid, usrname = cwl cwl_by_sent_dd[sid][cid].append(wid) c.execute("""SELECT sid, cid, score, username FROM sentiment """) sql_senti = c.fetchall() senti_by_sent_dd = dd(lambda: dd(tuple)) for (sid, cid, score, username) in sql_senti: senti_by_sent_dd[sid][cid] = (score, username) ################################################################################ # BUILD THE XML ################################################################################ corpus = etree.Element("Corpus") corpus.set("corpusID", "ntumc") corpus.set("title", "NTU Multilingual Corpus") corpus.set("language", "multilingual") for d in sql_docs: docid, docname, doctitle, docurl, docsub, doccoll = d if docname not in docs: continue doclang = "eng" document = etree.SubElement(corpus, "Document") document.set("docID", "d" + str(docid)) document.set("doc", docname) document.set("language", doclang) document.set("title", doctitle) if docsub: document.set("subtitle", docsub) if docurl: document.set("url", docurl) if doccoll: document.set("collection", str(doccoll)) for s in sents_by_doc_dd[docid]: sid, docID, pid, sent, comment, user = s sentence = etree.SubElement(document, "Sentence") sentence.set("sid", "s" + str(sid)) try: sentence.set("sent", sent) except: cleaned_sent = ''.join(c for c in sent if valid_xml(c)) sentence.set("sent", cleaned_sent) if pid: sentence.set("pid", str(pid)) if comment: sentence.set("comment", comment) if user: sentence.set("last_changed_by", user) for w in words_by_sent_dd[sid]: sid, wid, word_surf, pos, lemma, cfrom, cto, comment, user = w word = etree.SubElement(sentence, "Word") word.set("wid", "s" + str(sid) + "w" + str(wid)) try: word.set("surface_form", word_surf) except: cleaned_word = ''.join(c for c in word_surf if valid_xml(c)) word.set("surface_form", cleaned_word) if pos: word.set("pos", pos) if lemma: word.set("lemma", lemma) if cfrom: 
word.set("cfrom", str(cfrom)) if cto: word.set("cto", str(cto)) if comment: word.set("comment", comment) if user: word.set("last_changed_by", user) for cc in concepts_by_sent_dd[sid]: sid, cid, clemma, tag, tags, comment, ntag, user = cc concept = etree.SubElement(sentence, "Concept") concept.set("cid", "s" + str(sid) + "c" + str(cid)) wids = "" for wid in cwl_by_sent_dd[sid][cid]: wids += "s" + str(sid) + "w" + str(wid) + " " wids = wids.strip() concept.set("wid", wids) if clemma: concept.set("clemma", clemma) if tag: concept.set("synset_tag", tag) if comment: concept.set("comment", comment) if user: concept.set("last_changed_by", user) if senti_by_sent_dd[sid][cid]: score, username = senti_by_sent_dd[sid][cid] if abs(score) > 100: print("score too high!", sid, cid, score) else: score = round(score/100.0,2) concept_tag = etree.SubElement(concept, "Tag") concept_tag.set("category","sentiment") concept_tag.set("value", str(score)) if username: concept_tag.set("last_changed_by", username) # chunk = etree.SubElement(sentence, "Chunk") # chunk.set("chid","ch1") # chunk.set("wid","w2 w3") # chunk_tag = etree.SubElement(chunk, "Tag") # chunk_tag.set("category","sentiment") # chunk_tag.set("value","0.8") return corpus if __name__ == '__main__': usage = "Correct usage: python dump_sqlite.py.py -d <path_to_db_file>" parser = argparse.ArgumentParser(description="Dumps given sqlite file to xml validated against ntumc.dtd") parser.add_argument("-d", "--database", help="database file") parser.add_argument("-o", "--output", help="output to file in current folder", action="store_true", default=False) parser.add_argument("--docs", help="list of docs to output", nargs="*", default=[]) options = parser.parse_args() if options.database: db_path = options.database corpus = main(db_path,options.docs) output = create_output(corpus) if options.output: filename = os.path.basename(db_path).split(".sqlite")[0] + '.xml' dump_tree_to_file(output, filename) else: print(usage) sys.exit(0)
output = """<?xml version="1.0" encoding="UTF-8"?>\n<!DOCTYPE Corpus SYSTEM "ntumc.dtd">""" output += etree.tostring(corpus, pretty_print=True).decode() return output
initialState.js
export default { datesPicked: [], formFields: { one: "", two: "", three: "", four: "",
five: "", six: "", seven: "" } };
names.py
SUBSTITUTIONS = {
    "ODP20": ("Stratospheric Ozone Depletion", "20 year timescale"),
    "ODP100": ("Stratospheric Ozone Depletion", "100 year timescale"),
    "ODPinfinite": ("Stratospheric Ozone Depletion", "Infinite timescale"),
    "EOFP": ("Ozone Formation", "Damage to Ecosystems"),
    "HOFP": ("Ozone Formation", "Damage to Humans"),
    "I": "Individualist",
    "H": "Hierarchist",
    "E": "Egalitarian",
    "Individualistic": "Individualist",
    "Hierarchic": "Hierarchist",
    "PMFP": "Particulate Matter Formation",
    "GWP20": ("Global Warming", "20 year timescale"),
    "GWP100": ("Global Warming", "100 year timescale"),
    "GWP1000": ("Global Warming", "1000 year timescale"),
    "AP": "Terrestrial Acidification",
    "FEP": "Freshwater Eutrophication",
    "ETPterrestrial": ("Ecotoxicity", "Terrestrial"),
    "ETPfw": ("Ecotoxicity", "Freshwater"),
    "ETPmarine": ("Ecotoxicity", "Marine"),
    "CFhuman-mid-ncarc": ("Toxicity", "Non-carcinogenic"),
    "CFhuman-mid-carc": ("Toxicity", "Carcinogenic"),
    "SOP": "Mineral Resource Scarcity",
    "IRP": "Ionizing Radiation",
}


def final_method_name(data):
    # Expand each element of ds["name"]: string substitutions replace the
    # element, tuple substitutions splice in their parts, and unknown
    # elements pass through unchanged. A single pass suffices; the original
    # redundant outer loop over SUBSTITUTIONS.items() has been dropped.
    for ds in data:
        name = []
        for elem in ds["name"]:
            try:
                r = SUBSTITUTIONS[elem]
                if isinstance(r, str):
                    name.append(r)
                else:
                    name.extend(list(r))
            except KeyError:
                name.append(elem)
        ds["name"] = tuple(name)
    return data
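# A worked example of the substitution pass above (the dataset values are
# made up for illustration):
#
#     data = [{"name": ("GWP100", "kg CO2-eq")}]
#     final_method_name(data)
#     # data[0]["name"] == ("Global Warming", "100 year timescale", "kg CO2-eq")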
flask_pystmark.py
from flask import current_app from pystmark import (send, send_batch, get_delivery_stats, get_bounces, get_bounce, get_bounce_dump, get_bounce_tags, activate_bounce, Message as _Message) from __about__ import __version__, __title__, __description__ __all__ = ['__version__', '__title__', '__description__', 'Pystmark', 'Message'] class Pystmark(object): ''' A wrapper around the Simple API of pystmark. Refer to http://pystmark.readthedocs.org/en/latest/api.html#simple-api for more details. :param app: Flask app to initialize with. Defaults to `None` ''' def __init__(self, app=None): if app is not None: self.init_app(app) def init_app(self, app): ''' Initialize Pystmark with a Flask app ''' app.pystmark = self def send(self, message, **request_args): '''Send a message. :param message: Message to send. :type message: `dict` or :class:`Message` :param \\*\\*request_args: Keyword arguments to pass to :func:`requests.request`. :rtype: :class:`pystmark.SendResponse` ''' return self._pystmark_call(send, message, **request_args) def send_batch(self, messages, **request_args): '''Send a batch of messages. :param messages: Messages to send. :type message: A list of `dict` or :class:`Message` :param \\*\\*request_args: Keyword arguments to pass to :func:`requests.request`. :rtype: :class:`pystmark.BatchSendResponse` ''' return self._pystmark_call(send_batch, messages, **request_args) def get_delivery_stats(self, **request_args): '''Get delivery stats for your Postmark account. :param \\*\\*request_args: Keyword arguments to pass to :func:`requests.request`. :rtype: :class:`pystmark.DeliveryStatsResponse` ''' return self._pystmark_call(get_delivery_stats, **request_args) def get_bounces(self, **request_args): '''Get a paginated list of bounces. :param \\*\\*request_args: Keyword arguments to pass to :func:`requests.request`. :rtype: :class:`pystmark.BouncesResponse` ''' return self._pystmark_call(get_bounces, **request_args)
    def get_bounce_tags(self, **request_args):
        '''Get a list of tags for bounces associated with your Postmark server.
:param \\*\\*request_args: Keyword arguments to pass to :func:`requests.request`. :rtype: :class:`pystmark.BounceTagsResponse` ''' return self._pystmark_call(get_bounce_tags, **request_args) def get_bounce(self, bounce_id, **request_args): '''Get a single bounce. :param bounce_id: The bounce's id. Get the id with :func:`get_bounces`. :param \\*\\*request_args: Keyword arguments to pass to :func:`requests.request`. :rtype: :class:`pystmark.BounceResponse` ''' return self._pystmark_call(get_bounce, bounce_id, **request_args) def get_bounce_dump(self, bounce_id, **request_args): '''Get the raw email dump for a single bounce. :param bounce_id: The bounce's id. Get the id with :func:`get_bounces`. :param \\*\\*request_args: Keyword arguments to pass to :func:`requests.request`. :rtype: :class:`pystmark.BounceDumpResponse` ''' return self._pystmark_call(get_bounce_dump, bounce_id, **request_args) def activate_bounce(self, bounce_id, **request_args): '''Activate a deactivated bounce. :param bounce_id: The bounce's id. Get the id with :func:`get_bounces`. :param \\*\\*request_args: Keyword arguments to pass to :func:`requests.request`. :rtype: :class:`pystmark.BounceActivateResponse` ''' return self._pystmark_call(activate_bounce, bounce_id, **request_args) def _pystmark_call(self, method, *args, **kwargs): ''' Wraps a call to the pystmark Simple API, adding configured settings ''' kwargs = self._apply_config(**kwargs) return method(*args, **kwargs) @staticmethod def _apply_config(**kwargs): '''Adds the current_app's pystmark configuration to a dict. If a configuration value has been specified in \\*\\*kwargs, it will not be overriden by the app's configuration. :param kwargs: Keyword arguments to be passed to the pystmark Simple API ''' kwargs = dict(**kwargs) kwargs.setdefault('api_key', current_app.config['PYSTMARK_API_KEY']) kwargs.setdefault('secure', current_app.config.get('PYSTMARK_HTTPS', True)) kwargs.setdefault('test', current_app.config.get('PYSTMARK_TEST_API', False)) return kwargs class Message(_Message): ''' A container for message(s) to send to the Postmark API. You can populate this message with defaults for initializing an :class:`Interface` from the pystmark library. The message will be combined with the final message and verified before transmission. Refer to http://pystmark.readthedocs.org/en/latest/api.html#message-object for more details. :param sender: Email address of the sender. Defaults to PYSTMARK_DEFAULT_SENDER if defined. :param to: Destination email address. :param cc: A list of cc'd email addresses. :param bcc: A list of bcc'd email address. :param subject: The message subject. :param tag: Tag your emails with this. :param html: HTML body content. :param text: Text body content. :param reply_to: Email address to reply to. Defaults to PYSTMARK_DEFAULT_REPLY_TO, if defined. :param headers: Additional headers to include with the email. If you do not have the headers formatted for the Postmark API, use :meth:`Message.add_header`. Defaults to PYSTMARK_DEFAULT_HEADERS, if defined. :type headers: A list of `dict`, each with the keys 'Name' and 'Value'. :param attachments: Attachments to include with the email. If you do not have the attachments formatted for the Postmark API, use :meth:`Message.attach_file` or :meth:`Message.attach_binary`. :type attachments: A list of `dict`, each with the keys 'Name', 'Content' and 'ContentType'. :param verify: Verify the message when initialized. Defaults to PYSTMARK_VERIFY_MESSAGES if provided, otherwise `False`. 
''' def __init__(self, sender=None, to=None, cc=None, bcc=None, subject=None, tag=None, html=None, text=None, reply_to=None, headers=None, attachments=None, verify=None, track_opens=None): if sender is None: sender = current_app.config.get('PYSTMARK_DEFAULT_SENDER') if reply_to is None: reply_to = current_app.config.get('PYSTMARK_DEFAULT_REPLY_TO') if headers is None: headers = current_app.config.get('PYSTMARK_DEFAULT_HEADERS') if verify is None: verify = current_app.config.get('PYSTMARK_VERIFY_MESSAGES', False) super(Message, self).__init__(sender=sender, to=to, cc=cc, bcc=bcc, subject=subject, tag=tag, html=html, text=text, reply_to=reply_to, headers=headers, attachments=attachments, verify=verify, track_opens=track_opens)
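# A minimal wiring sketch, assuming this module is importable as
# `flask_pystmark`; the addresses are placeholders and `POSTMARK_API_TEST`
# is Postmark's documented test token:
#
#     from flask import Flask
#     from flask_pystmark import Pystmark, Message
#
#     app = Flask(__name__)
#     app.config['PYSTMARK_API_KEY'] = 'POSTMARK_API_TEST'
#     pystmark = Pystmark(app)
#
#     with app.app_context():
#         msg = Message(sender='app@example.com', to='user@example.com',
#                       subject='Hello', text='Hello from Flask-Pystmark')
#         pystmark.send(msg)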
config.rs
//! Loads configuration from multiple sources use anyhow::{Error, Result}; use once_cell::sync::OnceCell; use serde::Deserialize; static INSTANCE: OnceCell<Config> = OnceCell::new(); /// Global configuration #[derive(Deserialize, Debug)] pub struct Config { #[serde(skip, default)] pub build: Build, pub hardware: crate::hardware::config::Hardware, pub web: crate::web::config::Web, } impl Config { /// Loads the global config (may only be used once) pub fn load() -> Result<()> { let config_file = std::fs::read_to_string("habctl.toml")?; let config = toml::from_str(&config_file)?;
        INSTANCE
            .set(config)
.map_err(|_| Error::msg("already loaded"))?; Ok(()) } /// Gets the global config pub fn get() -> &'static Config { INSTANCE.get().expect("config not loaded") } } /// Configuration captured at build time #[derive(Debug)] pub struct Build { pub name: &'static str, pub version: &'static str, } impl Default for Build { fn default() -> Self { Self { name: env!("CARGO_PKG_NAME"), version: env!("CARGO_PKG_VERSION"), } } }
api_op_CreateCostCategoryDefinition.go
// Code generated by smithy-go-codegen DO NOT EDIT. package costexplorer import ( "context" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" "github.com/aws/aws-sdk-go-v2/service/costexplorer/types" "github.com/awslabs/smithy-go/middleware" smithyhttp "github.com/awslabs/smithy-go/transport/http" ) // Creates a new Cost Category with the requested name and rules. func (c *Client) CreateCostCategoryDefinition(ctx context.Context, params *CreateCostCategoryDefinitionInput, optFns ...func(*Options)) (*CreateCostCategoryDefinitionOutput, error) { if params == nil { params = &CreateCostCategoryDefinitionInput{} } result, metadata, err := c.invokeOperation(ctx, "CreateCostCategoryDefinition", params, optFns, addOperationCreateCostCategoryDefinitionMiddlewares) if err != nil { return nil, err } out := result.(*CreateCostCategoryDefinitionOutput) out.ResultMetadata = metadata return out, nil } type CreateCostCategoryDefinitionInput struct { // The unique name of the Cost Category. // // This member is required. Name *string // The rule schema version in this particular Cost Category. // // This member is required. RuleVersion types.CostCategoryRuleVersion // The Cost Category rules used to categorize costs. For more information, see // CostCategoryRule // (https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_CostCategoryRule.html). // // This member is required. Rules []*types.CostCategoryRule } type CreateCostCategoryDefinitionOutput struct { // The unique identifier for your newly created Cost Category. CostCategoryArn *string // The Cost Category's effective start date. EffectiveStart *string // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata } func addOperationCreateCostCategoryDefinitionMiddlewares(stack *middleware.Stack, options Options) (err error) { err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateCostCategoryDefinition{}, middleware.After) if err != nil { return err } err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateCostCategoryDefinition{}, middleware.After) if err != nil
{
        return err
    }
if err = addSetLoggerMiddleware(stack, options); err != nil { return err } if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { return err } if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { return err } if err = addRetryMiddlewares(stack, options); err != nil { return err } if err = addHTTPSignerV4Middleware(stack, options); err != nil { return err } if err = awsmiddleware.AddAttemptClockSkewMiddleware(stack); err != nil { return err } if err = addClientUserAgent(stack); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { return err } if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } if err = addOpCreateCostCategoryDefinitionValidationMiddleware(stack); err != nil { return err } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateCostCategoryDefinition(options.Region), middleware.Before); err != nil { return err } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } if err = addResponseErrorMiddleware(stack); err != nil { return err } if err = addRequestResponseLogging(stack, options); err != nil { return err } return nil } func newServiceMetadataMiddleware_opCreateCostCategoryDefinition(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, SigningName: "ce", OperationName: "CreateCostCategoryDefinition", } }
field_structure_filter.ts
/* * Copyright 2022 Salto Labs Ltd. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import { BuiltinTypes, CORE_ANNOTATIONS, Element, ElemIdGetter, Field, InstanceElement, isInstanceElement, isObjectType, ListType, MapType, ObjectType, ReferenceExpression, ServiceIds, Values } from '@salto-io/adapter-api' import { naclCase, pathNaclCase } from '@salto-io/adapter-utils' import { config as configUtils, elements as elementUtils } from '@salto-io/adapter-components' import { logger } from '@salto-io/logging' import _ from 'lodash' import { values } from '@salto-io/lowerdash' import { JIRA } from '../../constants' import { JiraConfig } from '../../config' import { FilterCreator } from '../../filter' import { FIELD_CONTEXT_DEFAULT_TYPE_NAME, FIELD_CONTEXT_OPTION_TYPE_NAME, FIELD_CONTEXT_TYPE_NAME, FIELD_TYPE_NAME } from './constants' const { generateInstanceNameFromConfig } = elementUtils const log = logger(module) const addTypeValue = (instance: InstanceElement): void => { if (instance.value.schema?.custom !== undefined) { instance.value.type = instance.value.schema.custom delete instance.value.schema } } const addDefaultValuesToContexts = ( instance: InstanceElement, idToContext: Record<string, Values> ): void => { (instance.value.contextDefaults ?? []).forEach((contextDefault: Values) => { if (idToContext[contextDefault.contextId] === undefined) { log.warn(`Context with id ${contextDefault.contextId} not found in instance ${instance.elemID.getFullName()} when assigning context defaults`) return } idToContext[contextDefault.contextId].defaultValue = _.omit(contextDefault, 'contextId') }) delete instance.value.contextDefaults } const addPropertyToContexts = ( { instance, idToContext, allPropertiesFieldName, propertyFieldName, isGlobalFieldName, destinationFieldName, }: { instance: InstanceElement idToContext: Record<string, Values> allPropertiesFieldName: string propertyFieldName: string isGlobalFieldName: string destinationFieldName: string } ): void => { (instance.value[allPropertiesFieldName] ?? 
[]) .filter((property: Values) => !property[isGlobalFieldName]) .forEach((property: Values) => { if (idToContext[property.contextId] === undefined) { log.warn(`Context with id ${property.contextId} not found in instance ${instance.elemID.getFullName()} when assigning ${destinationFieldName}`) return } if (idToContext[property.contextId][destinationFieldName] === undefined) { idToContext[property.contextId][destinationFieldName] = [] } idToContext[property.contextId][destinationFieldName].push(property[propertyFieldName]) }) delete instance.value[allPropertiesFieldName] } const addIssueTypesToContexts = ( instance: InstanceElement, idToContext: Record<string, Values> ): void => addPropertyToContexts({ instance, idToContext, allPropertiesFieldName: 'contextIssueTypes', propertyFieldName: 'issueTypeId', isGlobalFieldName: 'isAnyIssueType', destinationFieldName: 'issueTypeIds', }) const addProjectsToContexts = ( instance: InstanceElement, idToContext: Record<string, Values> ): void => addPropertyToContexts({ instance, idToContext, allPropertiesFieldName: 'contextProjects', propertyFieldName: 'projectId', isGlobalFieldName: 'isGlobalContext', destinationFieldName: 'projectIds', }) const addCascadingOptionsToOptions = (instance: InstanceElement): void => { instance.value.contexts ?.filter((context: Values) => context.options !== undefined) .forEach((context: Values) => { const idToOption = _.keyBy(context.options, option => option.id) context.options .filter((option: Values) => option.optionId !== undefined) .forEach((option: Values) => { if (idToOption[option.optionId].cascadingOptions === undefined) { idToOption[option.optionId].cascadingOptions = {} } idToOption[option.optionId].cascadingOptions[naclCase(option.value)] = { ..._.omit(option, 'optionId'), position: Object.keys(idToOption[option.optionId].cascadingOptions).length, } }) context.options = context.options.filter((option: Values) => option.optionId === undefined) }) } const transformOptionsToMap = (instance: InstanceElement): void => { instance.value.contexts ?.filter((context: Values) => context.options !== undefined) .forEach((context: Values) => { const optionsWithIndex = context.options .map((option: Values, position: number) => ({ ...option, position })) context.options = _.keyBy(optionsWithIndex, option => naclCase(option.value)) }) } const getServiceIds = ( instanceValues: Values,
type: ObjectType, config: JiraConfig,
): ServiceIds | undefined => { const { serviceIdField } = configUtils.getConfigWithDefault( config.apiDefinitions.types[type.elemID.name].transformation, config.apiDefinitions.typeDefaults.transformation ) return serviceIdField !== undefined ? elementUtils.createServiceIds(instanceValues, serviceIdField, type.elemID) : undefined } const createContextInstance = ( context: Values, contextType: ObjectType, parentField: InstanceElement, config: JiraConfig, getElemIdFunc?: ElemIdGetter, ): InstanceElement => { const contextName = generateInstanceNameFromConfig( context, contextType.elemID.typeName, config.apiDefinitions ) ?? context.id const defaultName = naclCase([parentField.elemID.name, contextName].join('_')) const serviceIds = getServiceIds(context, contextType, config) const instanceName = getElemIdFunc && serviceIds ? getElemIdFunc(JIRA, serviceIds, defaultName).name : defaultName return new InstanceElement( instanceName, contextType, context, parentField.path && [...parentField.path, 'contexts', pathNaclCase(instanceName)], { [CORE_ANNOTATIONS.PARENT]: [new ReferenceExpression(parentField.elemID, parentField)], } ) } /** * Converts the field structure to what expected structure of the deployment endpoints and * converts list with hidden values to maps */ const filter: FilterCreator = ({ config, getElemIdFunc }) => ({ onFetch: async (elements: Element[]) => { const types = _(elements) .filter(isObjectType) .keyBy(element => element.elemID.name) .value() const fieldType = types[FIELD_TYPE_NAME] const fieldContextType = types[FIELD_CONTEXT_TYPE_NAME] const fieldContextDefaultValueType = types[FIELD_CONTEXT_DEFAULT_TYPE_NAME] const fieldContextOptionType = types[FIELD_CONTEXT_OPTION_TYPE_NAME] const missingTypes = [ fieldType === undefined ? FIELD_TYPE_NAME : undefined, fieldContextType === undefined ? FIELD_CONTEXT_TYPE_NAME : undefined, fieldContextDefaultValueType === undefined ? FIELD_CONTEXT_DEFAULT_TYPE_NAME : undefined, fieldContextOptionType === undefined ? FIELD_CONTEXT_OPTION_TYPE_NAME : undefined, ].filter(values.isDefined) if (missingTypes.length) { log.warn(`Missing types for field structure filter: ${missingTypes.join(', ')}, skipping`) return } fieldType.fields.type = new Field(fieldType, 'type', BuiltinTypes.STRING) delete fieldType.fields.contextDefaults delete fieldType.fields.contextProjects delete fieldType.fields.contextIssueTypes fieldContextType.fields.projectIds = new Field(fieldContextType, 'projectIds', new ListType(BuiltinTypes.STRING)) fieldContextType.fields.issueTypeIds = new Field(fieldContextType, 'issueTypeIds', new ListType(BuiltinTypes.STRING)) fieldContextType.fields.defaultValue = new Field(fieldContextType, 'defaultValue', fieldContextDefaultValueType) fieldContextType.fields.options = new Field(fieldType, 'options', new MapType(fieldContextOptionType)) fieldContextOptionType.fields.position = new Field(fieldContextOptionType, 'position', BuiltinTypes.NUMBER) fieldContextOptionType.fields.cascadingOptions = new Field(fieldContextOptionType, 'cascadingOptions', new MapType(fieldContextOptionType)) elements .filter(isInstanceElement) .filter(instance => instance.elemID.typeName === FIELD_TYPE_NAME) .forEach(instance => { if (instance.value.isLocked === false) { delete instance.value.isLocked } addTypeValue(instance) const idToContext = _.keyBy(instance.value.contexts ?? 
[], context => context.id) addDefaultValuesToContexts(instance, idToContext) addIssueTypesToContexts(instance, idToContext) addProjectsToContexts(instance, idToContext) addCascadingOptionsToOptions(instance) transformOptionsToMap(instance) const contexts = (instance.value.contexts ?? []) .map((context: Values) => createContextInstance( context, fieldContextType, instance, config, getElemIdFunc, )) delete instance.value.contexts if (instance.path !== undefined) { instance.path = [...instance.path, instance.path[instance.path.length - 1]] } elements.push(...contexts) }) }, }) export default filter
main.rs
//! Example to show how to store data in the context //! and use it in command handlers //! //! Base: framework-example-bot //! Book chapter: user-data.md // src/main.rs use robespierre::framework::standard::{macros::command, CommandResult, FwContext}; use robespierre::framework::standard::{Command, CommandCodeFn, StandardFramework}; use robespierre::model::MessageExt; use robespierre::Authentication; use robespierre::CacheWrap; use robespierre::Context; use robespierre::EventHandlerWrap; use robespierre::FrameworkWrap; use robespierre::UserData; use robespierre_cache::CacheConfig; use robespierre_events::Connection; use robespierre_http::Http; use robespierre_models::channels::Message; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Arc; struct CommandCounterKey; impl robespierre::typemap::Key for CommandCounterKey { type Value = Arc<AtomicUsize>; } #[tokio::main] async fn main() -> Result<(), Box<dyn std::error::Error>> { tracing_subscriber::fmt::init(); let token = std::env::var("TOKEN") .expect("Cannot get token; set environment variable TOKEN=... and run again"); let auth = Authentication::bot(token); let http = Http::new(&auth).await?; let connection = Connection::connect(&auth).await?; let mut data = robespierre::typemap::ShareMap::custom(); data.insert::<CommandCounterKey>(Arc::new(AtomicUsize::new(0))); let context = Context::new(http, data).with_cache(CacheConfig::default()); let fw = StandardFramework::default() .configure(|c| c.prefix("!")) .group(|g| { g.name("General") .command(|| Command::new("ping", ping as CommandCodeFn)) .command(|| Command::new("command_counter", command_counter as CommandCodeFn)) }); let handler = FrameworkWrap::new(fw, Handler); let handler = CacheWrap::new(EventHandlerWrap::new(handler)); connection.run(context, handler).await?; Ok(()) } #[command] async fn ping(ctx: &FwContext, msg: &Message) -> CommandResult { msg.reply(ctx, "pong").await?; let data = ctx.data_lock_read().await; let counter = data.get::<CommandCounterKey>().unwrap(); counter.fetch_add(1, Ordering::SeqCst); Ok(()) } #[command] async fn command_counter(ctx: &FwContext, msg: &Message) -> CommandResult
{
    let data = ctx.data_lock_read().await;
    let counter = data.get::<CommandCounterKey>().unwrap();
    // this is itself a command, so fetch previous count and add one
    let count = counter.fetch_add(1, Ordering::SeqCst);
    msg.reply(
        ctx,
        format!("I received {} commands since I started running", count),
    )
    .await?;
    Ok(())
}
#[derive(Clone)] struct Handler; #[robespierre::async_trait] impl robespierre::EventHandler for Handler {}
main.go
package main import ( "context" "encoding/json" "fmt" "log" "time" "net/http" "os" "strings" dapr "github.com/dapr/go-sdk/client" "github.com/dapr/go-sdk/service/common" daprd "github.com/dapr/go-sdk/service/http" "github.com/pkg/errors" ) const (
	addInvokeHandlerError = "error adding invocation handler"
	startingServiceError  = "error starting service"
	clientCreateError     = "error creating Dapr client"
addSubscriptionError = "error subscribing to a topic" addInvocationError = "error creating invcation handler" methodName = "ping" ) var ( logger = log.New(os.Stdout, "", 0) address = getEnvVar("ADDRESS", ":8082") pubSubName = getEnvVar("PUBSUB_NAME", "pubsub") topicName = getEnvVar("TOPIC_NAME", "messages") storeName = getEnvVar("STORE_NAME", "store") client dapr.Client ) func main() { s := daprd.NewService(address) var clientErr error if client, clientErr = dapr.NewClient(); clientErr != nil { logger.Fatalf("%s: %v", clientCreateError, clientErr) } defer client.Close() if err := s.AddServiceInvocationHandler(methodName, invokeHandler); err != nil { logger.Fatalf("%s: %v", addInvocationError, err) } subscription := &common.Subscription{ PubsubName: pubSubName, Topic: topicName, Route: fmt.Sprintf("/%s", topicName), } if err := s.AddTopicEventHandler(subscription, eventHandler); err != nil { logger.Fatalf("%s: %v", addSubscriptionError, err) } if err := s.Start(); err != nil && err != http.ErrServerClosed { logger.Fatalf("%s: %v", startingServiceError, err) } } func invokeHandler(ctx context.Context, in *common.InvocationEvent) (out *common.Content, err error) { logger.Printf("Method %s invoked (ContentType:%s, Verb:%s, QueryString:%s, Data:%s)", methodName, in.ContentType, in.Verb, in.QueryString, in.Data) j := []byte(fmt.Sprintf(`{"on": %d, "greeting": "pong"}`, time.Now().UTC().UnixNano())) out = &common.Content{ContentType: in.ContentType, Data: j} return } func eventHandler(ctx context.Context, e *common.TopicEvent) (retry bool, err error) { logger.Printf("Event received (PubsubName:%s, Topic:%s, Data: %v", e.PubsubName, e.Topic, e.Data) data, ok := e.Data.([]byte) if !ok { data, err = json.Marshal(e.Data) if err != nil { return false, errors.Wrapf(err, "invalid data format: %T", e.Data) } } if err := client.SaveState(ctx, storeName, e.ID, data); err != nil { return false, errors.Wrapf(err, "error saving data to %s (%s)", storeName, data) } return false, nil } func getEnvVar(key, fallbackValue string) string { if val, ok := os.LookupEnv(key); ok { return strings.TrimSpace(val) } return fallbackValue }