Dataset schema (one row per fill-in-the-middle example; lengths are min/max over the split):

file_name : string, 3 to 137 characters
prefix    : string, 0 to 918k characters
suffix    : string, 0 to 962k characters
middle    : string, 0 to 812k characters
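Each row stores one fill-in-the-middle (FIM) split of a source file: concatenating prefix, middle, and suffix reproduces the file, and the middle is the span a completion model is asked to predict. A minimal Python sketch of reassembling a row (the toy row is illustrative, mirroring the completion_item.rs record below):

def reassemble(row: dict) -> str:
    # Concatenating the three spans reproduces the original file text.
    return row["prefix"] + row["middle"] + row["suffix"]

# Toy row mirroring the first record, whose hidden middle is the enum
# name `InsertTextFormat`.
row = {
    "prefix": "pub enum ",
    "middle": "InsertTextFormat",
    "suffix": " { PlainText, Snippet, }",
}
assert reassemble(row) == "pub enum InsertTextFormat { PlainText, Snippet, }"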
completion_item.rs
use std::fmt; use hir::Documentation; use ra_syntax::TextRange; use ra_text_edit::{TextEdit, TextEditBuilder}; /// `CompletionItem` describes a single completion variant in the editor pop-up. /// It is basically a POD with various properties. To construct a /// `CompletionItem`, use the `new` method and the `Builder` struct. pub struct CompletionItem { /// Used only internally in tests, to check only a specific kind of /// completion (postfix, keyword, reference, etc). #[allow(unused)] completion_kind: CompletionKind, /// Label in the completion pop-up which identifies the completion. label: String, /// Range of the identifier that is being completed. /// /// It should be used primarily for UI, but we also use this to convert /// a generic TextEdit into LSP's completion edit (see conv.rs). /// /// `source_range` must contain the completion offset. `insert_text` should /// start with what `source_range` points to, or VSCode will filter out the /// completion silently. source_range: TextRange, /// What happens when the user selects this item. /// /// Typically, replaces `source_range` with a new identifier. text_edit: TextEdit, insert_text_format: InsertTextFormat, /// What item (struct, function, etc) are we completing. kind: Option<CompletionItemKind>, /// Lookup is used to check if the completion item can indeed complete the /// current ident. /// /// That is, in `foo.bar<|>` a lookup of `abracadabra` will be accepted (it /// contains `bar` as a subsequence), and `quux` will be rejected. lookup: Option<String>, /// Additional info to show in the UI pop-up. detail: Option<String>, documentation: Option<Documentation>, } // We use custom debug for CompletionItem to make `insta`'s diffs more readable. impl fmt::Debug for CompletionItem { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let mut s = f.debug_struct("CompletionItem"); s.field("label", &self.label()).field("source_range", &self.source_range()); if self.text_edit().as_atoms().len() == 1 { let atom = &self.text_edit().as_atoms()[0]; s.field("delete", &atom.delete); s.field("insert", &atom.insert); } else { s.field("text_edit", &self.text_edit); } if let Some(kind) = self.kind().as_ref() { s.field("kind", kind); } if self.lookup() != self.label() { s.field("lookup", &self.lookup()); } if let Some(detail) = self.detail() { s.field("detail", &detail); } if let Some(documentation) = self.documentation() { s.field("documentation", &documentation); } s.finish() } } #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum CompletionItemKind { Snippet, Keyword, Module, Function, BuiltinType, Struct, Enum, EnumVariant, Binding, Field, Static, Const, Trait, TypeAlias, Method, TypeParam, Macro, } #[derive(Debug, PartialEq, Eq, Copy, Clone)] pub(crate) enum CompletionKind { /// Parser-based keyword completion. Keyword, /// Your usual "complete all valid identifiers". Reference, /// "Secret sauce" completions. Magic, Snippet, Postfix, BuiltinType, } #[derive(Debug, PartialEq, Eq, Copy, Clone)] pub enum
{ PlainText, Snippet, } impl CompletionItem { pub(crate) fn new( completion_kind: CompletionKind, source_range: TextRange, label: impl Into<String>, ) -> Builder { let label = label.into(); Builder { source_range, completion_kind, label, insert_text: None, insert_text_format: InsertTextFormat::PlainText, detail: None, documentation: None, lookup: None, kind: None, text_edit: None, } } /// What the user sees in the pop-up in the UI. pub fn label(&self) -> &str { &self.label } pub fn source_range(&self) -> TextRange { self.source_range } pub fn insert_text_format(&self) -> InsertTextFormat { self.insert_text_format } pub fn text_edit(&self) -> &TextEdit { &self.text_edit } /// Short one-line additional information, like a type pub fn detail(&self) -> Option<&str> { self.detail.as_ref().map(|it| it.as_str()) } /// A doc-comment pub fn documentation(&self) -> Option<Documentation> { self.documentation.clone() } /// What string is used for filtering. pub fn lookup(&self) -> &str { self.lookup.as_ref().map(|it| it.as_str()).unwrap_or_else(|| self.label()) } pub fn kind(&self) -> Option<CompletionItemKind> { self.kind } } /// A helper to make `CompletionItem`s. #[must_use] pub(crate) struct Builder { source_range: TextRange, completion_kind: CompletionKind, label: String, insert_text: Option<String>, insert_text_format: InsertTextFormat, detail: Option<String>, documentation: Option<Documentation>, lookup: Option<String>, kind: Option<CompletionItemKind>, text_edit: Option<TextEdit>, } impl Builder { pub(crate) fn add_to(self, acc: &mut Completions) { acc.add(self.build()) } pub(crate) fn build(self) -> CompletionItem { let label = self.label; let text_edit = match self.text_edit { Some(it) => it, None => { let mut builder = TextEditBuilder::default(); builder .replace(self.source_range, self.insert_text.unwrap_or_else(|| label.clone())); builder.finish() } }; CompletionItem { source_range: self.source_range, label, insert_text_format: self.insert_text_format, text_edit, detail: self.detail, documentation: self.documentation, lookup: self.lookup, kind: self.kind, completion_kind: self.completion_kind, } } pub(crate) fn lookup_by(mut self, lookup: impl Into<String>) -> Builder { self.lookup = Some(lookup.into()); self } pub(crate) fn insert_text(mut self, insert_text: impl Into<String>) -> Builder { self.insert_text = Some(insert_text.into()); self } pub(crate) fn insert_snippet(mut self, snippet: impl Into<String>) -> Builder { self.insert_text_format = InsertTextFormat::Snippet; self.insert_text(snippet) } pub(crate) fn kind(mut self, kind: CompletionItemKind) -> Builder { self.kind = Some(kind); self } pub(crate) fn text_edit(mut self, edit: TextEdit) -> Builder { self.text_edit = Some(edit); self } pub(crate) fn snippet_edit(mut self, edit: TextEdit) -> Builder { self.insert_text_format = InsertTextFormat::Snippet; self.text_edit(edit) } #[allow(unused)] pub(crate) fn detail(self, detail: impl Into<String>) -> Builder { self.set_detail(Some(detail)) } pub(crate) fn set_detail(mut self, detail: Option<impl Into<String>>) -> Builder { self.detail = detail.map(Into::into); self } #[allow(unused)] pub(crate) fn documentation(self, docs: Documentation) -> Builder { self.set_documentation(Some(docs)) } pub(crate) fn set_documentation(mut self, docs: Option<Documentation>) -> Builder { self.documentation = docs.map(Into::into); self } } impl<'a> Into<CompletionItem> for Builder { fn into(self) -> CompletionItem { self.build() } } /// Represents an in-progress set of completions being built.
#[derive(Debug, Default)] pub(crate) struct Completions { buf: Vec<CompletionItem>, } impl Completions { pub(crate) fn add(&mut self, item: impl Into<CompletionItem>) { self.buf.push(item.into()) } pub(crate) fn add_all<I>(&mut self, items: I) where I: IntoIterator, I::Item: Into<CompletionItem>, { items.into_iter().for_each(|item| self.add(item.into())) } } impl Into<Vec<CompletionItem>> for Completions { fn into(self) -> Vec<CompletionItem> { self.buf } } #[cfg(test)] pub(crate) fn do_completion(code: &str, kind: CompletionKind) -> Vec<CompletionItem> { use crate::completion::completions; use crate::mock_analysis::{analysis_and_position, single_file_with_position}; let (analysis, position) = if code.contains("//-") { analysis_and_position(code) } else { single_file_with_position(code) }; let completions = completions(&analysis.db, position).unwrap(); let completion_items: Vec<CompletionItem> = completions.into(); let mut kind_completions: Vec<CompletionItem> = completion_items.into_iter().filter(|c| c.completion_kind == kind).collect(); kind_completions.sort_by_key(|c| c.label.clone()); kind_completions } #[cfg(test)] pub(crate) fn check_completion(test_name: &str, code: &str, kind: CompletionKind) { use insta::assert_debug_snapshot_matches; let kind_completions = do_completion(code, kind); assert_debug_snapshot_matches!(test_name, kind_completions); }
InsertTextFormat
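The `lookup` doc comment in this record describes subsequence filtering: in `foo.bar<|>`, the candidate `abracadabra` is accepted because it contains `bar` as a subsequence, while `quux` is rejected. A minimal Python sketch of that acceptance rule (an illustration of the rule the comment states, not rust-analyzer's actual matcher):

def is_subsequence(needle: str, haystack: str) -> bool:
    # True when every character of `needle` occurs in `haystack` in
    # order, though not necessarily contiguously.
    it = iter(haystack)
    return all(ch in it for ch in needle)

assert is_subsequence("bar", "abracadabra")  # b, a, r appear in order
assert not is_subsequence("bar", "quux")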
script.go
package main import ( "io" "net/http" ) func script(w http.ResponseWriter, r *http.Request)
{ w.Header().Set("Content-Type", "application/javascript") io.WriteString(w, `"use strict" let AtomKV = { BASE: new URL(document.currentScript.src).origin, get: function(key) { let url = this.BASE + key return new Promise(function(resolve, reject) { let xhr = new XMLHttpRequest() xhr.onload = function() { if (xhr.status == 404) { resolve([undefined, -1]) } else { let revision = Number(xhr.getResponseHeader('X-Revision')) resolve([JSON.parse(xhr.responseText), revision]) } } xhr.onerror = function() { reject(xhr.responseText) } xhr.open('GET', url, true) xhr.send() }) }, set: function(key, value) { let url = this.BASE + key return new Promise(function(resolve, reject) { let xhr = new XMLHttpRequest() xhr.onload = function() { resolve() } xhr.onerror = function() { reject(xhr.responseText) } xhr.open('POST', url, true) xhr.send(JSON.stringify(value)) }) }, update: function(key, value, revision) { let url = this.BASE + key return new Promise(function(resolve, reject) { let xhr = new XMLHttpRequest() xhr.onload = function() { resolve(xhr.status == 200) } xhr.onerror = function() { reject(xhr.responseText) } xhr.open('PUT', url, true) xhr.setRequestHeader('X-Revision', String(revision)) xhr.send(JSON.stringify(value)) }) }, subscribe: async function*(keypath) { let resolve = null let promise = null function reset() { promise = new Promise(function(r) {resolve = r}) } reset() let sse = new EventSource(this.BASE + keypath) sse.onmessage = function(event) { let value = JSON.parse(event.data) let [key, revision] = event.lastEventId.split(/:/) resolve([key, value, Number(revision)]) reset() } try { for (;;) { yield await promise } } finally { sse.close() } } }`) }
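The handler above serves a browser-side key-value client whose `update` is an optimistic compare-and-swap: GET returns the value together with an X-Revision header, and a PUT carrying X-Revision succeeds (HTTP 200) only if the stored revision still matches. A Python sketch of the same wire protocol, assuming a server at a placeholder BASE URL (the JS derives its base from the script's own URL, and resolves 404s to [undefined, -1]):

import json
import urllib.error
import urllib.request

BASE = "http://localhost:8000"  # placeholder origin, an assumption

def kv_get(key: str):
    # Mirrors AtomKV.get: returns (value, revision); a 404 becomes (None, -1).
    try:
        with urllib.request.urlopen(BASE + key) as resp:
            return json.loads(resp.read()), int(resp.headers["X-Revision"])
    except urllib.error.HTTPError as err:
        if err.code == 404:
            return None, -1
        raise

def kv_update(key: str, value, revision: int) -> bool:
    # Mirrors AtomKV.update: a compare-and-swap PUT that succeeds only
    # when the stored revision still equals `revision`.
    req = urllib.request.Request(
        BASE + key,
        data=json.dumps(value).encode(),
        method="PUT",
        headers={"X-Revision": str(revision)},
    )
    try:
        with urllib.request.urlopen(req) as resp:
            return resp.status == 200
    except urllib.error.HTTPError:
        return False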
sysconfig.py
"""Provide access to Python's configuration information. The specific configuration variables available depend heavily on the platform and configuration. The values may be retrieved using get_config_var(name), and the list of variables is available via get_config_vars().keys(). Additional convenience functions are also available. Written by: Fred L. Drake, Jr. Email: <[email protected]> """ import _imp import os import re import sys from .errors import DistutilsPlatformError # These are needed in a couple of spots, so just compute them once. PREFIX = os.path.normpath(sys.prefix) EXEC_PREFIX = os.path.normpath(sys.exec_prefix) BASE_PREFIX = os.path.normpath(sys.base_prefix) BASE_EXEC_PREFIX = os.path.normpath(sys.base_exec_prefix) # Path to the base directory of the project. On Windows the binary may # live in project/PCBuild/win32 or project/PCBuild/amd64. # set for cross builds if "_PYTHON_PROJECT_BASE" in os.environ: project_base = os.path.abspath(os.environ["_PYTHON_PROJECT_BASE"]) else: project_base = os.path.dirname(os.path.abspath(sys.executable)) if (os.name == 'nt' and project_base.lower().endswith(('\\pcbuild\\win32', '\\pcbuild\\amd64'))): project_base = os.path.dirname(os.path.dirname(project_base)) # python_build: (Boolean) if true, we're either building Python or # building an extension with an un-installed Python, so we use # different (hard-wired) directories. # Setup.local is available for Makefile builds including VPATH builds, # Setup.dist is available on Windows def _is_python_source_dir(d): for fn in ("Setup.dist", "Setup.local"): if os.path.isfile(os.path.join(d, "Modules", fn)): return True return False _sys_home = getattr(sys, '_home', None) if (_sys_home and os.name == 'nt' and _sys_home.lower().endswith(('\\pcbuild\\win32', '\\pcbuild\\amd64'))): _sys_home = os.path.dirname(os.path.dirname(_sys_home)) def _python_build(): if _sys_home: return _is_python_source_dir(_sys_home) return _is_python_source_dir(project_base) python_build = _python_build() # Calculate the build qualifier flags if they are defined. Adding the flags # to the include and lib directories only makes sense for an installation, not # an in-source build. build_flags = '' try: if not python_build: build_flags = sys.abiflags except AttributeError: # It's not a configure-based build, so the sys module doesn't have # this attribute, which is fine. pass def get_python_version(): """Return a string containing the major and minor Python version, leaving off the patchlevel. Sample return values could be '1.5' or '2.2'. """ return '%d.%d' % sys.version_info[:2] def get_python_inc(plat_specific=0, prefix=None): """Return the directory containing installed Python header files. If 'plat_specific' is false (the default), this is the path to the non-platform-specific header files, i.e. Python.h and so on; otherwise, this is the path to platform-specific header files (namely pyconfig.h). If 'prefix' is supplied, use it instead of sys.base_prefix or sys.base_exec_prefix -- i.e., ignore 'plat_specific'. """ if prefix is None: prefix = plat_specific and BASE_EXEC_PREFIX or BASE_PREFIX if os.name == "posix": if python_build: # Assume the executable is in the build directory. The # pyconfig.h file should be in the same directory. Since # the build directory may not be the source directory, we # must use "srcdir" from the makefile to find the "Include" # directory. 
base = _sys_home or project_base if plat_specific: return base if _sys_home: incdir = os.path.join(_sys_home, get_config_var('AST_H_DIR')) else: incdir = os.path.join(get_config_var('srcdir'), 'Include') return os.path.normpath(incdir) python_dir = 'python' + get_python_version() + build_flags return os.path.join(prefix, "include", python_dir) elif os.name == "nt": return os.path.join(prefix, "include") else: raise DistutilsPlatformError( "I don't know where Python installs its C header files " "on platform '%s'" % os.name) def get_python_lib(plat_specific=0, standard_lib=0, prefix=None): """Return the directory containing the Python library (standard or site additions). If 'plat_specific' is true, return the directory containing platform-specific modules, i.e. any module from a non-pure-Python module distribution; otherwise, return the platform-shared library directory. If 'standard_lib' is true, return the directory containing standard Python library modules; otherwise, return the directory for site-specific modules. If 'prefix' is supplied, use it instead of sys.base_prefix or sys.base_exec_prefix -- i.e., ignore 'plat_specific'. """ if prefix is None: if standard_lib: prefix = plat_specific and BASE_EXEC_PREFIX or BASE_PREFIX else: prefix = plat_specific and EXEC_PREFIX or PREFIX if os.name == "posix": libpython = os.path.join(prefix, "lib", "python" + get_python_version()) if standard_lib: return libpython else: return os.path.join(libpython, "site-packages") elif os.name == "nt": if standard_lib: return os.path.join(prefix, "Lib") else: return os.path.join(prefix, "Lib", "site-packages") else: raise DistutilsPlatformError( "I don't know where Python installs its library " "on platform '%s'" % os.name) def customize_compiler(compiler): """Do any platform-specific customization of a CCompiler instance. Mainly needed on Unix, so we can plug in the information that varies across Unices and is stored in Python's Makefile. """ if compiler.compiler_type == "unix": if sys.platform == "darwin": # Perform first-time customization of compiler-related # config vars on OS X now that we know we need a compiler. # This is primarily to support Pythons from binary # installers. The kind and paths to build tools on # the user system may vary significantly from the system # that Python itself was built on. Also the user OS # version and build tools may not support the same set # of CPU architectures for universal builds. global _config_vars # Use get_config_var() to ensure _config_vars is initialized. 
if not get_config_var('CUSTOMIZED_OSX_COMPILER'): import _osx_support _osx_support.customize_compiler(_config_vars) _config_vars['CUSTOMIZED_OSX_COMPILER'] = 'True' (cc, cxx, opt, cflags, ccshared, ldshared, shlib_suffix, ar, ar_flags) = \ get_config_vars('CC', 'CXX', 'OPT', 'CFLAGS', 'CCSHARED', 'LDSHARED', 'SHLIB_SUFFIX', 'AR', 'ARFLAGS') if 'CC' in os.environ: newcc = os.environ['CC'] if (sys.platform == 'darwin' and 'LDSHARED' not in os.environ and ldshared.startswith(cc)): # On OS X, if CC is overridden, use that as the default # command for LDSHARED as well ldshared = newcc + ldshared[len(cc):] cc = newcc if 'CXX' in os.environ: cxx = os.environ['CXX'] if 'LDSHARED' in os.environ: ldshared = os.environ['LDSHARED'] if 'CPP' in os.environ: cpp = os.environ['CPP'] else: cpp = cc + " -E" # not always if 'LDFLAGS' in os.environ: ldshared = ldshared + ' ' + os.environ['LDFLAGS'] if 'CFLAGS' in os.environ: cflags = opt + ' ' + os.environ['CFLAGS'] ldshared = ldshared + ' ' + os.environ['CFLAGS'] if 'CPPFLAGS' in os.environ: cpp = cpp + ' ' + os.environ['CPPFLAGS'] cflags = cflags + ' ' + os.environ['CPPFLAGS'] ldshared = ldshared + ' ' + os.environ['CPPFLAGS'] if 'AR' in os.environ: ar = os.environ['AR'] if 'ARFLAGS' in os.environ: archiver = ar + ' ' + os.environ['ARFLAGS'] else: archiver = ar + ' ' + ar_flags cc_cmd = cc + ' ' + cflags compiler.set_executables( preprocessor=cpp, compiler=cc_cmd, compiler_so=cc_cmd + ' ' + ccshared, compiler_cxx=cxx, linker_so=ldshared, linker_exe=cc, archiver=archiver) compiler.shared_lib_extension = shlib_suffix def get_config_h_filename(): """Return full pathname of installed pyconfig.h file.""" if python_build: if os.name == "nt": inc_dir = os.path.join(_sys_home or project_base, "PC") else: inc_dir = _sys_home or project_base else: inc_dir = get_python_inc(plat_specific=1) return os.path.join(inc_dir, 'pyconfig.h') def get_makefile_filename(): """Return full pathname of installed Makefile from the Python build.""" if python_build: return os.path.join(_sys_home or project_base, "Makefile") lib_dir = get_python_lib(plat_specific=0, standard_lib=1) config_file = 'config-{}{}'.format(get_python_version(), build_flags) if hasattr(sys.implementation, '_multiarch'): config_file += '-%s' % sys.implementation._multiarch return os.path.join(lib_dir, config_file, 'Makefile') def parse_config_h(fp, g=None): """Parse a config.h-style file. A dictionary containing name/value pairs is returned. If an optional dictionary is passed in as the second argument, it is used instead of a new dictionary. """ if g is None: g = {} define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n") undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n") # while True: line = fp.readline() if not line: break m = define_rx.match(line) if m: n, v = m.group(1, 2) try: v = int(v) except ValueError: pass g[n] = v else: m = undef_rx.match(line) if m: g[m.group(1)] = 0 return g # Regexes needed for parsing Makefile (and similar syntaxes, # like old-style Setup files). _variable_rx = re.compile("([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)") _findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)") _findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}") def parse_makefile(fn, g=None): """Parse a Makefile-style file. A dictionary containing name/value pairs is returned. If an optional dictionary is passed in as the second argument, it is used instead of a new dictionary. 
""" from distutils.text_file import TextFile fp = TextFile(fn, strip_comments=1, skip_blanks=1, join_lines=1, errors="surrogateescape") if g is None: g = {} done = {} notdone = {} while True: line = fp.readline() if line is None: # eof break m = _variable_rx.match(line) if m: n, v = m.group(1, 2) v = v.strip() # `$$' is a literal `$' in make tmpv = v.replace('$$', '') if "$" in tmpv: notdone[n] = v else: try: v = int(v) except ValueError: # insert literal `$' done[n] = v.replace('$$', '$') else: done[n] = v # Variables with a 'PY_' prefix in the makefile. These need to # be made available without that prefix through sysconfig. # Special care is needed to ensure that variable expansion works, even # if the expansion uses the name without a prefix. renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS') # do variable interpolation here while notdone: for name in list(notdone): value = notdone[name] m = _findvar1_rx.search(value) or _findvar2_rx.search(value) if m: n = m.group(1) found = True if n in done: item = str(done[n]) elif n in notdone: # get it on a subsequent round found = False elif n in os.environ: # do it like make: fall back to environment item = os.environ[n] elif n in renamed_variables: if name.startswith('PY_') and name[3:] in renamed_variables: item = "" elif 'PY_' + n in notdone: found = False else: item = str(done['PY_' + n]) else: done[n] = item = "" if found: after = value[m.end():] value = value[:m.start()] + item + after if "$" in after: notdone[name] = value else: try: value = int(value) except ValueError: done[name] = value.strip() else: done[name] = value del notdone[name] if name.startswith('PY_') \ and name[3:] in renamed_variables: name = name[3:] if name not in done: done[name] = value else: # bogus variable reference; just drop it since we can't deal del notdone[name] fp.close() # strip spurious spaces for k, v in done.items(): if isinstance(v, str): done[k] = v.strip() # save the results in the global dictionary g.update(done) return g def expand_makefile_vars(s, vars): """Expand Makefile-style variables -- "${foo}" or "$(foo)" -- in 'string' according to 'vars' (a dictionary mapping variable names to values). Variables not present in 'vars' are silently expanded to the empty string. The variable values in 'vars' should not contain further variable expansions; if 'vars' is the output of 'parse_makefile()', you're fine. Returns a variable-expanded version of 's'. """ # This algorithm does multiple expansion, so if vars['foo'] contains # "${bar}", it will expand ${foo} to ${bar}, and then expand # ${bar}... and so forth. This is fine as long as 'vars' comes from # 'parse_makefile()', which takes care of such expansions eagerly, # according to make's variable expansion semantics. 
while True: m = _findvar1_rx.search(s) or _findvar2_rx.search(s) if m: (beg, end) = m.span() s = s[0:beg] + vars.get(m.group(1)) + s[end:] else: break return s _config_vars = None def _init_posix(): """Initialize the module as appropriate for POSIX systems.""" # _sysconfigdata is generated at build time, see the sysconfig module name = '_sysconfigdata_' + sys.abiflags _temp = __import__(name, globals(), locals(), ['build_time_vars'], 0) build_time_vars = _temp.build_time_vars global _config_vars _config_vars = {} _config_vars.update(build_time_vars) def _init_nt(): """Initialize the module as appropriate for NT""" g = {} # set basic install directories g['LIBDEST'] = get_python_lib(plat_specific=0, standard_lib=1) g['BINLIBDEST'] = get_python_lib(plat_specific=1, standard_lib=1) # XXX hmmm.. a normal install puts include files here g['INCLUDEPY'] = get_python_inc(plat_specific=0) g['EXT_SUFFIX'] = _imp.extension_suffixes()[0] g['EXE'] = ".exe" g['VERSION'] = get_python_version().replace(".", "") g['BINDIR'] = os.path.dirname(os.path.abspath(sys.executable)) global _config_vars _config_vars = g def get_config_vars(*args): """With no arguments, return a dictionary of all configuration variables relevant for the current platform. Generally this includes everything needed to build extensions and install both pure modules and extensions. On Unix, this means every variable defined in Python's installed Makefile; on Windows it's a much smaller set. With arguments, return a list of values that result from looking up each argument in the configuration variable dictionary. """ global _config_vars if _config_vars is None: func = globals().get("_init_" + os.name) if func: func() else: _config_vars = {} # Normalized versions of prefix and exec_prefix are handy to have; # in fact, these are the standard versions used most places in the # Distutils. _config_vars['prefix'] = PREFIX _config_vars['exec_prefix'] = EXEC_PREFIX # For backward compatibility, see issue19555 SO = _config_vars.get('EXT_SUFFIX') if SO is not None: _config_vars['SO'] = SO # Always convert srcdir to an absolute path srcdir = _config_vars.get('srcdir', project_base) if os.name == 'posix': if python_build: # If srcdir is a relative path (typically '.' or '..') # then it should be interpreted relative to the directory # containing Makefile. base = os.path.dirname(get_makefile_filename()) srcdir = os.path.join(base, srcdir) else: # srcdir is not meaningful since the installation is # spread about the filesystem. We choose the # directory containing the Makefile since we know it # exists. srcdir = os.path.dirname(get_makefile_filename()) _config_vars['srcdir'] = os.path.abspath(os.path.normpath(srcdir)) # Convert srcdir into an absolute path if it appears necessary. # Normally it is relative to the build directory. However, during # testing, for example, we might be running a non-installed python # from a different directory. if python_build and os.name == "posix": base = project_base if (not os.path.isabs(_config_vars['srcdir']) and base != os.getcwd()): # srcdir is relative and we are not in the same directory # as the executable. Assume executable is in the build # directory and make srcdir absolute. 
srcdir = os.path.join(base, _config_vars['srcdir']) _config_vars['srcdir'] = os.path.normpath(srcdir) # OS X platforms require special customization to handle # multi-architecture, multi-os-version installers if sys.platform == 'darwin': import _osx_support _osx_support.customize_config_vars(_config_vars) if args: vals = [] for name in args: vals.append(_config_vars.get(name)) return vals else: return _config_vars def get_config_var(name):
"""Return the value of a single variable using the dictionary returned by 'get_config_vars()'. Equivalent to get_config_vars().get(name) """ if name == 'SO': import warnings warnings.warn('SO is deprecated, use EXT_SUFFIX', DeprecationWarning, 2) return get_config_vars().get(name)
url_mappings.go
package app import ( "github.com/MurmurationsNetwork/MurmurationsServices/services/library/internal/controller/http" "github.com/MurmurationsNetwork/MurmurationsServices/services/library/internal/repository/db" "github.com/MurmurationsNetwork/MurmurationsServices/services/library/internal/service" "github.com/gin-gonic/gin" ) func
(router *gin.Engine) { schemaHandler := http.NewSchemaHandler(service.NewSchemaService(db.NewSchemaRepo())) router.GET("/schemas", schemaHandler.Search) pingHandler := http.NewPingHandler() router.GET("/ping", pingHandler.Ping) }
mapUrls
test.rs
use rust_cef_derive::{ CefExtensions, CefHeaderDeviceEventClassID, CefHeaderDeviceProduct, CefHeaderDeviceVendor, CefHeaderDeviceVersion, CefHeaderName, CefHeaderSeverity, CefHeaderVersion, ToCef, }; use rust_cef::{CefExtensions, CefHeaderName, CefHeaderVersion, ToCef}; use std::collections::HashMap; use std::fmt::{Display, Formatter, Result as FmtResult}; use time::OffsetDateTime; #[test] fn test_cef_fixed_headers_fails() { let _t = trybuild::TestCases::new(); } #[test] fn test_cef_fixed_headers() { let sh = SingleHeader {}; assert_eq!(sh.cef_header_version().unwrap(), "4234"); let ma = MultipleAttrs {}; assert_eq!(ma.cef_header_version().unwrap(), "3424"); assert_eq!(ma.cef_header_name().unwrap(), "name1"); let mh = MultipleHeaders {}; assert_eq!(mh.cef_header_version().unwrap(), "3235"); assert_eq!(mh.cef_header_name().unwrap(), "name2"); } #[test] fn test_to_cef_with_fixed_headers_and_custom_extensions() { let t = AllFixedHeadersCustomExtensions {}; assert_eq!( t.to_cef().unwrap(), "CEF:0|polyverse|zerotect|V1|LinuxKernelFault|Linux Kernel Fault|10|extension1=value1" ) } #[test] fn test_to_cef_with_fixed_and_manual_headers() { let t = ManualAndFixedHeaders {}; assert_eq!( t.to_cef().unwrap(), "CEF:customVersion|polyverse|zerotect|V1|LinuxKernelFault|Linux Kernel Fault|10|" ) } #[test] fn test_cef_extensions() { let n1 = NameStruct { name: "WillBeRenamed".to_owned(), }; let mut collector = HashMap::<String, String>::new(); assert!(n1.cef_extensions(&mut collector).is_ok()); assert_eq!( collector.get(&"newname".to_owned()), Some(&"WillBeRenamed".to_owned()) ); // Header implementation still works assert_eq!(n1.cef_header_name().unwrap(), "WillBeRenamed"); let n2 = NameInheritorStruct { name_struct: NameStruct { name: "NS1".to_owned(), }, name_struct2: Some(NameStruct { name: "NS2".to_owned(), }), address: Some("An address of some sort".to_owned()), age: 42, }; let mut collector = HashMap::<String, String>::new(); assert!(n2.cef_extensions(&mut collector).is_ok()); assert_eq!( collector.get(&"newname".to_owned()), Some(&"NS2".to_owned()) ); assert_eq!( collector.get(&"address".to_owned()), Some(&"An address of some sort".to_owned()) ); assert_eq!( collector.get(&"name2".to_owned()), Some(&"NameStruct::NS1".to_owned()) ); assert_eq!( collector.get(&"person_age".to_owned()), Some(&"42".to_owned()) ); } #[test] fn test_complete_to_cef() { let v1 = Top::V1( "ClassId234".to_owned(), NameInheritorStruct { name_struct: NameStruct { name: "Test2".to_owned(), }, name_struct2: Some(NameStruct { name: "Test1".to_owned(), }), address: Some("Address".to_owned()), age: 87, }, 24, OffsetDateTime::from_unix_timestamp_nanos(735027350723000000), ); assert_eq!( v1.to_cef().unwrap(), "CEF:1|polyverse|zerotect|V1|ClassId234|NameInheritorStruct::NameStruct::Test2|24|EnumV1Field=fixedExtensionsValue TopEnumField=fixedExtensionsValue TopStructField=fixedExtensionsValue address=Address name2=NameStruct::Test2 newname=Test1 person_age=87 rt=735027350723 top_name=ClassId234" ); let v2 = Top::V2 { event_class: "ClassId234", name_impl: NameInheritorStruct { name_struct: NameStruct { name: "Test2".to_owned(), }, name_struct2: Some(NameStruct { name: "Test1".to_owned(), }), address: Some("Address2".to_owned()), age: 78, }, severity: 85, unused: 20, timestamp: OffsetDateTime::from_unix_timestamp_nanos(326262362000000), }; assert_eq!( v2.to_cef().unwrap(), "CEF:1|polyverse|zerotect|V2|ClassId234|Test2|85|EnumV2Field=fixedExtensionsValue EventClassNewName=ClassId234 TopEnumField=fixedExtensionsValue 
TopStructField=fixedExtensionsValue address=Address2 name2=NameStruct::Test2 newname=Test1 person_age=78 rt=326262362 severity=85" ); let v2 = Top::V2 { event_class: "ClassId234", name_impl: NameInheritorStruct { name_struct: NameStruct { name: "Test2".to_owned(), }, name_struct2: Some(NameStruct { name: "Test1".to_owned(), }), address: None, age: 78, }, severity: 85, unused: 20, timestamp: OffsetDateTime::from_unix_timestamp_nanos(9893486324000000), }; assert_eq!( v2.to_cef().unwrap(), "CEF:1|polyverse|zerotect|V2|ClassId234|Test2|85|EnumV2Field=fixedExtensionsValue EventClassNewName=ClassId234 TopEnumField=fixedExtensionsValue TopStructField=fixedExtensionsValue name2=NameStruct::Test2 newname=Test1 person_age=78 rt=9893486324 severity=85" ); } /**************************** Test Structs ******************************************/ #[derive(CefHeaderVersion, CefHeaderName)] #[cef_values(CefHeaderVersion = "3235", CefHeaderName = "name2")] struct
{} #[derive(CefHeaderVersion)] #[cef_values(CefHeaderVersion = "3424")] #[cef_values(CefHeaderName = "name1")] #[derive(CefHeaderName)] struct MultipleAttrs {} #[derive(CefHeaderVersion)] #[cef_values(CefHeaderVersion = "4234")] struct SingleHeader {} #[derive( CefHeaderVersion, CefHeaderDeviceVendor, CefHeaderDeviceProduct, CefHeaderDeviceVersion, CefHeaderDeviceEventClassID, CefHeaderName, CefHeaderSeverity, )] #[cef_values( CefHeaderVersion = "0", CefHeaderDeviceVendor = "polyverse", CefHeaderDeviceProduct = "zerotect", CefHeaderDeviceVersion = "V1", CefHeaderDeviceEventClassID = "LinuxKernelFault", CefHeaderName = "Linux Kernel Fault", CefHeaderSeverity = "10" )] #[derive(ToCef)] struct AllFixedHeadersCustomExtensions {} impl CefExtensions for AllFixedHeadersCustomExtensions { fn cef_extensions( &self, collector: &mut HashMap<String, String>, ) -> rust_cef::CefExtensionsResult { collector.insert("extension1".to_owned(), "value1".to_owned()); Ok(()) } } #[derive( CefHeaderDeviceVendor, CefHeaderDeviceProduct, CefHeaderDeviceVersion, CefHeaderDeviceEventClassID, CefHeaderName, CefHeaderSeverity, )] #[cef_values( CefHeaderDeviceVendor = "polyverse", CefHeaderDeviceProduct = "zerotect" )] #[cef_values( CefHeaderName = "Linux Kernel Fault", CefHeaderSeverity = "10", CefHeaderDeviceVersion = "V1", CefHeaderDeviceEventClassID = "LinuxKernelFault" )] #[derive(ToCef, CefExtensions)] struct ManualAndFixedHeaders {} impl CefHeaderVersion for ManualAndFixedHeaders { fn cef_header_version(&self) -> rust_cef::CefResult { Ok("customVersion".to_owned()) } } #[derive( CefHeaderVersion, CefHeaderDeviceVendor, CefHeaderDeviceVersion, CefHeaderDeviceEventClassID, CefHeaderName, CefHeaderDeviceProduct, CefHeaderSeverity, CefExtensions, )] #[cef_values( CefHeaderVersion = "1", CefHeaderDeviceVendor = "polyverse", CefHeaderDeviceProduct = "zerotect" )] #[derive(ToCef)] #[cef_ext_values(TopEnumField = "fixedExtensionsValue")] enum Top { // Name will use the display trait, rather than inheriting the CefHeaderName trait #[cef_values(CefHeaderDeviceVersion = "V1")] #[cef_ext_values(EnumV1Field = "fixedExtensionsValue")] V1( #[cef_field(CefHeaderDeviceEventClassID)] #[cef_ext_field(top_name)] String, #[cef_field(CefHeaderName)] #[cef_ext_gobble] NameInheritorStruct, #[cef_field(CefHeaderSeverity)] usize, #[cef_ext_gobble] OffsetDateTime, ), #[cef_values(CefHeaderDeviceVersion = "V2")] #[cef_ext_values(EnumV2Field = "fixedExtensionsValue")] V2 { #[cef_field(CefHeaderDeviceEventClassID)] #[cef_ext_field(EventClassNewName)] event_class: &'static str, #[cef_inherit(CefHeaderName)] #[cef_ext_gobble] name_impl: NameInheritorStruct, #[cef_ext_field] #[cef_field(CefHeaderSeverity)] severity: usize, #[cef_ext_gobble] timestamp: OffsetDateTime, // `#[allow(dead_code)]` is an attribute that disables the `dead_code` lint #[allow(dead_code)] unused: usize, }, } #[derive(CefHeaderName)] struct TupleStule(#[cef_inherit(CefHeaderName)] NameStruct); #[derive(CefHeaderName, CefExtensions)] #[cef_ext_values(TopStructField = "fixedExtensionsValue")] struct NameInheritorStruct { // using // #[cef_ext_field] // would do: name_struct.to_string() // but we want to gobble extension field's created inside NameStruct #[cef_ext_field(name2)] #[cef_inherit(CefHeaderName)] pub name_struct: NameStruct, #[cef_ext_field] pub address: Option<String>, #[cef_ext_gobble] pub name_struct2: Option<NameStruct>, #[cef_ext_field(person_age)] pub age: usize, } impl Display for NameInheritorStruct { fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { 
write!(f, "NameInheritorStruct::{}", self.name_struct) } } #[derive(CefHeaderName, CefExtensions)] struct NameStruct { // use the field's name #[cef_ext_field(newname)] #[cef_field(CefHeaderName)] pub name: String, } impl Display for NameStruct { fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { write!(f, "NameStruct::{}", self.name) } }
MultipleHeaders
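The expected strings in these tests follow the CEF layout the derive macros emit: seven pipe-separated headers (Version, DeviceVendor, DeviceProduct, DeviceVersion, DeviceEventClassID, Name, Severity) followed by space-separated key=value extensions in sorted key order. A Python sketch of that serialization (just the format the assertions encode, not the macro's generated code):

def to_cef(version, vendor, product, dev_version, event_class, name,
           severity, extensions):
    # Pipe-separated headers, then sorted space-separated extensions.
    header = "|".join([f"CEF:{version}", vendor, product, dev_version,
                       event_class, name, str(severity)])
    ext = " ".join(f"{k}={v}" for k, v in sorted(extensions.items()))
    return f"{header}|{ext}"

line = to_cef("0", "polyverse", "zerotect", "V1", "LinuxKernelFault",
              "Linux Kernel Fault", 10, {"extension1": "value1"})
assert line == ("CEF:0|polyverse|zerotect|V1|LinuxKernelFault"
                "|Linux Kernel Fault|10|extension1=value1")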
util.go
/* Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package util import ( "fmt" "net" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" utilnet "k8s.io/apimachinery/pkg/util/net" restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" federationapi "k8s.io/kubernetes/federation/apis/federation" fedclient "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/apis/rbac" client "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" kubectlcmd "k8s.io/kubernetes/pkg/kubectl/cmd" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "github.com/spf13/cobra" "github.com/spf13/pflag" ) const ( // KubeconfigSecretDataKey is the key name used in the secret to // store a cluster's credentials. KubeconfigSecretDataKey = "kubeconfig" // Used to create the kube-dns configmap storing the zone info FedDomainMapKey = "federations" KubeDnsConfigmapName = "kube-dns" FedDNSZoneName = "dns-zone-name" FedNameServer = "nameserver" FedDNSProvider = "dns-provider" FedDNSProviderCoreDNS = "coredns" KubeDnsStubDomains = "stubDomains" // DefaultFederationSystemNamespace is the namespace in which // federation system components are hosted. DefaultFederationSystemNamespace = "federation-system" // Used to build a clientset for a cluster using the secret userAgentName = "kubefed-tool" KubeAPIQPS = 20.0 KubeAPIBurst = 30 rbacAPINotAvailable = "RBAC API not available" ) // NoRBACAPIError is used to identify the RBAC API availability error. type NoRBACAPIError struct { s string } func (n *NoRBACAPIError) Error() string { return n.s } // AdminConfig provides a filesystem based kubeconfig (via // `PathOptions()`) and a mechanism to talk to the federation // host cluster and the federation control plane api server. type AdminConfig interface { // PathOptions provides filesystem based kubeconfig access. PathOptions() *clientcmd.PathOptions // FederationClientset provides a federation API compliant clientset // to communicate with the federation control plane api server FederationClientset(context, kubeconfigPath string) (*fedclient.Clientset, error) // ClusterFactory provides a mechanism to communicate with the // cluster derived from the context and the kubeconfig. ClusterFactory(context, kubeconfigPath string) cmdutil.Factory } // adminConfig implements the AdminConfig interface. type adminConfig struct { pathOptions *clientcmd.PathOptions } // NewAdminConfig creates an admin config for `kubefed` commands.
func NewAdminConfig(pathOptions *clientcmd.PathOptions) AdminConfig { return &adminConfig{ pathOptions: pathOptions, } } func (a *adminConfig) PathOptions() *clientcmd.PathOptions { return a.pathOptions } func (a *adminConfig) FederationClientset(context, kubeconfigPath string) (*fedclient.Clientset, error) { fedConfig := a.getClientConfig(context, kubeconfigPath) fedClientConfig, err := fedConfig.ClientConfig() if err != nil { return nil, err } return fedclient.NewForConfigOrDie(fedClientConfig), nil } func (a *adminConfig) ClusterFactory(context, kubeconfigPath string) cmdutil.Factory { hostClientConfig := a.getClientConfig(context, kubeconfigPath) return cmdutil.NewFactory(hostClientConfig) } func (a *adminConfig) getClientConfig(context, kubeconfigPath string) clientcmd.ClientConfig { loadingRules := *a.pathOptions.LoadingRules loadingRules.Precedence = a.pathOptions.GetLoadingPrecedence() loadingRules.ExplicitPath = kubeconfigPath overrides := &clientcmd.ConfigOverrides{ CurrentContext: context, } return clientcmd.NewNonInteractiveDeferredLoadingClientConfig(&loadingRules, overrides) } // SubcommandOptions holds the configuration required by the subcommands of // `kubefed`. type SubcommandOptions struct { Name string Host string FederationSystemNamespace string Kubeconfig string } func (o *SubcommandOptions) Bind(flags *pflag.FlagSet) { flags.StringVar(&o.Kubeconfig, "kubeconfig", "", "Path to the kubeconfig file to use for CLI requests.") flags.StringVar(&o.Host, "host-cluster-context", "", "Host cluster context") flags.StringVar(&o.FederationSystemNamespace, "federation-system-namespace", DefaultFederationSystemNamespace, "Namespace in the host cluster where the federation system components are installed") } func (o *SubcommandOptions) SetName(cmd *cobra.Command, args []string) error { name, err := kubectlcmd.NameFromCommandArgs(cmd, args) if err != nil { return err } o.Name = name return nil } func CreateKubeconfigSecret(clientset client.Interface, kubeconfig *clientcmdapi.Config, namespace, name, federationName, clusterName string, dryRun bool) (*api.Secret, error) { configBytes, err := clientcmd.Write(*kubeconfig) if err != nil { return nil, err } annotations := map[string]string{ federationapi.FederationNameAnnotation: federationName, } if clusterName != "" { annotations[federationapi.ClusterNameAnnotation] = clusterName } // Build the secret object with the minified and flattened // kubeconfig content. secret := &api.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, Annotations: annotations, }, Data: map[string][]byte{ KubeconfigSecretDataKey: configBytes, }, } if !dryRun { return clientset.Core().Secrets(namespace).Create(secret) } return secret, nil } var kubeconfigGetterForSecret = func(secret *api.Secret) clientcmd.KubeconfigGetter { return func() (*clientcmdapi.Config, error) { var data []byte ok := false data, ok = secret.Data[KubeconfigSecretDataKey] if !ok { return nil, fmt.Errorf("secret does not have data with key: %s", KubeconfigSecretDataKey) } return clientcmd.Load(data) } } func GetClientsetFromSecret(secret *api.Secret, serverAddress string) (*client.Clientset, error) { clusterConfig, err := buildConfigFromSecret(secret, serverAddress) if err == nil && clusterConfig != nil
return nil, err } func GetServerAddress(c *federationapi.Cluster) (string, error) { hostIP, err := utilnet.ChooseHostInterface() if err != nil { return "", err } for _, item := range c.Spec.ServerAddressByClientCIDRs { _, cidrnet, err := net.ParseCIDR(item.ClientCIDR) if err != nil { return "", err } if cidrnet.Contains(hostIP) { return item.ServerAddress, nil } } return "", nil } func buildConfigFromSecret(secret *api.Secret, serverAddress string) (*restclient.Config, error) { var clusterConfig *restclient.Config var err error // Pre-1.7, the secret contained a serialized kubeconfig which contained appropriate credentials. // Post-1.7, the secret contains credentials for a service account. // Check for the service account credentials, and use them if they exist; if not, use the // serialized kubeconfig. token, tokenFound := secret.Data["token"] ca, caFound := secret.Data["ca.crt"] if tokenFound != caFound { return nil, fmt.Errorf("secret should have values for either both 'ca.crt' and 'token' in its Data, or neither: %v", secret) } else if tokenFound && caFound { clusterConfig, err = clientcmd.BuildConfigFromFlags(serverAddress, "") clusterConfig.CAData = ca clusterConfig.BearerToken = string(token) } else { kubeconfigGetter := kubeconfigGetterForSecret(secret) clusterConfig, err = clientcmd.BuildConfigFromKubeconfigGetter(serverAddress, kubeconfigGetter) } if err != nil { return nil, err } clusterConfig.QPS = KubeAPIQPS clusterConfig.Burst = KubeAPIBurst return clusterConfig, nil } // GetVersionedClientForRBACOrFail discovers the versioned rbac APIs and gets the versioned // clientset for either the preferred version or the first listed version (if no preference listed) // TODO: We need to evaluate the usage of the RESTMapper interface to achieve the same functionality func GetVersionedClientForRBACOrFail(hostFactory cmdutil.Factory) (client.Interface, error) { discoveryclient, err := hostFactory.DiscoveryClient() if err != nil { return nil, err } groupList, err := discoveryclient.ServerGroups() if err != nil { return nil, fmt.Errorf("Couldn't get clientset to create RBAC roles in the host cluster: %v", err) } for _, g := range groupList.Groups { if g.Name == rbac.GroupName { if g.PreferredVersion.GroupVersion != "" { gv, err := schema.ParseGroupVersion(g.PreferredVersion.GroupVersion) if err != nil { return nil, err } return hostFactory.ClientSetForVersion(&gv) } for _, version := range g.Versions { if version.GroupVersion != "" { gv, err := schema.ParseGroupVersion(version.GroupVersion) if err != nil { return nil, err } return hostFactory.ClientSetForVersion(&gv) } } } } return nil, &NoRBACAPIError{rbacAPINotAvailable} } // ClusterServiceAccountName returns the name of a service account // whose credentials are used by the host cluster to access the // client cluster. func ClusterServiceAccountName(joiningClusterName, hostContext string) string { return fmt.Sprintf("%s-%s", joiningClusterName, hostContext) } // ClusterRoleName returns the name of a ClusterRole and its associated // ClusterRoleBinding that are used to allow the service account to // access necessary resources on the cluster. func ClusterRoleName(serviceAccountName string) string { return fmt.Sprintf("federation-controller-manager:%s", serviceAccountName) }
{ clientset := client.NewForConfigOrDie(restclient.AddUserAgent(clusterConfig, userAgentName)) return clientset, nil }
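buildConfigFromSecret above encodes a compatibility rule: post-1.7 secrets carry `token` and `ca.crt` service-account credentials, pre-1.7 secrets carry a serialized kubeconfig under the `kubeconfig` key, and a secret with exactly one of `token`/`ca.crt` is rejected. A Python sketch of just that branching (the Go function's actual client construction is omitted):

def pick_credentials(data: dict) -> str:
    # `data` mirrors secret.Data; returns which path the Go code takes.
    has_token = "token" in data
    has_ca = "ca.crt" in data
    if has_token != has_ca:
        raise ValueError("secret should have values for either both "
                         "'ca.crt' and 'token' in its Data, or neither")
    if has_token and has_ca:
        return "service-account credentials"
    return "serialized kubeconfig"  # read from the 'kubeconfig' data key

assert pick_credentials({"token": b"t", "ca.crt": b"c"}) == "service-account credentials"
assert pick_credentials({"kubeconfig": b"..."}) == "serialized kubeconfig"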
i18n.py
""" sphinx.util.i18n ~~~~~~~~~~~~~~~~ Builder superclass for all builders. :copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS. :license: BSD, see LICENSE for details. """ import os import re from datetime import datetime, timezone from os import path from typing import TYPE_CHECKING, Callable, Generator, List, NamedTuple, Optional, Tuple, Union import babel.dates from babel.messages.mofile import write_mo from babel.messages.pofile import read_po from sphinx.errors import SphinxError from sphinx.locale import __ from sphinx.util import logging from sphinx.util.osutil import SEP, canon_path, relpath if TYPE_CHECKING: from sphinx.environment import BuildEnvironment logger = logging.getLogger(__name__) class LocaleFileInfoBase(NamedTuple): base_dir: str domain: str charset: str class CatalogInfo(LocaleFileInfoBase): @property def po_file(self) -> str: return self.domain + '.po' @property def mo_file(self) -> str: return self.domain + '.mo' @property def po_path(self) -> str: return path.join(self.base_dir, self.po_file) @property def mo_path(self) -> str: return path.join(self.base_dir, self.mo_file) def is_outdated(self) -> bool: return ( not path.exists(self.mo_path) or path.getmtime(self.mo_path) < path.getmtime(self.po_path)) def write_mo(self, locale: str) -> None: with open(self.po_path, encoding=self.charset) as file_po: try: po = read_po(file_po, locale) except Exception as exc: logger.warning(__('reading error: %s, %s'), self.po_path, exc) return with open(self.mo_path, 'wb') as file_mo: try: write_mo(file_mo, po) except Exception as exc: logger.warning(__('writing error: %s, %s'), self.mo_path, exc) class CatalogRepository: """A repository for message catalogs.""" def __init__(self, basedir: str, locale_dirs: List[str], language: str, encoding: str) -> None: self.basedir = basedir self._locale_dirs = locale_dirs self.language = language self.encoding = encoding @property def locale_dirs(self) -> Generator[str, None, None]: if not self.language: return for locale_dir in self._locale_dirs: locale_dir = path.join(self.basedir, locale_dir) locale_path = path.join(locale_dir, self.language, 'LC_MESSAGES') if path.exists(locale_path): yield locale_dir else: logger.verbose(__('locale_dir %s does not exists'), locale_path) @property def pofiles(self) -> Generator[Tuple[str, str], None, None]: for locale_dir in self.locale_dirs: basedir = path.join(locale_dir, self.language, 'LC_MESSAGES') for root, dirnames, filenames in os.walk(basedir): # skip dot-directories for dirname in dirnames: if dirname.startswith('.'): dirnames.remove(dirname) for filename in filenames: if filename.endswith('.po'): fullpath = path.join(root, filename) yield basedir, relpath(fullpath, basedir) @property def catalogs(self) -> Generator[CatalogInfo, None, None]: for basedir, filename in self.pofiles: domain = canon_path(path.splitext(filename)[0]) yield CatalogInfo(basedir, domain, self.encoding) def docname_to_domain(docname: str, compaction: Union[bool, str]) -> str: """Convert docname to domain for catalogs.""" if isinstance(compaction, str): return compaction if compaction: return docname.split(SEP, 1)[0] else: return docname # date_format mappings: ustrftime() to bable.dates.format_datetime() date_format_mappings = { '%a': 'EEE', # Weekday as locale’s abbreviated name. '%A': 'EEEE', # Weekday as locale’s full name. '%b': 'MMM', # Month as locale’s abbreviated name. '%B': 'MMMM', # Month as locale’s full name. '%c': 'medium', # Locale’s appropriate date and time representation. 
'%-d': 'd', # Day of the month as a decimal number. '%d': 'dd', # Day of the month as a zero-padded decimal number. '%-H': 'H', # Hour (24-hour clock) as a decimal number [0,23]. '%H': 'HH', # Hour (24-hour clock) as a zero-padded decimal number [00,23]. '%-I': 'h', # Hour (12-hour clock) as a decimal number [1,12]. '%I': 'hh', # Hour (12-hour clock) as a zero-padded decimal number [01,12]. '%-j': 'D', # Day of the year as a decimal number. '%j': 'DDD', # Day of the year as a zero-padded decimal number. '%-m': 'M', # Month as a decimal number. '%m': 'MM', # Month as a zero-padded decimal number. '%-M': 'm', # Minute as a decimal number [0,59]. '%M': 'mm', # Minute as a zero-padded decimal number [00,59]. '%p': 'a', # Locale’s equivalent of either AM or PM. '%-S': 's', # Second as a decimal number. '%S': 'ss', # Second as a zero-padded decimal number. '%U': 'WW', # Week number of the year (Sunday as the first day of the week) # as a zero padded decimal number. All days in a new year preceding # the first Sunday are considered to be in week 0. '%w': 'e', # Weekday as a decimal number, where 0 is Sunday and 6 is Saturday. '%-W': 'W', # Week number of the year (Monday as the first day of the week) # as a decimal number. All days in a new year preceding the first # Monday are considered to be in week 0. '%W': 'WW', # Week number of the year (Monday as the first day of the week) # as a zero-padded decimal number. '%x': 'medium', # Locale’s appropriate date representation. '%X': 'medium', # Locale’s appropriate time representation. '%y': 'YY', # Year without century as a zero-padded decimal number. '%Y': 'yyyy', # Year with century as a decimal number. '%Z': 'zzz', # Time zone name (no characters if no time zone exists). '%z': 'ZZZ', # UTC offset in the form ±HHMM[SS[.ffffff]] # (empty string if the object is naive). '%%': '%', } date_format_re = re.compile('(%s)' % '|'.join(date_format_mappings)) def babel_format_date(date: datetime, format: str, locale: Optional[str], formatter: Callable = babel.dates.format_date) -> str: if locale is None: locale = 'en' # Check if we have the tzinfo attribute. If not we cannot do any time # related formats. if not hasattr(date, 'tzinfo'): formatter = babel.dates.format_date try: return formatter(date, format, locale=locale) except (ValueError, babel.core.UnknownLocaleError): # fallback to English return formatter(date, format, locale='en') except AttributeError: logger.warning(__('Invalid date format. Quote the string by single quote ' 'if you want to output it directly: %s'), format) return format def format_date(format: str, date: datetime = None, language: Optional[str] = None) -> str: if date is None: # If time is not specified, try to use $SOURCE_DATE_EPOCH variable # See https://wiki.debian.org/ReproducibleBuilds/TimestampsProposal source_date_epoch = os.getenv('SOURCE_DATE_EPOCH') if source_date_epoch is not None: date = datetime.utcfromtimestamp(float(source_date_epoch)) else: date = datetime.now(timezone.utc).astimezone() result = [] tokens = date_format_re.split(format) for token in tokens: if token in date_format_mappings: babel_format = da
result.append(token) return "".join(result) def get_image_filename_for_language(filename: str, env: "BuildEnvironment") -> str: if not env.config.language: return filename filename_format = env.config.figure_language_filename d = dict() d['root'], d['ext'] = path.splitext(filename) dirname = path.dirname(d['root']) if dirname and not dirname.endswith(path.sep): dirname += path.sep docpath = path.dirname(env.docname) if docpath and not docpath.endswith(path.sep): docpath += path.sep d['path'] = dirname d['basename'] = path.basename(d['root']) d['docpath'] = docpath d['language'] = env.config.language try: return filename_format.format(**d) except KeyError as exc: raise SphinxError('Invalid figure_language_filename: %r' % exc) from exc def search_image_for_language(filename: str, env: "BuildEnvironment") -> str: if not env.config.language: return filename translated = get_image_filename_for_language(filename, env) _, abspath = env.relfn2path(translated) if path.exists(abspath): return translated else: return filename
te_format_mappings.get(token, '') # Check if we have to use a different babel formatter than # format_datetime, because we only want to format a date # or a time. if token == '%x': function = babel.dates.format_date elif token == '%X': function = babel.dates.format_time else: function = babel.dates.format_datetime result.append(babel_format_date(date, babel_format, locale=language, formatter=function)) else:
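date_format_re splits a strftime-style format into tokens; recognized tokens are swapped for their babel patterns while literal text passes through unchanged (the real format_date additionally formats each translated token through babel right away rather than returning a pattern string). A small sketch of the token translation step using a subset of the table above:

import re

mappings = {"%Y": "yyyy", "%m": "MM", "%d": "dd"}  # subset of date_format_mappings
token_re = re.compile("(%s)" % "|".join(mappings))

def to_babel(fmt: str) -> str:
    # Recognized strftime tokens become babel patterns; other characters
    # (the hyphens here) are kept verbatim.
    return "".join(mappings.get(tok, tok) for tok in token_re.split(fmt))

assert to_babel("%Y-%m-%d") == "yyyy-MM-dd"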
loop_expr.rs
use anyhow::{bail, Result}; use crate::backend::scopes::sym_table::SymbolTable; use crate::backend::scopes::symbol::{Symbol, SymbolClone}; use crate::backend::scopes::types::Type; use crate::backend::scopes::Scope; #[derive(Clone, Debug)] pub(crate) struct LoopExpr { pub(crate) decls: Vec<Box<dyn Symbol>>, pub(crate) expr: Box<dyn Symbol>, } impl LoopExpr { pub(crate) fn new(decls: Vec<Box<dyn Symbol>>, expr: Box<dyn Symbol>) -> Box<Self> { Box::new(LoopExpr { decls, expr }) } } impl Symbol for LoopExpr { fn get_name(&self) -> &str { "loop" } fn get_type(&self) -> Option<Box<dyn Type>> { self.expr.get_type() } fn set_type(&mut self, ty: Option<Box<dyn Type>>) { self.expr.set_type(ty) } fn
(&mut self, scope: &mut dyn Scope) -> Result<Option<Box<dyn Type>>> { let mut sym_table = SymbolTable::new("loop", Some(scope.clone_box())); sym_table.define(self.clone_box()); for d in self.decls.iter() { if sym_table.define(d.clone()).is_some() { bail!( "duplicated symbol '{}' in loop declaration", d.get_name() ); } } for d in self.decls.iter_mut() { d.resolve_type(&mut *sym_table)?; } self.expr.resolve_type(&mut *sym_table)?; for d in sym_table.get_symbols() { d.define_into(scope); } Ok(self.get_type()) } fn storable(&self) -> bool { true } }
resolve_type
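resolve_type works in two passes over a fresh child symbol table: every declaration is defined first, so a duplicate name is caught before any type resolution, then types are resolved with all declared names visible, and finally the resolved symbols are propagated back into the enclosing scope. A minimal Python sketch of that shape (a dict stands in for SymbolTable, and each declaration is a hypothetical (name, resolver) pair; this mirrors the structure, not the Rust types):

def resolve_loop(decls, enclosing: dict) -> dict:
    local = dict(enclosing)  # child scope seeded from the parent
    seen = set()
    # Pass 1: define every declaration, catching duplicates up front.
    for name, _ in decls:
        if name in seen:
            raise ValueError(f"duplicated symbol '{name}' in loop declaration")
        seen.add(name)
        local[name] = None
    # Pass 2: resolve each declaration with all declared names visible.
    for name, resolve in decls:
        local[name] = resolve(local)
    # Finally, propagate the resolved symbols into the enclosing scope.
    enclosing.update(local)
    return enclosing

scope = resolve_loop([("i", lambda s: "Int"), ("acc", lambda s: s["i"])], {})
assert scope == {"i": "Int", "acc": "Int"}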
forces_trainer.py
""" Copyright (c) Facebook, Inc. and its affiliates. This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree. """ import os from collections import defaultdict import numpy as np import torch import torch_geometric from torch.utils.data import DataLoader, DistributedSampler from tqdm import tqdm from ocpmodels.common import distutils from ocpmodels.common.data_parallel import ParallelCollater from ocpmodels.common.registry import registry from ocpmodels.common.relaxation.ml_relaxation import ml_relax from ocpmodels.common.utils import plot_histogram from ocpmodels.modules.evaluator import Evaluator from ocpmodels.modules.normalizer import Normalizer from ocpmodels.trainers.base_trainer import BaseTrainer @registry.register_trainer("forces") class ForcesTrainer(BaseTrainer): """ Trainer class for the Structure to Energy & Force (S2EF) and Initial State to Relaxed State (IS2RS) tasks. .. note:: Examples of configurations for task, model, dataset and optimizer can be found in `configs/ocp_s2ef <https://github.com/Open-Catalyst-Project/baselines/tree/master/configs/ocp_is2re/>`_ and `configs/ocp_is2rs <https://github.com/Open-Catalyst-Project/baselines/tree/master/configs/ocp_is2rs/>`_. Args: task (dict): Task configuration. model (dict): Model configuration. dataset (dict): Dataset configuration. The dataset needs to be a SinglePointLMDB dataset. optimizer (dict): Optimizer configuration. identifier (str): Experiment identifier that is appended to log directory. run_dir (str, optional): Path to the run directory where logs are to be saved. (default: :obj:`None`) is_debug (bool, optional): Run in debug mode. (default: :obj:`False`) is_vis (bool, optional): Run in debug mode. (default: :obj:`False`) print_every (int, optional): Frequency of printing logs. (default: :obj:`100`) seed (int, optional): Random number seed. (default: :obj:`None`) logger (str, optional): Type of logger to be used. (default: :obj:`tensorboard`) local_rank (int, optional): Local rank of the process, only applicable for distributed training. (default: :obj:`0`) amp (bool, optional): Run using automatic mixed precision. 
(default: :obj:`False`) """ def __init__( self, task, model, dataset, optimizer, identifier, run_dir=None, is_debug=False, is_vis=False, print_every=100, seed=None, logger="tensorboard", local_rank=0, amp=False, ): super().__init__( task=task, model=model, dataset=dataset, optimizer=optimizer, identifier=identifier, run_dir=run_dir, is_debug=is_debug, is_vis=is_vis, print_every=print_every, seed=seed, logger=logger, local_rank=local_rank, amp=amp, name="s2ef", ) def load_task(self): print("### Loading dataset: {}".format(self.config["task"]["dataset"])) self.parallel_collater = ParallelCollater( 1, self.config["model_attributes"].get("otf_graph", False) ) if self.config["task"]["dataset"] == "trajectory_lmdb": self.train_dataset = registry.get_dataset_class( self.config["task"]["dataset"] )(self.config["dataset"]) self.train_loader = DataLoader( self.train_dataset, batch_size=self.config["optim"]["batch_size"], shuffle=True, collate_fn=self.parallel_collater, num_workers=self.config["optim"]["num_workers"], pin_memory=True, ) self.val_loader = self.test_loader = None if "val_dataset" in self.config: self.val_dataset = registry.get_dataset_class( self.config["task"]["dataset"] )(self.config["val_dataset"]) self.val_loader = DataLoader( self.val_dataset, self.config["optim"].get("eval_batch_size", 64), shuffle=False, collate_fn=self.parallel_collater, num_workers=self.config["optim"]["num_workers"], pin_memory=True, ) if "test_dataset" in self.config: self.test_dataset = registry.get_dataset_class( self.config["task"]["dataset"] )(self.config["test_dataset"]) self.test_loader = DataLoader( self.test_dataset, self.config["optim"].get("eval_batch_size", 64), shuffle=False, collate_fn=self.parallel_collater, num_workers=self.config["optim"]["num_workers"], pin_memory=True, ) if "relax_dataset" in self.config["task"]: assert os.path.isfile( self.config["task"]["relax_dataset"]["src"] ) self.relax_dataset = registry.get_dataset_class( "single_point_lmdb" )(self.config["task"]["relax_dataset"]) self.relax_sampler = DistributedSampler( self.relax_dataset, num_replicas=distutils.get_world_size(), rank=distutils.get_rank(), shuffle=False, ) self.relax_loader = DataLoader( self.relax_dataset, batch_size=self.config["optim"].get("eval_batch_size", 64), collate_fn=self.parallel_collater, num_workers=self.config["optim"]["num_workers"], pin_memory=True, sampler=self.relax_sampler, ) else: self.dataset = registry.get_dataset_class( self.config["task"]["dataset"] )(self.config["dataset"]) ( self.train_loader, self.val_loader, self.test_loader, ) = self.dataset.get_dataloaders( batch_size=self.config["optim"]["batch_size"], collate_fn=self.parallel_collater, ) self.num_targets = 1 # Normalizer for the dataset. # Compute mean, std of training set labels. 
self.normalizers = {} if self.config["dataset"].get("normalize_labels", False): if "target_mean" in self.config["dataset"]: self.normalizers["target"] = Normalizer( mean=self.config["dataset"]["target_mean"], std=self.config["dataset"]["target_std"], device=self.device, ) else: self.normalizers["target"] = Normalizer( tensor=self.train_loader.dataset.data.y[ self.train_loader.dataset.__indices__ ], device=self.device, ) # If we're computing gradients wrt input, set mean of normalizer to 0 -- # since it is lost when compute dy / dx -- and std to forward target std if self.config["model_attributes"].get("regress_forces", True): if self.config["dataset"].get("normalize_labels", False): if "grad_target_mean" in self.config["dataset"]: self.normalizers["grad_target"] = Normalizer( mean=self.config["dataset"]["grad_target_mean"], std=self.config["dataset"]["grad_target_std"], device=self.device, ) else: self.normalizers["grad_target"] = Normalizer( tensor=self.train_loader.dataset.data.y[ self.train_loader.dataset.__indices__ ], device=self.device, ) self.normalizers["grad_target"].mean.fill_(0) if ( self.is_vis and self.config["task"]["dataset"] != "qm9" and distutils.is_master() ): # Plot label distribution. plots = [ plot_histogram( self.train_loader.dataset.data.y.tolist(), xlabel="{}/raw".format(self.config["task"]["labels"][0]), ylabel="# Examples", title="Split: train", ), plot_histogram( self.val_loader.dataset.data.y.tolist(), xlabel="{}/raw".format(self.config["task"]["labels"][0]), ylabel="# Examples", title="Split: val", ), plot_histogram( self.test_loader.dataset.data.y.tolist(), xlabel="{}/raw".format(self.config["task"]["labels"][0]), ylabel="# Examples", title="Split: test", ), ] self.logger.log_plots(plots) # Takes in a new data source and generates predictions on it. def predict( self, data_loader, per_image=True, results_file=None, disable_tqdm=True ): if distutils.is_master() and not disable_tqdm: print("### Predicting on test.") assert isinstance( data_loader, ( torch.utils.data.dataloader.DataLoader, torch_geometric.data.Batch, ), ) rank = distutils.get_rank() if isinstance(data_loader, torch_geometric.data.Batch): data_loader = [[data_loader]] self.model.eval() if self.normalizers is not None and "target" in self.normalizers: self.normalizers["target"].to(self.device) self.normalizers["grad_target"].to(self.device) predictions = {"id": [], "energy": [], "forces": []} for i, batch_list in tqdm( enumerate(data_loader), total=len(data_loader), position=rank, desc="device {}".format(rank), disable=disable_tqdm, ): with torch.cuda.amp.autocast(enabled=self.scaler is not None): out = self._forward(batch_list) if self.normalizers is not None and "target" in self.normalizers: out["energy"] = self.normalizers["target"].denorm(
) if per_image: atoms_sum = 0 systemids = [ str(i) + "_" + str(j) for i, j in zip( batch_list[0].sid.tolist(), batch_list[0].fid.tolist() ) ] predictions["id"].extend(systemids) predictions["energy"].extend( out["energy"].to(torch.float16).tolist() ) batch_natoms = torch.cat( [batch.natoms for batch in batch_list] ) batch_fixed = torch.cat([batch.fixed for batch in batch_list]) for natoms in batch_natoms: forces = ( out["forces"][atoms_sum : natoms + atoms_sum] .cpu() .detach() .to(torch.float16) .numpy() ) # evalAI only requires forces on free atoms if results_file is not None: _free_atoms = ( batch_fixed[atoms_sum : natoms + atoms_sum] == 0 ).tolist() forces = forces[_free_atoms] atoms_sum += natoms predictions["forces"].append(forces) else: predictions["energy"] = out["energy"].detach() predictions["forces"] = out["forces"].detach() return predictions predictions["forces"] = np.array(predictions["forces"], dtype=object) predictions["energy"] = np.array(predictions["energy"]) predictions["id"] = np.array(predictions["id"]) self.save_results(predictions, results_file, keys=["energy", "forces"]) return predictions def train(self): self.best_val_metric = -1.0 eval_every = self.config["optim"].get("eval_every", -1) primary_metric = self.config["task"].get( "primary_metric", self.evaluator.task_primary_metric[self.name] ) iters = 0 self.metrics = {} for epoch in range(self.config["optim"]["max_epochs"]): self.model.train() for i, batch in enumerate(self.train_loader): # Forward, loss, backward. with torch.cuda.amp.autocast(enabled=self.scaler is not None): out = self._forward(batch) loss = self._compute_loss(out, batch) loss = self.scaler.scale(loss) if self.scaler else loss self._backward(loss) scale = self.scaler.get_scale() if self.scaler else 1.0 # Compute metrics. self.metrics = self._compute_metrics( out, batch, self.evaluator, self.metrics, ) self.metrics = self.evaluator.update( "loss", loss.item() / scale, self.metrics ) # Print metrics, make plots. log_dict = {k: self.metrics[k]["metric"] for k in self.metrics} log_dict.update( {"epoch": epoch + (i + 1) / len(self.train_loader)} ) if ( i % self.config["cmd"]["print_every"] == 0 and distutils.is_master() ): log_str = [ "{}: {:.4f}".format(k, v) for k, v in log_dict.items() ] print(", ".join(log_str)) self.metrics = {} if self.logger is not None: self.logger.log( log_dict, step=epoch * len(self.train_loader) + i + 1, split="train", ) iters += 1 # Evaluate on val set every `eval_every` iterations. if eval_every != -1 and iters % eval_every == 0: if self.val_loader is not None: val_metrics = self.validate( split="val", epoch=epoch - 1 + (i + 1) / len(self.train_loader), ) if ( val_metrics[primary_metric]["metric"] > self.best_val_metric ): self.best_val_metric = val_metrics[primary_metric][ "metric" ] current_epoch = epoch + (i + 1) / len( self.train_loader ) self.save(current_epoch, val_metrics) if self.test_loader is not None: self.predict( self.test_loader, results_file="predictions", disable_tqdm=False, ) self.scheduler.step() torch.cuda.empty_cache() if eval_every == -1: if self.val_loader is not None: val_metrics = self.validate(split="val", epoch=epoch) if ( val_metrics[primary_metric]["metric"] > self.best_val_metric ): self.best_val_metric = val_metrics[primary_metric][ "metric" ] self.save(epoch + 1, val_metrics) if self.test_loader is not None: self.predict( self.test_loader, results_file="predictions", disable_tqdm=False, ) else: self.save(epoch + 1, self.metrics) def _forward(self, batch_list): # forward pass. 
if self.config["model_attributes"].get("regress_forces", True): out_energy, out_forces = self.model(batch_list) else: out_energy = self.model(batch_list) if out_energy.shape[-1] == 1: out_energy = out_energy.view(-1) out = { "energy": out_energy, } if self.config["model_attributes"].get("regress_forces", True): out["forces"] = out_forces return out def _compute_loss(self, out, batch_list): loss = [] # Energy loss. energy_target = torch.cat( [batch.y.to(self.device) for batch in batch_list], dim=0 ) if self.config["dataset"].get("normalize_labels", False): energy_target = self.normalizers["target"].norm(energy_target) energy_mult = self.config["optim"].get("energy_coefficient", 1) loss.append(energy_mult * self.criterion(out["energy"], energy_target)) # Force loss. if self.config["model_attributes"].get("regress_forces", True): force_target = torch.cat( [batch.force.to(self.device) for batch in batch_list], dim=0 ) if self.config["dataset"].get("normalize_labels", False): force_target = self.normalizers["grad_target"].norm( force_target ) # Force coefficient = 30 has been working well for us. force_mult = self.config["optim"].get("force_coefficient", 30) if self.config["task"].get("train_on_free_atoms", False): fixed = torch.cat( [batch.fixed.to(self.device) for batch in batch_list] ) mask = fixed == 0 loss.append( force_mult * self.criterion(out["forces"][mask], force_target[mask]) ) else: loss.append( force_mult * self.criterion(out["forces"], force_target) ) # Sanity check to make sure the compute graph is correct. for lc in loss: assert hasattr(lc, "grad_fn") loss = sum(loss) return loss def _compute_metrics(self, out, batch_list, evaluator, metrics={}): natoms = torch.cat( [batch.natoms.to(self.device) for batch in batch_list], dim=0 ) target = { "energy": torch.cat( [batch.y.to(self.device) for batch in batch_list], dim=0 ), "forces": torch.cat( [batch.force.to(self.device) for batch in batch_list], dim=0 ), "natoms": natoms, } out["natoms"] = natoms if self.config["task"].get("eval_on_free_atoms", True): fixed = torch.cat( [batch.fixed.to(self.device) for batch in batch_list] ) mask = fixed == 0 out["forces"] = out["forces"][mask] target["forces"] = target["forces"][mask] s_idx = 0 natoms_free = [] for natoms in target["natoms"]: natoms_free.append( torch.sum(mask[s_idx : s_idx + natoms]).item() ) s_idx += natoms target["natoms"] = torch.LongTensor(natoms_free).to(self.device) out["natoms"] = torch.LongTensor(natoms_free).to(self.device) if self.config["dataset"].get("normalize_labels", False): out["energy"] = self.normalizers["target"].denorm(out["energy"]) out["forces"] = self.normalizers["grad_target"].denorm( out["forces"] ) metrics = evaluator.eval(out, target, prev_metrics=metrics) return metrics def run_relaxations(self, split="val", epoch=None): print("### Running ML-relaxations") self.model.eval() evaluator, metrics = Evaluator(task="is2rs"), {} if hasattr(self.relax_dataset[0], "pos_relaxed") and hasattr( self.relax_dataset[0], "y_relaxed" ): split = "val" else: split = "test" ids = [] relaxed_positions = [] for i, batch in tqdm( enumerate(self.relax_loader), total=len(self.relax_loader) ): relaxed_batch = ml_relax( batch=batch, model=self, steps=self.config["task"].get("relaxation_steps", 200), fmax=self.config["task"].get("relaxation_fmax", 0.0), relax_opt=self.config["task"]["relax_opt"], device=self.device, transform=None, ) if self.config["task"].get("write_pos", False): systemids = [str(i) for i in relaxed_batch.sid.tolist()] natoms = relaxed_batch.natoms.tolist() 
positions = torch.split(relaxed_batch.pos, natoms) batch_relaxed_positions = [pos.tolist() for pos in positions] relaxed_positions += batch_relaxed_positions ids += systemids if split == "val": mask = relaxed_batch.fixed == 0 s_idx = 0 natoms_free = [] for natoms in relaxed_batch.natoms: natoms_free.append( torch.sum(mask[s_idx : s_idx + natoms]).item() ) s_idx += natoms target = { "energy": relaxed_batch.y_relaxed, "positions": relaxed_batch.pos_relaxed[mask], "cell": relaxed_batch.cell, "pbc": torch.tensor([True, True, True]), "natoms": torch.LongTensor(natoms_free), } prediction = { "energy": relaxed_batch.y, "positions": relaxed_batch.pos[mask], "cell": relaxed_batch.cell, "pbc": torch.tensor([True, True, True]), "natoms": torch.LongTensor(natoms_free), } metrics = evaluator.eval(prediction, target, metrics) if self.config["task"].get("write_pos", False): rank = distutils.get_rank() pos_filename = os.path.join( self.config["cmd"]["results_dir"], f"relaxed_pos_{rank}.npz" ) np.savez_compressed( pos_filename, ids=ids, pos=np.array(relaxed_positions, dtype=object), ) distutils.synchronize() if distutils.is_master(): gather_results = defaultdict(list) full_path = os.path.join( self.config["cmd"]["results_dir"], "relaxed_positions.npz", ) for i in range(distutils.get_world_size()): rank_path = os.path.join( self.config["cmd"]["results_dir"], f"relaxed_pos_{i}.npz", ) rank_results = np.load(rank_path, allow_pickle=True) gather_results["ids"].extend(rank_results["ids"]) gather_results["pos"].extend(rank_results["pos"]) os.remove(rank_path) # Because of how distributed sampler works, some system ids # might be repeated to make no. of samples even across GPUs. _, idx = np.unique(gather_results["ids"], return_index=True) gather_results["ids"] = np.array(gather_results["ids"])[idx] gather_results["pos"] = np.array( gather_results["pos"], dtype=object )[idx] print(f"Writing results to {full_path}") np.savez_compressed(full_path, **gather_results) if split == "val": aggregated_metrics = {} for k in metrics: aggregated_metrics[k] = { "total": distutils.all_reduce( metrics[k]["total"], average=False, device=self.device ), "numel": distutils.all_reduce( metrics[k]["numel"], average=False, device=self.device ), } aggregated_metrics[k]["metric"] = ( aggregated_metrics[k]["total"] / aggregated_metrics[k]["numel"] ) metrics = aggregated_metrics # Make plots. log_dict = {k: metrics[k]["metric"] for k in metrics} if self.logger is not None and epoch is not None: self.logger.log( log_dict, step=(epoch + 1) * len(self.train_loader), split=split, ) if distutils.is_master(): print(metrics)
out["energy"] ) out["forces"] = self.normalizers["grad_target"].denorm( out["forces"]
plugins.gateway.ts
import { EventEmitter } from 'events'; import { UseGuards } from '@nestjs/common'; import { SubscribeMessage, WebSocketGateway, WsException } from '@nestjs/websockets'; import * as color from 'bash-color'; import { PluginsService } from './plugins.service'; import { Logger } from '../../core/logger/logger.service'; import { WsAdminGuard } from '../../core/auth/guards/ws-admin-guard'; @UseGuards(WsAdminGuard) @WebSocketGateway({ namespace: '/plugins' }) export class
{ constructor( private pluginsService: PluginsService, private logger: Logger, ) { } @SubscribeMessage('install') async installPlugin(client: EventEmitter, payload: string) { try { return await this.pluginsService.installPlugin(payload, client); } catch (e) { this.logger.error(e); client.emit('stdout', '\n\r' + color.red(e.toString()) + '\n\r'); return new WsException(e); } } @SubscribeMessage('uninstall') async uninstallPlugin(client: EventEmitter, payload: string) { try { return await this.pluginsService.uninstallPlugin(payload, client); } catch (e) { this.logger.error(e); client.emit('stdout', '\n\r' + color.red(e.toString()) + '\n\r'); return new WsException(e); } } @SubscribeMessage('update') async updatePlugin(client: EventEmitter, payload: string) { try { return await this.pluginsService.updatePlugin(payload, client); } catch (e) { this.logger.error(e); client.emit('stdout', '\n\r' + color.red(e.toString()) + '\n\r'); return new WsException(e); } } @SubscribeMessage('homebridge-update') async homebridgeUpdate(client: EventEmitter) { try { return await this.pluginsService.updateHomebridgePackage(client); } catch (e) { this.logger.error(e); client.emit('stdout', '\n\r' + color.red(e.toString()) + '\n\r'); return new WsException(e); } } }
PluginsGateway
response.go
// Copyright 2017-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). You may // not use this file except in compliance with the License. A copy of the // License is located at // // http://aws.amazon.com/apache2.0/ // // or in the "license" file accompanying this file. This file is distributed // on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either // express or implied. See the License for the specific language governing // permissions and limitations under the License. package v2 import ( "time" "github.com/aws/amazon-ecs-agent/agent/api" apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" apieni "github.com/aws/amazon-ecs-agent/agent/api/eni" "github.com/aws/amazon-ecs-agent/agent/containermetadata" "github.com/aws/amazon-ecs-agent/agent/engine/dockerstate" "github.com/aws/amazon-ecs-agent/agent/handlers/utils" v1 "github.com/aws/amazon-ecs-agent/agent/handlers/v1" "github.com/aws/aws-sdk-go/aws" "github.com/cihub/seelog" "github.com/pkg/errors" ) // TaskResponse defines the schema for the task response JSON object type TaskResponse struct { Cluster string `json:"Cluster"` TaskARN string `json:"TaskARN"` Family string `json:"Family"` Revision string `json:"Revision"` DesiredStatus string `json:"DesiredStatus,omitempty"` KnownStatus string `json:"KnownStatus"` Containers []ContainerResponse `json:"Containers,omitempty"` Limits *LimitsResponse `json:"Limits,omitempty"` PullStartedAt *time.Time `json:"PullStartedAt,omitempty"` PullStoppedAt *time.Time `json:"PullStoppedAt,omitempty"` ExecutionStoppedAt *time.Time `json:"ExecutionStoppedAt,omitempty"` AvailabilityZone string `json:"AvailabilityZone,omitempty"` TaskTags map[string]string `json:"TaskTags,omitempty"` ContainerInstanceTags map[string]string `json:"ContainerInstanceTags,omitempty"` } // ContainerResponse defines the schema for the container response // JSON object type ContainerResponse struct { ID string `json:"DockerId"` Name string `json:"Name"` DockerName string `json:"DockerName"` Image string `json:"Image"` ImageID string `json:"ImageID"` Ports []v1.PortResponse `json:"Ports,omitempty"` Labels map[string]string `json:"Labels,omitempty"` DesiredStatus string `json:"DesiredStatus"` KnownStatus string `json:"KnownStatus"` ExitCode *int `json:"ExitCode,omitempty"` Limits LimitsResponse `json:"Limits"` CreatedAt *time.Time `json:"CreatedAt,omitempty"` StartedAt *time.Time `json:"StartedAt,omitempty"` FinishedAt *time.Time `json:"FinishedAt,omitempty"` Type string `json:"Type"` Networks []containermetadata.Network `json:"Networks,omitempty"` Health *apicontainer.HealthStatus `json:"Health,omitempty"` Volumes []v1.VolumeResponse `json:"Volumes,omitempty"` } // LimitsResponse defines the schema for task/cpu limits response // JSON object type LimitsResponse struct { CPU *float64 `json:"CPU,omitempty"` Memory *int64 `json:"Memory,omitempty"` } // NewTaskResponse creates a new response object for the task func NewTaskResponse(taskARN string, state dockerstate.TaskEngineState, ecsClient api.ECSClient, cluster string, az string, containerInstanceArn string, propagateTags bool) (*TaskResponse, error) { task, ok := state.TaskByArn(taskARN) if !ok { return nil, errors.Errorf("v2 task response: unable to find task '%s'", taskARN) } resp := &TaskResponse{ Cluster: cluster, TaskARN: task.Arn, Family: task.Family, Revision: task.Version, DesiredStatus: task.GetDesiredStatus().String(), KnownStatus: 
task.GetKnownStatus().String(), AvailabilityZone: az, } taskCPU := task.CPU taskMemory := task.Memory if taskCPU != 0 || taskMemory != 0 { taskLimits := &LimitsResponse{} if taskCPU != 0 { taskLimits.CPU = &taskCPU } if taskMemory != 0 { taskLimits.Memory = &taskMemory } resp.Limits = taskLimits } if timestamp := task.GetPullStartedAt(); !timestamp.IsZero()
if timestamp := task.GetPullStoppedAt(); !timestamp.IsZero() { resp.PullStoppedAt = aws.Time(timestamp.UTC()) } if timestamp := task.GetExecutionStoppedAt(); !timestamp.IsZero() { resp.ExecutionStoppedAt = aws.Time(timestamp.UTC()) } containerNameToDockerContainer, ok := state.ContainerMapByArn(task.Arn) if !ok { seelog.Warnf("V2 task response: unable to get container name mapping for task '%s'", task.Arn) return resp, nil } eni := task.GetTaskENI() for _, dockerContainer := range containerNameToDockerContainer { containerResponse := newContainerResponse(dockerContainer, eni, state) resp.Containers = append(resp.Containers, containerResponse) } if propagateTags { propagateTagsToMetadata(state, ecsClient, containerInstanceArn, taskARN, resp) } return resp, nil } func propagateTagsToMetadata(state dockerstate.TaskEngineState, ecsClient api.ECSClient, containerInstanceArn, taskARN string, resp *TaskResponse) { containerInstanceTags, err := ecsClient.GetResourceTags(containerInstanceArn) if err == nil { resp.ContainerInstanceTags = make(map[string]string) for _, tag := range containerInstanceTags { resp.ContainerInstanceTags[*tag.Key] = *tag.Value } } else { seelog.Errorf("Could not get container instance tags for %s: %s", containerInstanceArn, err.Error()) } taskTags, err := ecsClient.GetResourceTags(taskARN) if err == nil { resp.TaskTags = make(map[string]string) for _, tag := range taskTags { resp.TaskTags[*tag.Key] = *tag.Value } } else { seelog.Errorf("Could not get task tags for %s: %s", taskARN, err.Error()) } } // NewContainerResponse creates a new container response based on container id func NewContainerResponse(containerID string, state dockerstate.TaskEngineState) (*ContainerResponse, error) { dockerContainer, ok := state.ContainerByID(containerID) if !ok { return nil, errors.Errorf( "v2 container response: unable to find container '%s'", containerID) } task, ok := state.TaskByID(containerID) if !ok { return nil, errors.Errorf( "v2 container response: unable to find task for container '%s'", containerID) } resp := newContainerResponse(dockerContainer, task.GetTaskENI(), state) return &resp, nil } func newContainerResponse(dockerContainer *apicontainer.DockerContainer, eni *apieni.ENI, state dockerstate.TaskEngineState) ContainerResponse { container := dockerContainer.Container resp := ContainerResponse{ ID: dockerContainer.DockerID, Name: container.Name, DockerName: dockerContainer.DockerName, Image: container.Image, ImageID: container.ImageID, DesiredStatus: container.GetDesiredStatus().String(), KnownStatus: container.GetKnownStatus().String(), Limits: LimitsResponse{ CPU: aws.Float64(float64(container.CPU)), Memory: aws.Int64(int64(container.Memory)), }, Type: container.Type.String(), ExitCode: container.GetKnownExitCode(), Labels: container.GetLabels(), } // Write the container health status inside the container if dockerContainer.Container.HealthStatusShouldBeReported() { health := dockerContainer.Container.GetHealthStatus() resp.Health = &health } if createdAt := container.GetCreatedAt(); !createdAt.IsZero() { createdAt = createdAt.UTC() resp.CreatedAt = &createdAt } if startedAt := container.GetStartedAt(); !startedAt.IsZero() { startedAt = startedAt.UTC() resp.StartedAt = &startedAt } if finishedAt := container.GetFinishedAt(); !finishedAt.IsZero() { finishedAt = finishedAt.UTC() resp.FinishedAt = &finishedAt } for _, binding := range container.Ports { port := v1.PortResponse{ ContainerPort: binding.ContainerPort, Protocol: binding.Protocol.String(), } if eni == nil { 
port.HostPort = binding.HostPort } else { port.HostPort = port.ContainerPort } resp.Ports = append(resp.Ports, port) } if eni != nil { resp.Networks = []containermetadata.Network{ { NetworkMode: utils.NetworkModeAWSVPC, IPv4Addresses: eni.GetIPV4Addresses(), IPv6Addresses: eni.GetIPV6Addresses(), }, } } resp.Volumes = v1.NewVolumesResponse(dockerContainer) return resp }
{ resp.PullStartedAt = aws.Time(timestamp.UTC()) }
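These structs back the ECS task metadata endpoint v2, which containers reach at a fixed link-local address. A quick probe from inside a task might look like the following sketch (field names follow the JSON tags above; error handling omitted):

import requests

# Task metadata endpoint v2 is served at this well-known address.
resp = requests.get("http://169.254.170.2/v2/metadata", timeout=2)
task = resp.json()

print(task["Cluster"], task["TaskARN"], task["KnownStatus"])
for container in task.get("Containers", []):
    print(container["Name"], container["KnownStatus"], container.get("Limits"))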
manager.go
// // SPDX-License-Identifier: BSD-3-Clause // package redfish import ( "encoding/json" "fmt" "reflect" "github.com/stmcginnis/gofish/common" ) // CommandConnectTypesSupported is the command connection type. type CommandConnectTypesSupported string const ( // SSHCommandConnectTypesSupported The controller supports a Command // Shell connection using the SSH protocol. SSHCommandConnectTypesSupported CommandConnectTypesSupported = "SSH" // TelnetCommandConnectTypesSupported The controller supports a Command // Shell connection using the Telnet protocol. TelnetCommandConnectTypesSupported CommandConnectTypesSupported = "Telnet" // IPMICommandConnectTypesSupported The controller supports a Command // Shell connection using the IPMI Serial-over-LAN (SOL) protocol. IPMICommandConnectTypesSupported CommandConnectTypesSupported = "IPMI" // OemCommandConnectTypesSupported The controller supports a Command // Shell connection using an OEM-specific protocol. OemCommandConnectTypesSupported CommandConnectTypesSupported = "Oem" ) // GraphicalConnectTypesSupported is graphical connection type. type GraphicalConnectTypesSupported string const ( // KVMIPGraphicalConnectTypesSupported The controller supports a // Graphical Console connection using a KVM-IP (redirection of Keyboard, // Video, Mouse over IP) protocol. KVMIPGraphicalConnectTypesSupported GraphicalConnectTypesSupported = "KVMIP" // OemGraphicalConnectTypesSupported The controller supports a Graphical // Console connection using an OEM-specific protocol. OemGraphicalConnectTypesSupported GraphicalConnectTypesSupported = "Oem" ) // UIConsoleInfo contains information about GUI services. type UIConsoleInfo struct { ServiceEnabled bool MaxConcurrentSessions uint ConnectTypesSupported []string } // SerialConsole shall describe a Serial Console service of a manager. type SerialConsole struct { // ConnectTypesSupported shall be an array of the enumerations provided // here. SSH shall be included if the Secure Shell (SSH) protocol is // supported. Telnet shall be included if the Telnet protocol is supported. // IPMI shall be included if the IPMI (Serial-over-LAN) protocol is supported. ConnectTypesSupported []SerialConnectTypesSupported // MaxConcurrentSessions shall contain the // maximum number of concurrent service sessions supported by the // implementation. MaxConcurrentSessions int // ServiceEnabled is used for the service. The value shall be true if // enabled and false if disabled. ServiceEnabled bool } // ManagerType shall describe the function of this manager. The value // EnclosureManager shall be used if this manager controls one or more services // through aggregation. The value BMC shall be used if this manager represents a // traditional server management controller. The value ManagementController // shall be used if none of the other enumerations apply. type ManagerType string const ( // ManagementControllerManagerType A controller used primarily to monitor // or manage the operation of a device or system. ManagementControllerManagerType ManagerType = "ManagementController" // EnclosureManagerManagerType A controller which provides management // functions for a chassis or group of devices or systems. EnclosureManagerManagerType ManagerType = "EnclosureManager" // BMCManagerType A controller which provides management functions for a // single computer system. BMCManagerType ManagerType = "BMC" // RackManagerManagerType A controller which provides management // functions for a whole or part of a rack. 
RackManagerManagerType ManagerType = "RackManager" // AuxiliaryControllerManagerType A controller which provides management // functions for a particular subsystem or group of devices. AuxiliaryControllerManagerType ManagerType = "AuxiliaryController" // ServiceManagerType A software-based service which provides management // functions. ServiceManagerType ManagerType = "Service" ) // ResetToDefaultsType is the default to set on reset. type ResetToDefaultsType string const ( // ResetAllResetToDefaultsType Reset all settings to factory defaults. ResetAllResetToDefaultsType ResetToDefaultsType = "ResetAll" // PreserveNetworkAndUsersResetToDefaultsType Reset all settings except // network and local user names/passwords to factory defaults. PreserveNetworkAndUsersResetToDefaultsType ResetToDefaultsType = "PreserveNetworkAndUsers" // PreserveNetworkResetToDefaultsType Reset all settings except network // settings to factory defaults. PreserveNetworkResetToDefaultsType ResetToDefaultsType = "PreserveNetwork" ) // SerialConnectTypesSupported is serial connection type. type SerialConnectTypesSupported string const ( // SSHSerialConnectTypesSupported The controller supports a Serial // Console connection using the SSH protocol. SSHSerialConnectTypesSupported SerialConnectTypesSupported = "SSH" // TelnetSerialConnectTypesSupported The controller supports a Serial // Console connection using the Telnet protocol. TelnetSerialConnectTypesSupported SerialConnectTypesSupported = "Telnet" // IPMISerialConnectTypesSupported The controller supports a Serial // Console connection using the IPMI Serial-over-LAN (SOL) protocol. IPMISerialConnectTypesSupported SerialConnectTypesSupported = "IPMI" // OemSerialConnectTypesSupported The controller supports a Serial // Console connection using an OEM-specific protocol. OemSerialConnectTypesSupported SerialConnectTypesSupported = "Oem" ) // CommandShell shall describe a Command Shell service of a manager. type CommandShell struct { // ConnectTypesSupported shall be an array of the enumerations provided here. // SSH shall be included if the Secure Shell (SSH) protocol is supported. // Telnet shall be included if the Telnet protocol is supported. IPMI shall // be included if the IPMI (Serial-over-LAN) protocol is supported. ConnectTypesSupported []CommandConnectTypesSupported // MaxConcurrentSessions shall contain the maximum number of concurrent // service sessions supported by the implementation. MaxConcurrentSessions uint32 // ServiceEnabled is used for the service. The value shall be true if // enabled and false if disabled. ServiceEnabled bool } // GraphicalConsole shall describe a Graphical Console service of a manager. type GraphicalConsole struct { // ConnectTypesSupported shall be an array of the enumerations provided here. // RDP shall be included if the Remote Desktop (RDP) protocol is supported. // KVMIP shall be included if a vendor-define KVM-IP protocol is supported. ConnectTypesSupported []GraphicalConnectTypesSupported // MaxConcurrentSessions shall contain the maximum number of concurrent // service sessions supported by the implementation. MaxConcurrentSessions uint32 // ServiceEnabled is used for the service. The value shall be true if // enabled and false if disabled. ServiceEnabled bool } // Manager is a management subsystem. Examples of managers are BMCs, Enclosure // Managers, Management Controllers and other subsystems assigned manageability // functions. type Manager struct { common.Entity // ODataContext is the odata context. 
ODataContext string `json:"@odata.context"`
	// ODataType is the odata type.
	ODataType string `json:"@odata.type"`
	// AutoDSTEnabled shall contain the enabled status of the automatic Daylight
	// Saving Time (DST) adjustment of the manager's DateTime. It shall be true
	// if Automatic DST adjustment is enabled and false if disabled.
	AutoDSTEnabled bool
	// CommandShell shall contain information
	// about the Command Shell service of this manager.
	CommandShell CommandShell
	// DateTime shall represent the current DateTime value for the manager, with
	// offset from UTC, in Redfish Timestamp format.
	DateTime string
	// DateTimeLocalOffset shall represent the offset from UTC that the
	// current value of the DateTime property contains.
	DateTimeLocalOffset string
	// Description provides a description of this resource.
	Description string
	// ethernetInterfaces shall be a link to a collection of type
	// EthernetInterfaceCollection.
	ethernetInterfaces string
	// FirmwareVersion shall contain the firmware version as defined by the
	// manufacturer for the associated manager.
	FirmwareVersion string
	// GraphicalConsole shall contain the information about the Graphical
	// Console (KVM-IP) service of this manager.
	GraphicalConsole GraphicalConsole
	// hostInterfaces shall be a link to a collection of type
	// HostInterfaceCollection.
	hostInterfaces string
	// logServices shall contain a reference to a collection of type
	// LogServiceCollection which are for the use of this manager.
	logServices string
	// ManagerType is used if this manager controls one or more services
	// through aggregation. The value BMC shall be used if this manager
	// represents a traditional server management controller. The value
	// ManagementController shall be used if none of the other enumerations
	// apply.
	ManagerType ManagerType
	// Manufacturer shall contain the name of the organization responsible for
	// producing the manager. This organization might be the entity from whom
	// the manager is purchased, but this is not necessarily true.
	Manufacturer string
	// Model shall contain the information about how the manufacturer references
	// this manager.
	Model string
	// networkProtocol shall contain a reference to a resource of type
	// ManagerNetworkProtocol which represents the network services for this
	// manager.
	networkProtocol string
	// PartNumber shall contain a part number assigned by the organization that
	// is responsible for producing or manufacturing the manager.
	PartNumber string
	// PowerState shall contain the power state of the Manager.
	PowerState PowerState
	// Redundancy is used to show how this manager is grouped with other
	// managers to form redundancy sets.
	Redundancy []Redundancy
	// RedundancyCount is the number of Redundancy objects.
	RedundancyCount int `json:"Redundancy@odata.count"`
	// remoteAccountService shall contain a reference to the
	// AccountService resource for the remote Manager represented by this
	// resource. This property shall only be present when providing
	// aggregation of Redfish services.
	remoteAccountService string
	// RemoteRedfishServiceURI shall contain the URI of the
	// Redfish Service Root for the remote Manager represented by this
	// resource. This property shall only be present when providing
	// aggregation of Redfish services.
	RemoteRedfishServiceURI string `json:"RemoteRedfishServiceUri"`
	// SerialConsole shall contain information about the Serial Console service
	// of this manager.
SerialConsole SerialConsole // serialInterfaces shall be a link to a collection of type // SerialInterfaceCollection which are for the use of this manager. serialInterfaces string // SerialNumber shall contain a manufacturer-allocated number that // identifies the manager. SerialNumber string // ServiceEntryPointUUID shall contain the UUID of the Redfish Service // provided by this manager. Each Manager providing an Entry Point to the // same Redfish Service shall report the same UUID value (even though the // name of the property may imply otherwise). This property shall not be // present if this manager does not provide a Redfish Service Entry Point. ServiceEntryPointUUID string // Status shall contain any status or health properties // of the resource. Status common.Status // UUID shall contain the universal unique // identifier number for the manager. UUID string // virtualMedia shall contain a reference to a collection of type // VirtualMediaCollection which are for the use of this manager. virtualMedia string // managerForChassis shall contain an array of references to Chassis // resources of which this Manager instance has control. managerForChassis []string // ManagerForChassisCount is the number of Chassis being managed. ManagerForChassisCount int // managerForServers shall contain an array of references to ComputerSystem // resources of which this Manager instance has control. managerForServers []string // ManagerForServersCount is the number of Servers being managed. ManagerForServersCount int // managerForSwitches shall contain an array of references to Switch // resources of which this Manager instance has control. managerForSwitches []string // ManagerForSwitchesCount is the number of Switches being managed. ManagerForSwitchesCount int // managerInChassis shall contain a reference to the chassis that this // manager is located in. managerInChassis string // resetTarget is the internal URL to send reset targets to. resetTarget string // SupportedResetTypes, if provided, is the reset types this system supports. SupportedResetTypes []ResetType // rawData holds the original serialized JSON so we can compare updates. rawData []byte } // UnmarshalJSON unmarshals a Manager object from the raw JSON. 
func (manager *Manager) UnmarshalJSON(b []byte) error {
	type temp Manager
	type actions struct {
		Reset struct {
			AllowedResetTypes []ResetType `json:"ResetType@Redfish.AllowedValues"`
			Target            string
		} `json:"#Manager.Reset"`
	}

	type linkReference struct {
		ManagerForChassis       common.Links
		ManagerForChassisCount  int `json:"ManagerForChassis@odata.count"`
		ManagerForServers       common.Links
		ManagerForServersCount  int `json:"ManagerForServers@odata.count"`
		ManagerForSwitches      common.Links
		ManagerForSwitchesCount int `json:"ManagerForSwitches@odata.count"`
		ManagerInChassis        common.Link
	}

	var t struct {
		temp
		EthernetInterfaces   common.Link
		LogServices          common.Link
		NetworkProtocol      common.Link
		RemoteAccountService common.Link
		SerialInterfaces     common.Link
		VirtualMedia         common.Link
		Links                linkReference
		Actions              actions
	}

	err := json.Unmarshal(b, &t)
	if err != nil {
		return err
	}

	// Extract the links to other entities
	*manager = Manager(t.temp)
	manager.ethernetInterfaces = string(t.EthernetInterfaces)
	manager.logServices = string(t.LogServices)
	manager.networkProtocol = string(t.NetworkProtocol)
	manager.remoteAccountService = string(t.RemoteAccountService)
	manager.serialInterfaces = string(t.SerialInterfaces)
	manager.virtualMedia = string(t.VirtualMedia)
	manager.managerForServers = t.Links.ManagerForServers.ToStrings()
	manager.ManagerForServersCount = t.Links.ManagerForServersCount
	manager.managerForChassis = t.Links.ManagerForChassis.ToStrings()
	manager.ManagerForChassisCount = t.Links.ManagerForChassisCount
	manager.ManagerForSwitchesCount = t.Links.ManagerForSwitchesCount
	manager.managerForSwitches = t.Links.ManagerForSwitches.ToStrings()
	manager.managerInChassis = string(t.Links.ManagerInChassis)
	manager.SupportedResetTypes = t.Actions.Reset.AllowedResetTypes
	manager.resetTarget = t.Actions.Reset.Target

	// This is a read/write object, so we need to save the raw object data for later
	manager.rawData = b

	return nil
}

// Update commits updates to this object's properties to the running system.
func (manager *Manager) Update() error {
	// Get a representation of the object's original state so we can find what
	// to update.
	original := new(Manager)
	if err := original.UnmarshalJSON(manager.rawData); err != nil {
		return err
	}

	readWriteFields := []string{
		"AutoDSTEnabled",
		"DateTime",
		"DateTimeLocalOffset",
	}

	originalElement := reflect.ValueOf(original).Elem()
	currentElement := reflect.ValueOf(manager).Elem()

	return manager.Entity.Update(originalElement, currentElement, readWriteFields)
}

// GetManager will get a Manager instance from the Redfish service.
func GetManager(c common.Client, uri string) (*Manager, error)
// ListReferencedManagers gets the collection of Managers func ListReferencedManagers(c common.Client, link string) ([]*Manager, error) { var result []*Manager links, err := common.GetCollection(c, link) if err != nil { return result, err } for _, managerLink := range links.ItemLinks { manager, err := GetManager(c, managerLink) if err != nil { return result, err } result = append(result, manager) } return result, nil } // Reset shall perform a reset of the manager. func (manager *Manager) Reset(resetType ResetType) error { if len(manager.SupportedResetTypes) == 0 { // reset directly without reset type. HPE server has the behavior type temp struct { Action string } t := temp{ Action: "Manager.Reset", } _, err := manager.Client.Post(manager.resetTarget, t) return err } // Make sure the requested reset type is supported by the manager. valid := false for _, allowed := range manager.SupportedResetTypes { if resetType == allowed { valid = true break } } if !valid { return fmt.Errorf("reset type '%s' is not supported by this manager", resetType) } type temp struct { ResetType ResetType } t := temp{ ResetType: resetType, } _, err := manager.Client.Post(manager.resetTarget, t) return err } // EthernetInterfaces get this system's ethernet interfaces. func (manager *Manager) EthernetInterfaces() ([]*EthernetInterface, error) { return ListReferencedEthernetInterfaces(manager.Client, manager.ethernetInterfaces) } // LogServices get this manager's log services on this system. func (manager *Manager) LogServices() ([]*LogService, error) { return ListReferencedLogServices(manager.Client, manager.logServices) }
{ resp, err := c.Get(uri) if err != nil { return nil, err } defer resp.Body.Close() var manager Manager err = json.NewDecoder(resp.Body).Decode(&manager) if err != nil { return nil, err } manager.SetClient(c) return &manager, nil }
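The Reset action above is plain Redfish: POST a ResetType to the target URI advertised under Actions, validating against the allowed-values list only when one is published. A rough Python equivalent of that flow (the BMC address, manager URI, and credentials are illustrative assumptions):

import requests

bmc = "https://bmc.example.com"  # hypothetical BMC address
s = requests.Session()
s.auth = ("admin", "password")   # illustrative credentials
s.verify = False                 # many BMCs ship self-signed certificates

manager = s.get(bmc + "/redfish/v1/Managers/1").json()
reset = manager["Actions"]["#Manager.Reset"]
allowed = reset.get("ResetType@Redfish.AllowedValues", [])

# Mirror the Go logic: only validate when the manager advertises a list.
if allowed and "GracefulRestart" not in allowed:
    raise ValueError("GracefulRestart is not supported by this manager")
s.post(reset["target"], json={"ResetType": "GracefulRestart"}).raise_for_status()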
ac.py
"""Actor-Critic Algorithm.""" from rllib.util.neural_networks.utilities import broadcast_to_tensor from .abstract_algorithm import AbstractAlgorithm class ActorCritic(AbstractAlgorithm): r"""Implementation of Policy Gradient algorithm. Policy-Gradient is an on-policy model-free control algorithm. Policy-Gradient computes the policy gradient using a critic to estimate the returns (sum of discounted rewards). The Policy-Gradient algorithm is a policy gradient algorithm that estimates the gradient: .. math:: \grad J = \int_{\tau} \grad \log \pi(s_t) Q(s_t, a_t), where the previous integral is computed through samples (s_t, a_t) samples. Parameters ---------- policy: AbstractPolicy Policy to optimize. critic: AbstractQFunction Critic that evaluates the current policy. criterion: _Loss Criterion to optimize the baseline. gamma: float Discount factor. References ---------- Sutton, R. S., McAllester, D. A., Singh, S. P., & Mansour, Y. (2000). Policy gradient methods for reinforcement learning with function approximation. NeurIPS. Konda, V. R., & Tsitsiklis, J. N. (2000). Actor-critic algorithms. NeurIPS. Degris, T., White, M., & Sutton, R. S. (2012). Off-policy actor-critic. ICML """ def __init__(
self.standardize_returns = standardize_returns def returns(self, trajectory): """Estimate the returns of a trajectory.""" state, action = trajectory.state, trajectory.action weight = self.get_ope_weight(state, action, trajectory.log_prob_action) advantage = self.critic(state, action) weight = broadcast_to_tensor(input_tensor=weight, target_tensor=advantage) return weight * advantage def actor_loss(self, observation): """Get Actor loss.""" return self.score_actor_loss(observation, linearized=False).reduce( self.criterion.reduction )
self, num_policy_samples=15, standardize_returns=True, *args, **kwargs ): super().__init__(num_policy_samples=num_policy_samples, *args, **kwargs)
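The score-function estimator in the docstring is usually implemented as a surrogate loss whose gradient matches the estimate: minimizing -log pi(a|s) * Q(s, a) yields the gradient above. A toy PyTorch sketch, independent of rllib's classes:

import torch
from torch import nn

policy = nn.Linear(4, 2)                 # toy logits over 2 actions
states = torch.randn(8, 4)
dist = torch.distributions.Categorical(logits=policy(states))
actions = dist.sample()
q_values = torch.randn(8)                # stand-in for critic(state, action)

# Detach the critic estimate so only the policy receives gradient;
# the gradient of this loss is -E[grad log pi(a|s) * Q(s, a)].
loss = -(dist.log_prob(actions) * q_values.detach()).mean()
loss.backward()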
anyadirdron.ts
import { Component } from '@angular/core'; import { NavController, NavParams, Platform } from 'ionic-angular'; import { AngularFirestore } from 'angularfire2/firestore'; import { Storage } from '@ionic/storage'; import { LocalNotifications } from '@ionic-native/local-notifications/ngx'; import { TranslateService } from '@ngx-translate/core'; @Component({ selector: 'page-anyadirdron', templateUrl: 'anyadirdron.html', }) export class
{
  constructor(public navCtrl: NavController, public navParams: NavParams, private angularFirestore: AngularFirestore,
    private storage: Storage, private platform: Platform, private localNotifications: LocalNotifications,
    private _translate: TranslateService) {
  }

  // Runs an action when the screen opens
  ionViewWillEnter() {
    this.platform.registerBackButtonAction(() => {this.navCtrl.pop()});
  }

  // Adds the drone with the parameters entered by the user
  addDron(apodo: string, marca: string, modelo: string, fechaAdquisicion: any, garantia: number, comentarios: string) {
    // Note: garantia is only used to schedule the notification below;
    // it is not persisted to Firestore.
    this.storage.get('UID').then( x => {
      this.angularFirestore.collection('usuarios/' + x + '/drones').add({apodo, marca, modelo, fechaAdquisicion, comentarios});
    });
    // Warranty end = purchase date + garantia months (30 days each, 2592000000 ms),
    // with the notification fired one week (604800000 ms) beforehand.
    let fechaFinGarantiaNotificacion = new Date(fechaAdquisicion.year, fechaAdquisicion.month, fechaAdquisicion.day).getTime();
    fechaFinGarantiaNotificacion = fechaFinGarantiaNotificacion + garantia*2592000000;
    fechaFinGarantiaNotificacion = fechaFinGarantiaNotificacion - 604800000;
    this._translate.get(['NOTIFICATION.TITLE', 'NOTIFICATION.MESSAGE']).subscribe(translate => {
      this.localNotifications.schedule({
        title: translate['NOTIFICATION.TITLE'] + apodo,
        text: translate['NOTIFICATION.MESSAGE'],
        trigger: {at: new Date(new Date().setTime(fechaFinGarantiaNotificacion))}
      });
    })
    this.navCtrl.popToRoot();
  }
}
AnyadirdronPage
exception_test.py
import pytest import stweet as st from stweet.auth import TwitterAuthTokenProvider, SimpleAuthTokenProvider from stweet.exceptions import RefreshTokenException, ScrapBatchBadResponse from tests.integration.mock_web_client import MockWebClient def test_get_auth_token_with_incorrect_response_1():
def test_get_simple_auth_token_with_incorrect_response_1(): with pytest.raises(RefreshTokenException): SimpleAuthTokenProvider(MockWebClient(None, None)).get_new_token() def test_get_auth_token_with_incorrect_response_2(): with pytest.raises(RefreshTokenException): TwitterAuthTokenProvider(MockWebClient(400, 'None')).get_new_token() def test_get_auth_token_with_incorrect_response_3(): with pytest.raises(RefreshTokenException): TwitterAuthTokenProvider(MockWebClient(200, 'None')).get_new_token() def test_runner_exceptions(): class TokenExpiryExceptionWebClient(st.WebClient): count_dict = dict({ 'https://twitter.com': 0, 'https://api.twitter.com/2/search/adaptive.json': 0 }) def run_request(self, params: st.http_request.RequestDetails) -> st.http_request.RequestResponse: self.count_dict[params.url] = self.count_dict[params.url] + 1 if params.url == 'https://api.twitter.com/2/search/adaptive.json': if self.count_dict[params.url] == 1: return st.http_request.RequestResponse(429, None) else: return st.http_request.RequestResponse(400, '') else: return st.http_request.RequestResponse(200, 'decodeURIComponent("gt=1330640566170869763; Max=10800;') with pytest.raises(ScrapBatchBadResponse): search_tweets_task = st.SearchTweetsTask( all_words='#koronawirus' ) st.TweetSearchRunner( search_tweets_task=search_tweets_task, tweet_outputs=[], web_client=TokenExpiryExceptionWebClient(), auth_token_provider_factory=st.auth.TwitterAuthTokenProviderFactory() ).run()
with pytest.raises(RefreshTokenException): TwitterAuthTokenProvider(MockWebClient(None, None)).get_new_token()
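These tests only need a web client that parrots back a canned status code and body. The MockWebClient they import is not shown here; a plausible sketch (a guess at tests/integration/mock_web_client.py, consistent with how it is called above) is:

import stweet as st


class MockWebClient(st.WebClient):
    """Return a fixed (status_code, text) pair for every request."""

    def __init__(self, status_code, text):
        self.status_code = status_code
        self.text = text

    def run_request(self, params: st.http_request.RequestDetails) -> st.http_request.RequestResponse:
        return st.http_request.RequestResponse(self.status_code, self.text)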
apps.py
from django.apps import AppConfig class UsersAppConfig(AppConfig):
name = "django_broadcast.users" verbose_name = "Users" def ready(self): """Override this to put in: Users system checks Users signal registration """ try: import users.signals # noqa F401 except ImportError: pass
news_20190701.bundle.js
!function(e){function t(t){for(var n,i,u=t[0],c=t[1],a=t[2],l=0,s=[];l<u.length;l++)i=u[l],Object.prototype.hasOwnProperty.call(o,i)&&o[i]&&s.push(o[i][0]),o[i]=0;for(n in c)Object.prototype.hasOwnProperty.call(c,n)&&(e[n]=c[n]);for(f&&f(t);s.length;)s.shift()();return p.push.apply(p,a||[]),r()}function r(){for(var e,t=0;t<p.length;t++){for(var r=p[t],n=!0,u=1;u<r.length;u++){var c=r[u];0!==o[c]&&(n=!1)}n&&(p.splice(t--,1),e=i(i.s=r[0]))}return e}var n={},o={23:0},p=[];function
(t){if(n[t])return n[t].exports;var r=n[t]={i:t,l:!1,exports:{}};return e[t].call(r.exports,r,r.exports,i),r.l=!0,r.exports}i.m=e,i.c=n,i.d=function(e,t,r){i.o(e,t)||Object.defineProperty(e,t,{enumerable:!0,get:r})},i.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},i.t=function(e,t){if(1&t&&(e=i(e)),8&t)return e;if(4&t&&"object"==typeof e&&e&&e.__esModule)return e;var r=Object.create(null);if(i.r(r),Object.defineProperty(r,"default",{enumerable:!0,value:e}),2&t&&"string"!=typeof e)for(var n in e)i.d(r,n,function(t){return e[t]}.bind(null,n));return r},i.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return i.d(t,"a",t),t},i.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},i.p="";var u=window.webpackJsonp=window.webpackJsonp||[],c=u.push.bind(u);u.push=t,u=u.slice();for(var a=0;a<u.length;a++)t(u[a]);var f=c;p.push([105,0]),r()}({105:function(e,t,r){r(1)(r(106))},106:function(e){e.exports=JSON.parse('{"widget":"news/news_doc_content","title":"罕见!大疆通过美国内政部审核","preface":"参考消息网7月11日报道 境外媒体报道称,大疆创新推出的大疆政企版无人机系统,已通过美国内政部的官方测试与独立验证。","datetime":"2019年7月11日 北京时间","htmlcontent":"<p>参考消息网7月11日报道 境外媒体报道称,大疆创新推出的大疆政企版无人机系统,已通过美国内政部的官方测试与独立验证。</p><p>据香港《南华早报》网站7月10日报道,美国内政部负责评估和批准用于各种用途的无人机技术。该部表示,在对大疆无人机进行的15个月严格评估中,没有发现数据在系统外传输。</p><p>参考消息网了解到,美国内政部测试的是,大疆经纬Matrice 600 Pro和“御”Mavic Pro上配置的大疆政企版无人机系统,包括飞行器的飞行性能、有效载荷和数据安全管理性能。美国内政部肯定其符合内政部的技术与风险要求,并同意在内部使用大疆的无人机产品与系统。</p><p>美国内政部在一份报告中称,内政部与大疆创新已合作超过两年,双方共同创建了一个无人机解决方案。美国内政部同意其机构使用大疆定制化的飞行器及软件(即“大疆政企版无人机系统”),经过测试,确认飞行器符合安全标准。</p><p>据《南华早报》网站报道,大疆创新北美区域副总裁马里奥·雷贝洛在一份声明中说:“我们在持续不断地优化软硬件技术方案,以满足客户不断变化的数据安全需求。内政部此次报告是极大认可了DJI大疆创新多年来在数据安全管理方面的努力。”</p><p>该网站称,中国科技公司近期处在遭到怀疑和限制的氛围中,因此这项赢得美国政府审核的案例实属罕见。</p><p>今年5月,美国有线电视新闻网(CNN)曾报道说,美国国土安全部提醒美国企业“注意”它们的无人机数据是由供应商还是其他第三方存储的。这一警告没有指明任何制造商,但引发了关于大疆创新可能被禁止在美国销售的猜测。</p><p>《南华早报》网站援引乔治敦大学外交学院安全与新兴技术研究中心学者洛兰·洛什考伊的说法称,这次测试“清楚地表明,数据安全问题有可能得到可行地解决”。</p><p>据参考消息网了解,大疆创新官方表示,将继续倾听包括美国内政部在内各方面客户的意见,并围绕行业标准进行讨论,以不断改进产品。</p><center><figure class=\\"figure\\"><img class=\\"figure-img img-fluid rounded\\" src=\\"./assets/imgs/news/news-20190701-01.jpg\\"><figcaption class=\\"figure-caption\\">资料图:观众在大疆展区参观无人机。(新华社)</figcaption></figure></center>"}')}});
i
AssetInlinePDFFragment.graphql.ts
/* tslint:disable */ /* eslint-disable */ // @ts-nocheck import { ReaderFragment } from "relay-runtime"; import { FragmentRefs } from "relay-runtime"; export type AssetInlinePDFFragment = { readonly downloadUrl?: string | null | undefined; readonly " $fragmentRefs": FragmentRefs<"AssetDownloadButtonFragment">; readonly " $refType": "AssetInlinePDFFragment"; }; export type AssetInlinePDFFragment$data = AssetInlinePDFFragment; export type AssetInlinePDFFragment$key = { readonly " $data"?: AssetInlinePDFFragment$data | undefined; readonly " $fragmentRefs": FragmentRefs<"AssetInlinePDFFragment">; }; const node: ReaderFragment = { "argumentDefinitions": [], "kind": "Fragment", "metadata": null, "name": "AssetInlinePDFFragment", "selections": [ { "kind": "InlineFragment", "selections": [ { "alias": null, "args": null, "kind": "ScalarField", "name": "downloadUrl", "storageKey": null } ], "type": "AssetPDF", "abstractKey": null }, { "args": null,
"type": "Asset", "abstractKey": "__isAsset" }; (node as any).hash = 'b63d9ec38520d0c9138a89259863724d'; export default node;
"kind": "FragmentSpread", "name": "AssetDownloadButtonFragment" } ],
display.ts
import { Equatable } from "@siteimprove/alfa-equatable"; import { Hash, Hashable } from "@siteimprove/alfa-hash"; import { Serializable } from "@siteimprove/alfa-json"; import * as json from "@siteimprove/alfa-json"; /** * @public */ export class Display implements Equatable, Hashable, Serializable { public static of( resolution: number, scan: Display.Scan = Display.Scan.Progressive ): Display { return new Display(resolution, scan); } private readonly _resolution: number; private readonly _scan: Display.Scan; private constructor(resolution: number, scan: Display.Scan) { this._resolution = resolution; this._scan = scan; } /** * {@link https://www.w3.org/TR/mediaqueries/#resolution} */ public get resolution(): number { return this._resolution; } /** * {@link https://www.w3.org/TR/mediaqueries/#scan} */ public get scan(): Display.Scan { return this._scan; } public equals(value: unknown): value is this { return ( value instanceof Display && value._resolution === this._resolution && value._scan === this._scan ); } public hash(hash: Hash): void { hash.writeUint8(this._resolution); switch (this._scan) { case Display.Scan.Interlace: hash.writeUint8(1); break; case Display.Scan.Progressive: hash.writeUint8(2); } } public toJSON(): Display.JSON { return { resolution: this._resolution, scan: this._scan, }; } } /** * @public */ export namespace Display { export enum Scan { Interlace = "interlace", Progressive = "progressive", } export interface JSON { [key: string]: json.JSON; resolution: number; scan: `${Scan}`; }
export function from(json: JSON): Display { return Display.of(json.resolution, json.scan as Scan); } export function standard(): Display { return Display.of(1, Scan.Progressive); } }
operation_conversions.rs
// Copyright © 2021 HQS Quantum Simulations GmbH. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software distributed under the // License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either // express or implied. See the License for the specific language governing permissions and // limitations under the License. use ndarray::{array, Array1, Array2}; use num_complex::Complex64; use pyo3::Python; use qoqo::operations::*; use qoqo_calculator::CalculatorFloat; use roqoqo::operations::*; use roqoqo::Circuit; use std::collections::HashMap; use std::f64::consts::PI; use test_case::test_case; /// Test convert_operation_to_pyobject and convert_pyany_to_operation #[test_case(Operation::from(RotateZ::new(1, CalculatorFloat::from(1.3))); "RotateZ float")] #[test_case(Operation::from(RotateX::new(0, CalculatorFloat::from(0))); "RotateX float")] #[test_case(Operation::from(RotateY::new(0, CalculatorFloat::from(PI))); "RotateY float")] #[test_case(Operation::from( SingleQubitGate::new( 0, CalculatorFloat::from(0), CalculatorFloat::from(0), CalculatorFloat::from(0), CalculatorFloat::from(0), CalculatorFloat::from(0), ) ); "SingleQubitGate") ] #[test_case(Operation::from( RotateAroundSphericalAxis::new( 0, CalculatorFloat::from(PI), CalculatorFloat::from(0), CalculatorFloat::from(PI / 4.0), ) ); "RotateAroundSphericalAxis") ] #[test_case(Operation::from(PauliX::new(1)); "PauliX")] #[test_case(Operation::from(PauliY::new(1)); "PauliY")] #[test_case(Operation::from(PauliZ::new(1)); "PauliZ")] #[test_case(Operation::from(SqrtPauliX::new(100)); "SqrtPauliX")] #[test_case(Operation::from(InvSqrtPauliX::new(100)); "InvSqrtPauliX")] #[test_case(Operation::from(SGate::new(1)); "SGate")] #[test_case(Operation::from(TGate::new(1)); "TGate")] #[test_case(Operation::from(Hadamard::new(3)); "Hadamard")] #[test_case(Operation::from(CNOT::new(0, 1)); "CNOT")] #[test_case(Operation::from(SWAP::new(0, 1)); "SWAP")] #[test_case(Operation::from(ISwap::new(0, 1)); "ISwap")] #[test_case(Operation::from(FSwap::new(0, 1)); "FSwap")] #[test_case(Operation::from(SqrtISwap::new(0, 1)); "SqrtISwap")] #[test_case(Operation::from(InvSqrtISwap::new(0, 1)); "InvSqrtISwap")] #[test_case(Operation::from(XY::new(0, 1, CalculatorFloat::PI)); "XY")] #[test_case(Operation::from(ControlledPhaseShift::new(0, 1, CalculatorFloat::FRAC_PI_4)); "ControlledPhaseShift")] #[test_case(Operation::from(ControlledPauliY::new(0, 1)); "ControlledPauliY")] #[test_case(Operation::from(ControlledPauliZ::new(0, 1)); "ControlledPauliZ")] #[test_case(Operation::from(MolmerSorensenXX::new(0, 1)); "MolmerSorensenXX")] #[test_case(Operation::from(VariableMSXX::new(0, 1, CalculatorFloat::PI)); "VariableMSXX")] #[test_case(Operation::from(GivensRotation::new(0, 1, CalculatorFloat::PI, CalculatorFloat::FRAC_PI_4)); "GivensRotation")] #[test_case(Operation::from(GivensRotationLittleEndian::new(0, 1, CalculatorFloat::PI, CalculatorFloat::FRAC_PI_4)); "GivensRotationLittleEndian")] #[test_case(Operation::from(Qsim::new(0, 1, CalculatorFloat::from(1.0), CalculatorFloat::from(1.0), CalculatorFloat::from(-1.0))); "Qsim")] #[test_case(Operation::from(Fsim::new(0, 1, CalculatorFloat::from(1.0), CalculatorFloat::from(1.0), CalculatorFloat::from(-1.0))); "Fsim")] 
#[test_case(Operation::from(SpinInteraction::new(0, 1, CalculatorFloat::from(1.0), CalculatorFloat::from(2.0), CalculatorFloat::from(-1.0))); "SpinInteraction")] #[test_case(Operation::from(Bogoliubov::new(0, 1, CalculatorFloat::from(1.0), CalculatorFloat::from(-1.0))); "Bogoliubov")] #[test_case(Operation::from(PMInteraction::new(0, 1, CalculatorFloat::PI)); "PMInteraction")] #[test_case(Operation::from(ComplexPMInteraction::new(0, 1, CalculatorFloat::from(1.0), CalculatorFloat::from(-1.0))); "ComplexPMInteraction")] #[test_case(Operation::from(DefinitionFloat::new(String::from("ro"), 1, false)); "DefinitionFloat")] #[test_case(Operation::from(DefinitionComplex::new(String::from("ro"), 1, false)); "DefinitionComplex")] #[test_case(Operation::from(DefinitionUsize::new(String::from("ro"), 1, false)); "DefinitionUsize")]
#[test_case(Operation::from(DefinitionBit::new(String::from("ro"), 1, false)); "DefinitionBit")] #[test_case(Operation::from(InputSymbolic::new(String::from("ro"), 1.0)); "InputSymbolic")] #[test_case(Operation::from(MeasureQubit::new(0, String::from("ro"), 1)); "MeasureQubit")] #[test_case(Operation::from(PragmaGetStateVector::new(String::from("ro"), Some(create_circuit()))); "PragmaGetStateVector")] #[test_case(Operation::from(PragmaGetDensityMatrix::new(String::from("ro"), Some(create_circuit()))); "PragmaGetDensityMatrix")] #[test_case(Operation::from(PragmaGetOccupationProbability::new(String::from("ro"), Some(create_circuit()))); "PragmaGetOccupationProbability")] #[test_case(Operation::from(PragmaGetPauliProduct::new(reordering(), String::from("ro"), create_circuit())); "PragmaGetPauliProduct")] #[test_case(Operation::from(PragmaRepeatedMeasurement::new(String::from("ro"), 2, Some(reordering()))); "PragmaRepeatedMeasurement")] #[test_case(Operation::from(PragmaSetNumberOfMeasurements::new(1, String::from("ro"))); "PragmaSetNumberOfMeasurements")] #[test_case(Operation::from(PragmaSetStateVector::new(statevector())); "PragmaSetStateVector")] #[test_case(Operation::from(PragmaSetDensityMatrix::new(densitymatrix())); "PragmaSetDensityMatrix")] #[test_case(Operation::from(PragmaRepeatGate::new(3)); "PragmaRepeatGate")] #[test_case(Operation::from(PragmaBoostNoise::new(CalculatorFloat::from(0.003))); "PragmaBoostNoise")] #[test_case(Operation::from(PragmaStopParallelBlock::new(vec![0, 1], CalculatorFloat::from(0.0000001))); "PragmaStopParallelBlock")] #[test_case(Operation::from(PragmaGlobalPhase::new(CalculatorFloat::from(0.05))); "PragmaGlobalPhase")] #[test_case(Operation::from(PragmaSleep::new(vec![0, 1], CalculatorFloat::from(0.0000001))); "PragmaSleep")] #[test_case(Operation::from(PragmaActiveReset::new(0)); "PragmaActiveReset")] #[test_case(Operation::from(PragmaOverrotation::new("RotateX".to_string(), vec![0], 0.03, 0.001)); "PragmaOverrotation")] #[test_case(Operation::from(PragmaStartDecompositionBlock::new(vec![0, 1], reordering())); "PragmaStartDecompositionBlock")] #[test_case(Operation::from(PragmaStopDecompositionBlock::new(vec![0, 1])); "PragmaStopDecompositionBlock")] #[test_case(Operation::from(PragmaDamping::new(0, CalculatorFloat::from(0.005), CalculatorFloat::from(0.02))); "PragmaDamping")] #[test_case(Operation::from(PragmaDepolarising::new(0, CalculatorFloat::from(0.005), CalculatorFloat::from(0.02))); "PragmaDepolarising")] #[test_case(Operation::from(PragmaDephasing::new(0, CalculatorFloat::from(0.005), CalculatorFloat::from(0.02))); "PragmaDephasing")] #[test_case(Operation::from(PragmaRandomNoise::new(0, CalculatorFloat::from(0.005), CalculatorFloat::from(0.02), CalculatorFloat::from(0.01))); "PragmaRandomNoise")] #[test_case(Operation::from(PragmaGeneralNoise::new(0, CalculatorFloat::from(0.005), operators())); "PragmaGeneralNoise")] #[test_case(Operation::from(PragmaConditional::new(String::from("ro"), 1, create_circuit())); "PragmaConditional")] fn test_conversion(input: Operation) { pyo3::prepare_freethreaded_python(); Python::with_gil(|py| { let operation = convert_operation_to_pyobject(input.clone()).unwrap(); let output = convert_pyany_to_operation(operation.as_ref(py)).unwrap(); assert_eq!(input, output) }) } // ---------------- Helper functions ---------------- // fn reordering() -> HashMap<usize, usize> { let mut reordering: HashMap<usize, usize> = HashMap::new(); reordering.insert(0, 1); reordering } fn statevector() -> Array1<Complex64> { let 
statevector: Array1<Complex64> = array![ Complex64::new(1.0, 0.0), Complex64::new(0.0, 0.0), Complex64::new(0.0, 0.0), Complex64::new(0.0, 0.0) ]; statevector } fn densitymatrix() -> Array2<Complex64> { let densitymatrix: Array2<Complex64> = array![ [Complex64::new(1.0, 0.0), Complex64::new(0.0, 0.0)], [Complex64::new(0.0, 0.0), Complex64::new(0.0, 0.0)], ]; densitymatrix } fn operators() -> Array2<f64> { let operators: Array2<f64> = array![[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0],]; operators } fn create_circuit() -> Circuit { let mut circuit = Circuit::new(); circuit.add_operation(PauliX::new(0)); circuit }
config-v18.go
/* * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package cmd import ( "errors" "fmt" "io/ioutil" "sync" "github.com/minio/minio/pkg/quick" "github.com/tidwall/gjson" ) // Config version const v18 = "18" var ( // serverConfig server config. serverConfig *serverConfigV18 serverConfigMu sync.RWMutex ) // serverConfigV18 server configuration version '18' which is like // version '17' except it adds support for "deliveryMode" parameter in // the AMQP notification target. type serverConfigV18 struct { sync.RWMutex Version string `json:"version"` // S3 API configuration. Credential credential `json:"credential"` Region string `json:"region"` Browser BrowserFlag `json:"browser"` // Additional error logging configuration. Logger *loggers `json:"logger"` // Notification queue configuration. Notify *notifier `json:"notify"` } // GetVersion get current config version. func (s *serverConfigV18) GetVersion() string { s.RLock() defer s.RUnlock() return s.Version } // SetRegion set new region. func (s *serverConfigV18) SetRegion(region string) { s.Lock() defer s.Unlock() s.Region = region } // GetRegion get current region. func (s *serverConfigV18) GetRegion() string { s.RLock() defer s.RUnlock() return s.Region } // SetCredential set new credentials. func (s *serverConfigV18) SetCredential(creds credential) { s.Lock() defer s.Unlock() // Set updated credential. s.Credential = creds } // GetCredential get current credentials. func (s *serverConfigV18) GetCredential() credential { s.RLock() defer s.RUnlock() return s.Credential } // SetBrowser set if browser is enabled. func (s *serverConfigV18) SetBrowser(b bool) { s.Lock() defer s.Unlock() // Set the new value. s.Browser = BrowserFlag(b) } // GetBrowser get current browser flag. func (s *serverConfigV18) GetBrowser() bool { s.RLock() defer s.RUnlock() return bool(s.Browser) } // Save config. func (s *serverConfigV18) Save() error { s.RLock() defer s.RUnlock() // Save config file. return quick.Save(getConfigFile(), s) } func newServerConfigV18() *serverConfigV18 { srvCfg := &serverConfigV18{ Version: v18, Credential: mustGetNewCredential(), Region: globalMinioDefaultRegion, Browser: true, Logger: &loggers{}, Notify: &notifier{}, } // Enable console logger by default on a fresh run. srvCfg.Logger.Console = NewConsoleLogger() // Make sure to initialize notification configs.
srvCfg.Notify.AMQP = make(map[string]amqpNotify) srvCfg.Notify.AMQP["1"] = amqpNotify{} srvCfg.Notify.ElasticSearch = make(map[string]elasticSearchNotify) srvCfg.Notify.ElasticSearch["1"] = elasticSearchNotify{} srvCfg.Notify.Redis = make(map[string]redisNotify) srvCfg.Notify.Redis["1"] = redisNotify{} srvCfg.Notify.NATS = make(map[string]natsNotify) srvCfg.Notify.NATS["1"] = natsNotify{} srvCfg.Notify.PostgreSQL = make(map[string]postgreSQLNotify) srvCfg.Notify.PostgreSQL["1"] = postgreSQLNotify{} srvCfg.Notify.MySQL = make(map[string]mySQLNotify) srvCfg.Notify.MySQL["1"] = mySQLNotify{} srvCfg.Notify.Kafka = make(map[string]kafkaNotify) srvCfg.Notify.Kafka["1"] = kafkaNotify{} srvCfg.Notify.Webhook = make(map[string]webhookNotify) srvCfg.Notify.Webhook["1"] = webhookNotify{} return srvCfg } // newConfig - initialize a new server config, saves env parameters if // found, otherwise use default parameters func newConfig() error { // Initialize server config. srvCfg := newServerConfigV18() // If env is set override the credentials from config file. if globalIsEnvCreds { srvCfg.SetCredential(globalActiveCred) } if globalIsEnvBrowser { srvCfg.SetBrowser(globalIsBrowserEnabled) } if globalIsEnvRegion { srvCfg.SetRegion(globalServerRegion) } // hold the mutex lock before a new config is assigned. // Save the new config globally. // unlock the mutex. serverConfigMu.Lock() serverConfig = srvCfg serverConfigMu.Unlock() // Save config into file. return serverConfig.Save() } // doCheckDupJSONKeys recursively detects duplicate json keys func doCheckDupJSONKeys(key, value gjson.Result) error { // Key occurrences map of the current scope to count // if there is any duplicated json key. keysOcc := make(map[string]int) // Holds the found error var checkErr error // Iterate over keys in the current json scope value.ForEach(func(k, v gjson.Result) bool { // If current key is not null, check if its // value contains some duplicated keys. if k.Type != gjson.Null { keysOcc[k.String()]++ checkErr = doCheckDupJSONKeys(k, v) } return checkErr == nil }) // Check found err if checkErr != nil { return errors.New(key.String() + " => " + checkErr.Error()) } // Check for duplicated keys for k, v := range keysOcc { if v > 1 { return errors.New(key.String() + " => `" + k + "` entry is duplicated") } } return nil } // Check recursively if a key is duplicated in the same json scope // e.g.: // `{ "key" : { "key" ..` is accepted // `{ "key" : { "subkey" : "val1", "subkey": "val2" ..` throws subkey duplicated error func checkDupJSONKeys(json string) error { // Parse config with gjson library config := gjson.Parse(json) // Create a fake rootKey since root json doesn't seem to have representation // in gjson library. rootKey := gjson.Result{Type: gjson.String, Str: minioConfigFile} // Check if loaded json contains any duplicated keys return doCheckDupJSONKeys(rootKey, config) } // getValidConfig - returns valid server configuration func getValidConfig() (*serverConfigV18, error) { srvCfg := &serverConfigV18{ Region: globalMinioDefaultRegion, Browser: true, } configFile := getConfigFile() if _, err := quick.Load(configFile, srvCfg); err != nil { return nil, err } if srvCfg.Version != v18 { return nil, fmt.Errorf("configuration version mismatch. Expected: ‘%s’, Got: ‘%s’", v18, srvCfg.Version) } // Load config file json and check for duplication json keys jsonBytes, err := ioutil.ReadFile(configFile) if err != nil { retu
= checkDupJSONKeys(string(jsonBytes)); err != nil { return nil, err } // Validate region field if srvCfg.Region == "" { return nil, errors.New("Region config value cannot be empty") } // Validate credential fields only when // they are not set via the environment. // Error out if the env credential is not set and the config has an invalid credential. if !globalIsEnvCreds && !srvCfg.Credential.IsValid() { return nil, errors.New("invalid credential in config file " + configFile) } // Validate logger field if err = srvCfg.Logger.Validate(); err != nil { return nil, err } // Validate notify field if err = srvCfg.Notify.Validate(); err != nil { return nil, err } return srvCfg, nil } // loadConfig - loads a new config from disk, overrides params from env // if found and valid func loadConfig() error { srvCfg, err := getValidConfig() if err != nil { return err } // If env is set override the credentials from config file. if globalIsEnvCreds { srvCfg.SetCredential(globalActiveCred) } if globalIsEnvBrowser { srvCfg.SetBrowser(globalIsBrowserEnabled) } if globalIsEnvRegion { srvCfg.SetRegion(globalServerRegion) } // hold the mutex lock before a new config is assigned. serverConfigMu.Lock() serverConfig = srvCfg if !globalIsEnvCreds { globalActiveCred = serverConfig.GetCredential() } if !globalIsEnvBrowser { globalIsBrowserEnabled = serverConfig.GetBrowser() } if !globalIsEnvRegion { globalServerRegion = serverConfig.GetRegion() } serverConfigMu.Unlock() return nil }
rn nil, err } if err
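The duplicate-key check above works because gjson scans the raw document and reports every key/value pair it encounters, including repeats of the same key. A minimal standalone sketch of the same idea, assuming only the gjson dependency; the names here are illustrative, not part of the minio code:

package main

import (
	"errors"
	"fmt"

	"github.com/tidwall/gjson"
)

// dupKeys returns an error if any object in the JSON value repeats a key
// within a single scope, mirroring doCheckDupJSONKeys above.
func dupKeys(value gjson.Result) error {
	seen := make(map[string]int)
	var err error
	value.ForEach(func(k, v gjson.Result) bool {
		if k.Type != gjson.Null { // arrays yield null keys; skip those
			seen[k.String()]++
			err = dupKeys(v) // recurse into nested scopes
		}
		return err == nil // stop iterating once an error is found
	})
	if err != nil {
		return err
	}
	for k, n := range seen {
		if n > 1 {
			return errors.New("`" + k + "` entry is duplicated")
		}
	}
	return nil
}

func main() {
	fmt.Println(dupKeys(gjson.Parse(`{"a": 1, "b": {"c": 2}}`))) // <nil>
	fmt.Println(dupKeys(gjson.Parse(`{"a": 1, "a": 2}`)))        // `a` entry is duplicated
}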
message.rs
use super::{Result, WebSocketError}; use serde::de::DeserializeOwned; use wasm_bindgen::{JsCast, JsValue}; use web_sys::MessageEvent; #[allow(clippy::module_name_repetitions)] #[derive(Debug)] pub struct WebSocketMessage { pub(crate) data: JsValue, pub(crate) message_event: MessageEvent, } impl WebSocketMessage { /// Return message data as `String`. /// /// # Errors /// /// Returns `WebSocketError::TextError` if data isn't a valid utf-8 string. pub fn text(&self) -> Result<String> { self.data.as_string().ok_or(WebSocketError::TextError( "data is not a valid utf-8 string", )) } /// JSON parse message data into provided type. /// /// # Errors /// /// Returns /// - `WebSocketError::TextError` if data isn't a valid utf-8 string. /// - `WebSocketError::SerdeError` when JSON decoding fails. pub fn json<T: DeserializeOwned + 'static>(&self) -> Result<T> { let text = self.text()?; serde_json::from_str(&text).map_err(WebSocketError::SerdeError) } /// Return message data as `Vec<u8>`. /// /// # Errors /// /// Returns: /// - `WebSocketError::PromiseError` when loading bytes from `Blob` fails. /// - `WebSocketError::TextError` if the message data isn't binary but also not a valid utf-8 string. pub async fn bytes(&self) -> Result<Vec<u8>> { if let Some(array_buffer) = self.data.dyn_ref::<js_sys::ArrayBuffer>() { let bytes = js_sys::Uint8Array::new(array_buffer).to_vec(); return Ok(bytes); } if let Some(blob) = self.data.dyn_ref::<web_sys::Blob>() { let blob = gloo_file::Blob::from(blob.to_owned()); let bytes = gloo_file::futures::read_as_bytes(&blob) .await .map_err(WebSocketError::FileReaderError)?; return Ok(bytes); } Ok(self.text()?.into_bytes()) } /// Return message data as `Blob`. /// /// # Errors /// /// Returns `WebSocketError::TextError` if the message data is neither binary nor a valid utf-8 string. pub fn blob(self) -> Result<gloo_file::Blob> { if self.contains_array_buffer() { let array_buffer = self.data.unchecked_into::<js_sys::ArrayBuffer>(); return Ok(gloo_file::Blob::new(array_buffer)); } if self.contains_blob() { let blob = self.data.unchecked_into::<web_sys::Blob>(); return Ok(gloo_file::Blob::from(blob)); } Ok(gloo_file::Blob::new(self.text()?.as_str())) } /// Is message data `ArrayBuffer`? pub fn
(&self) -> bool { self.data.has_type::<js_sys::ArrayBuffer>() } /// Is message data `Blob`? pub fn contains_blob(&self) -> bool { self.data.has_type::<web_sys::Blob>() } /// Is message data `String`? pub fn contains_text(&self) -> bool { self.data.has_type::<js_sys::JsString>() } /// Get underlying data as `wasm_bindgen::JsValue`. /// /// This is an escape path if current API can't handle your needs. /// Should you find yourself using it, please consider [opening an issue][issue]. /// /// [issue]: https://github.com/seed-rs/seed/issues pub const fn raw_data(&self) -> &JsValue { &self.data } /// Get underlying `web_sys::MessageEvent`. /// /// This is an escape path if current API can't handle your needs. /// Should you find yourself using it, please consider [opening an issue][issue]. /// /// [issue]: https://github.com/seed-rs/seed/issues pub const fn raw_message(&self) -> &web_sys::MessageEvent { &self.message_event } } #[cfg(test)] pub mod tests { use crate::browser::web_socket::WebSocketMessage; use wasm_bindgen_test::*; wasm_bindgen_test_configure!(run_in_browser); #[wasm_bindgen_test] async fn get_bytes_from_message() { let bytes = "some test message".as_bytes(); let blob = gloo_file::Blob::new(bytes); let message_event = web_sys::MessageEvent::new("test").unwrap(); let ws_msg = WebSocketMessage { data: blob.into(), message_event, }; let result_bytes = ws_msg.bytes().await.unwrap(); assert_eq!(bytes, &*result_bytes); } }
contains_array_buffer
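A short sketch of how a caller might branch on the accessors above. The `handle` function and the place `msg` comes from are illustrative, not part of this module, and the console logging assumes web-sys is built with its "console" feature:

// Hypothetical consumer of `WebSocketMessage`; `msg` would be handed over
// by the surrounding WebSocket machinery.
async fn handle(msg: WebSocketMessage) -> Result<()> {
    if msg.contains_text() {
        // Text frames can be read synchronously.
        let text = msg.text()?;
        web_sys::console::log_1(&text.into());
    } else if msg.contains_array_buffer() || msg.contains_blob() {
        // Binary frames go through the async byte loader.
        let bytes = msg.bytes().await?;
        web_sys::console::log_1(&format!("{} bytes", bytes.len()).into());
    }
    Ok(())
}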
Keyboard.ts
export const NUMBER_0 = 48 export const NUMBER_1 = 49 export const NUMBER_2 = 50 export const NUMBER_3 = 51 export const NUMBER_4 = 52 export const NUMBER_5 = 53 export const NUMBER_6 = 54 export const NUMBER_7 = 55 export const NUMBER_8 = 56 export const NUMBER_9 = 57 export const A = 65 export const B = 66 export const C = 67 export const D = 68 export const E = 69 export const F = 70 export const G = 71 export const H = 72 export const I = 73 export const J = 74 export const K = 75 export const L = 76 export const M = 77 export const N = 78 export const O = 79 export const P = 80 export const Q = 81 export const R = 82 export const S = 83 export const T = 84 export const U = 85 export const V = 86 export const W = 87 export const X = 88 export const Y = 89 export const Z = 90 export const NUMPAD_0 = 96 export const NUMPAD_1 = 97
export const NUMPAD_6 = 102 export const NUMPAD_7 = 103 export const NUMPAD_8 = 104 export const NUMPAD_9 = 105 export const NUMPAD_MULTIPLY = 106 export const NUMPAD_ADD = 107 export const NUMPAD_ENTER = 108 export const NUMPAD_SUBTRACT = 109 export const NUMPAD_DECIMAL = 110 export const NUMPAD_DIVIDE = 111 export const F1 = 112 export const F2 = 113 export const F3 = 114 export const F4 = 115 export const F5 = 116 export const F6 = 117 export const F7 = 118 export const F8 = 119 export const F9 = 120 export const F10 = 121 // F10 is used by browser export const F11 = 122 export const F12 = 123 export const F13 = 124 export const F14 = 125 export const F15 = 126 export const BACKSPACE = 8 export const TAB = 9 export const ALTERNATE = 18 export const ENTER = 13 export const COMMAND = 15 export const SHIFT = 16 export const CONTROL = 17 export const BREAK = 19 export const CAPS_LOCK = 20 export const NUMPAD = 21 export const ESCAPE = 27 export const SPACE = 32 export const PAGE_UP = 33 export const PAGE_DOWN = 34 export const END = 35 export const HOME = 36 export const LEFT = 37 export const RIGHT = 39 export const UP = 38 export const DOWN = 40 export const INSERT = 45 export const DELETE = 46 export const NUMLOCK = 144 export const SEMICOLON = 186 export const EQUAL = 187 export const COMMA = 188 export const MINUS = 189 export const PERIOD = 190 export const SLASH = 191 export const BACKQUOTE = 192 export const LEFTBRACKET = 219 export const BACKSLASH = 220 export const RIGHTBRACKET = 221 export const QUOTE = 222
export const NUMPAD_2 = 98 export const NUMPAD_3 = 99 export const NUMPAD_4 = 100 export const NUMPAD_5 = 101
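A small sketch of the intended use of these constants in a keydown handler. `closeDialog` and `moveSelection` are hypothetical app hooks declared only to keep the sketch self-contained, and note that `KeyboardEvent.keyCode` (which these values encode) is deprecated in modern browsers:

import * as Keyboard from './Keyboard'

// Hypothetical app hooks, stubbed so the sketch compiles on its own.
const closeDialog = () => console.log('close')
const moveSelection = (dir: number) => console.log('move', dir)

window.addEventListener('keydown', (e: KeyboardEvent) => {
  // Dispatch on the legacy keyCode values defined above.
  switch (e.keyCode) {
    case Keyboard.ESCAPE:
      closeDialog()
      break
    case Keyboard.LEFT:
    case Keyboard.RIGHT:
      moveSelection(e.keyCode === Keyboard.LEFT ? -1 : 1)
      break
  }
})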
mod.rs
/* * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ use common::SourceLocationKey; use fixture_tests::Fixture; use graphql_ir::{build, Program}; use graphql_syntax::parse_executable; use graphql_test_helpers::diagnostics_to_sorted_string; use graphql_transforms::{required_directive, FeatureFlags}; use interner::Intern; use relay_codegen::{print_fragment, print_operation}; use std::sync::Arc; use test_schema::{get_test_schema, get_test_schema_with_extensions}; pub fn
(fixture: &Fixture) -> Result<String, String> { let parts: Vec<_> = fixture.content.split("%extensions%").collect(); let (base, schema) = match parts.as_slice() { [base, extensions] => (base, get_test_schema_with_extensions(extensions)), [base] => (base, get_test_schema()), _ => panic!("Invalid fixture input {}", fixture.content), }; let ast = parse_executable(base, SourceLocationKey::standalone(fixture.file_name)).unwrap(); let ir = build(&schema, &ast.definitions) .map_err(|diagnostics| diagnostics_to_sorted_string(fixture.content, &diagnostics))?; let program = Program::from_definitions(Arc::clone(&schema), ir); required_directive( &program, &FeatureFlags { enable_required_transform_for_prefix: Some("".intern()), enable_flight_transform: false, }, ) .map(|next_program| { next_program .fragments() .map(|def| print_fragment(&schema, &def)) .chain( next_program .operations() .map(|def| print_operation(&schema, &def)), ) .collect::<Vec<_>>() .join("\n\n") }) .map_err(|diagnostics| diagnostics_to_sorted_string(fixture.content, &diagnostics)) }
transform_fixture
public_api.ts
export * from './lib/services/diagnostic.service'; export * from './lib/services/generic-support-topic.service'; export * from './lib/services/comms.service'; export * from './lib/services/telemetry/telemetry.service'; export * from './lib/services/detector-control.service'; export * from './lib/services/telemetry/telemetry.common'; export * from './lib/services/feature-navigation.service'; export * from './lib/services/diagnostic-site.service'; export * from './lib/services/unhandled-exception-handler.service'; export * from './lib/services/solution.service'; export * from './lib/services/settings.service'; export * from './lib/config/diagnostic-data-config'; export * from './lib/diagnostic-data.module'; export * from './lib/models/detector'; export * from './lib/models/insight'; export * from './lib/models/loading'; export * from './lib/models/communication'; export * from './lib/models/compiler-response'; export * from './lib/models/compilation-properties'; export * from './lib/models/solution-type-tag';
/* * Public API Surface of diagnostic-data */
bootstrap-datepicker.js
/* ========================================================= * bootstrap-datepicker.js * Repo: https://github.com/eternicode/bootstrap-datepicker/ * Demo: eternicode.github.io/bootstrap-datepicker/ * Docs: bootstrap-datepicker.readthedocs.org/ * Forked from www.eyecon.ro/bootstrap-datepicker * ========================================================= * Started by Stefan Petre; improvements by Andrew Rowls + contributors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * ========================================================= */ (function($, undefined){ var $window = $(window); function UTCDate(){ return new Date(Date.UTC.apply(Date, arguments)); } function UTCToday(){ var today = new Date(); return UTCDate(today.getFullYear(), today.getMonth(), today.getDate()); } function alias(method){ return function(){ return this[method].apply(this, arguments); }; } var DateArray = (function(){ var extras = { get: function(i){ return this.slice(i)[0]; }, contains: function(d){ // Array.indexOf is not cross-browser; // $.inArray doesn't work with Dates var val = d && d.valueOf(); for (var i=0, l=this.length; i < l; i++) if (this[i].valueOf() === val) return i; return -1; }, remove: function(i){ this.splice(i,1); }, replace: function(new_array){ if (!new_array) return; if (!$.isArray(new_array)) new_array = [new_array]; this.clear(); this.push.apply(this, new_array); }, clear: function(){ this.splice(0); }, copy: function(){ var a = new DateArray(); a.replace(this); return a; } }; return function(){ var a = []; a.push.apply(a, arguments); $.extend(a, extras); return a; }; })(); // Picker object var Datepicker = function(element, options){ this.dates = new DateArray(); this.viewDate = UTCToday(); this.focusDate = null; this._process_options(options); this.element = $(element); this.isInline = false; this.isInput = this.element.is('input'); this.component = this.element.is('.date') ? 
this.element.find('.add-on, .input-group-addon, .btn') : false; this.hasInput = this.component && this.element.find('input').length; if (this.component && this.component.length === 0) this.component = false; this.picker = $(DPGlobal.template); this._buildEvents(); this._attachEvents(); if (this.isInline){ this.picker.addClass('datepicker-inline').appendTo(this.element); } else { this.picker.addClass('datepicker-dropdown dropdown-menu'); } if (this.o.rtl){ this.picker.addClass('datepicker-rtl'); } this.viewMode = this.o.startView; if (this.o.calendarWeeks) this.picker.find('tfoot th.today') .attr('colspan', function(i, val){ return parseInt(val) + 1; }); this._allow_update = false; this.setStartDate(this._o.startDate); this.setEndDate(this._o.endDate); this.setDaysOfWeekDisabled(this.o.daysOfWeekDisabled); this.fillDow(); this.fillMonths(); this._allow_update = true; this.update(); this.showMode(); if (this.isInline){ this.show(); } }; Datepicker.prototype = { constructor: Datepicker, _process_options: function(opts){ // Store raw options for reference this._o = $.extend({}, this._o, opts); // Processed options var o = this.o = $.extend({}, this._o); // Check if "de-DE" style date is available, if not language should // fallback to 2 letter code eg "de" var lang = o.language; if (!dates[lang]){ lang = lang.split('-')[0]; if (!dates[lang]) lang = defaults.language; } o.language = lang; switch (o.startView){ case 2: case 'decade': o.startView = 2; break; case 1: case 'year': o.startView = 1; break; default: o.startView = 0; } switch (o.minViewMode){ case 1: case 'months': o.minViewMode = 1; break; case 2: case 'years': o.minViewMode = 2; break; default: o.minViewMode = 0; } o.startView = Math.max(o.startView, o.minViewMode); // true, false, or Number > 0 if (o.multidate !== true){ o.multidate = Number(o.multidate) || false; if (o.multidate !== false) o.multidate = Math.max(0, o.multidate); else o.multidate = 1; } o.multidateSeparator = String(o.multidateSeparator); o.weekStart %= 7; o.weekEnd = ((o.weekStart + 6) % 7); var format = DPGlobal.parseFormat(o.format); if (o.startDate !== -Infinity){ if (!!o.startDate){ if (o.startDate instanceof Date) o.startDate = this._local_to_utc(this._zero_time(o.startDate)); else o.startDate = DPGlobal.parseDate(o.startDate, format, o.language); } else { o.startDate = -Infinity; } } if (o.endDate !== Infinity){ if (!!o.endDate){ if (o.endDate instanceof Date) o.endDate = this._local_to_utc(this._zero_time(o.endDate)); else o.endDate = DPGlobal.parseDate(o.endDate, format, o.language); } else { o.endDate = Infinity; } } o.daysOfWeekDisabled = o.daysOfWeekDisabled||[]; if (!$.isArray(o.daysOfWeekDisabled)) o.daysOfWeekDisabled = o.daysOfWeekDisabled.split(/[,\s]*/); o.daysOfWeekDisabled = $.map(o.daysOfWeekDisabled, function(d){ return parseInt(d, 10); }); var plc = String(o.orientation).toLowerCase().split(/\s+/g), _plc = o.orientation.toLowerCase(); plc = $.grep(plc, function(word){ return (/^auto|left|right|top|bottom$/).test(word); }); o.orientation = {x: 'auto', y: 'auto'}; if (!_plc || _plc === 'auto') ; // no action else if (plc.length === 1){ switch (plc[0]){ case 'top': case 'bottom': o.orientation.y = plc[0]; break; case 'left': case 'right': o.orientation.x = plc[0]; break; } } else { _plc = $.grep(plc, function(word){ return (/^left|right$/).test(word); }); o.orientation.x = _plc[0] || 'auto'; _plc = $.grep(plc, function(word){ return (/^top|bottom$/).test(word); }); o.orientation.y = _plc[0] || 'auto'; } }, _events: [], _secondaryEvents: [], 
_applyEvents: function(evs){ for (var i=0, el, ch, ev; i < evs.length; i++){ el = evs[i][0]; if (evs[i].length === 2){ ch = undefined; ev = evs[i][1]; } else if (evs[i].length === 3){ ch = evs[i][1]; ev = evs[i][2]; } el.on(ev, ch); } }, _unapplyEvents: function(evs){ for (var i=0, el, ev, ch; i < evs.length; i++){ el = evs[i][0]; if (evs[i].length === 2){ ch = undefined; ev = evs[i][1]; } else if (evs[i].length === 3){ ch = evs[i][1]; ev = evs[i][2]; } el.off(ev, ch); } }, _buildEvents: function(){ if (this.isInput){ // single input this._events = [ [this.element, { focus: $.proxy(this.show, this), keyup: $.proxy(function(e){ if ($.inArray(e.keyCode, [27,37,39,38,40,32,13,9]) === -1) this.update(); }, this), keydown: $.proxy(this.keydown, this) }] ]; } else if (this.component && this.hasInput){ // component: input + button this._events = [ // For components that are not readonly, allow keyboard nav [this.element.find('input'), { focus: $.proxy(this.show, this), keyup: $.proxy(function(e){ if ($.inArray(e.keyCode, [27,37,39,38,40,32,13,9]) === -1) this.update(); }, this), keydown: $.proxy(this.keydown, this) }], [this.component, { click: $.proxy(this.show, this) }] ]; } else if (this.element.is('div')){ // inline datepicker this.isInline = true; } else { this._events = [ [this.element, { click: $.proxy(this.show, this) }] ]; } this._events.push( // Component: listen for blur on element descendants [this.element, '*', { blur: $.proxy(function(e){ this._focused_from = e.target; }, this) }], // Input: listen for blur on element [this.element, { blur: $.proxy(function(e){ this._focused_from = e.target; }, this) }] ); this._secondaryEvents = [ [this.picker, { click: $.proxy(this.click, this) }], [$(window), { resize: $.proxy(this.place, this) }], [$(document), { 'mousedown touchstart': $.proxy(function(e){ // Clicked outside the datepicker, hide it if (!( this.element.is(e.target) || this.element.find(e.target).length || this.picker.is(e.target) || this.picker.find(e.target).length )){ this.hide(); } }, this) }] ]; }, _attachEvents: function(){ this._detachEvents(); this._applyEvents(this._events); }, _detachEvents: function(){ this._unapplyEvents(this._events); }, _attachSecondaryEvents: function(){ this._detachSecondaryEvents(); this._applyEvents(this._secondaryEvents); }, _detachSecondaryEvents: function(){ this._unapplyEvents(this._secondaryEvents); }, _trigger: function(event, altdate){ var date = altdate || this.dates.get(-1), local_date = this._utc_to_local(date); this.element.trigger({ type: event, date: local_date, dates: $.map(this.dates, this._utc_to_local), format: $.proxy(function(ix, format){ if (arguments.length === 0){ ix = this.dates.length - 1; format = this.o.format; } else if (typeof ix === 'string'){ format = ix; ix = this.dates.length - 1; } format = format || this.o.format; var date = this.dates.get(ix); return DPGlobal.formatDate(date, format, this.o.language); }, this) }); }, show: function(){ if (!this.isInline) this.picker.appendTo('body'); this.picker.show(); this.place(); this._attachSecondaryEvents(); this._trigger('show'); }, hide: function(){ if (this.isInline) return; if (!this.picker.is(':visible')) return; this.focusDate = null; this.picker.hide().detach(); this._detachSecondaryEvents(); this.viewMode = this.o.startView; this.showMode(); if ( this.o.forceParse && ( this.isInput && this.element.val() || this.hasInput && this.element.find('input').val() ) ) this.setValue(); this._trigger('hide'); }, remove: function(){ this.hide(); this._detachEvents(); 
this._detachSecondaryEvents(); this.picker.remove(); delete this.element.data().datepicker; if (!this.isInput){ delete this.element.data().date; } }, _utc_to_local: function(utc){ return utc && new Date(utc.getTime() + (utc.getTimezoneOffset()*60000)); }, _local_to_utc: function(local){ return local && new Date(local.getTime() - (local.getTimezoneOffset()*60000)); }, _zero_time: function(local){ return local && new Date(local.getFullYear(), local.getMonth(), local.getDate()); }, _zero_utc_time: function(utc){ return utc && new Date(Date.UTC(utc.getUTCFullYear(), utc.getUTCMonth(), utc.getUTCDate())); }, getDates: function(){ return $.map(this.dates, this._utc_to_local); }, getUTCDates: function(){ return $.map(this.dates, function(d){ return new Date(d); }); }, getDate: function(){ return this._utc_to_local(this.getUTCDate()); }, getUTCDate: function(){ return new Date(this.dates.get(-1)); }, setDates: function(){ var args = $.isArray(arguments[0]) ? arguments[0] : arguments; this.update.apply(this, args); this._trigger('changeDate'); this.setValue(); }, setUTCDates: function(){ var args = $.isArray(arguments[0]) ? arguments[0] : arguments; this.update.apply(this, $.map(args, this._utc_to_local)); this._trigger('changeDate'); this.setValue(); }, setDate: alias('setDates'), setUTCDate: alias('setUTCDates'), setValue: function(){ var formatted = this.getFormattedDate(); if (!this.isInput){ if (this.component){ this.element.find('input').val(formatted).change(); } } else { this.element.val(formatted).change(); } }, getFormattedDate: function(format){ if (format === undefined) format = this.o.format; var lang = this.o.language; return $.map(this.dates, function(d){ return DPGlobal.formatDate(d, format, lang); }).join(this.o.multidateSeparator); }, setStartDate: function(startDate){ this._process_options({startDate: startDate}); this.update(); this.updateNavArrows(); }, setEndDate: function(endDate){ this._process_options({endDate: endDate}); this.update(); this.updateNavArrows(); }, setDaysOfWeekDisabled: function(daysOfWeekDisabled){ this._process_options({daysOfWeekDisabled: daysOfWeekDisabled}); this.update(); this.updateNavArrows(); }, place: function(){ if (this.isInline) return; var calendarWidth = this.picker.outerWidth(), calendarHeight = this.picker.outerHeight(), visualPadding = 10, windowWidth = $window.width(), windowHeight = $window.height(), scrollTop = $window.scrollTop(); var zIndex = parseInt(this.element.parents().filter(function(){ return $(this).css('z-index') !== 'auto'; }).first().css('z-index'))+10; var offset = this.component ? this.component.parent().offset() : this.element.offset(); var height = this.component ? this.component.outerHeight(true) : this.element.outerHeight(false); var width = this.component ? 
this.component.outerWidth(true) : this.element.outerWidth(false); var left = offset.left, top = offset.top; this.picker.removeClass( 'datepicker-orient-top datepicker-orient-bottom '+ 'datepicker-orient-right datepicker-orient-left' ); if (this.o.orientation.x !== 'auto'){ this.picker.addClass('datepicker-orient-' + this.o.orientation.x); if (this.o.orientation.x === 'right') left -= calendarWidth - width; } // auto x orientation is best-placement: if it crosses a window // edge, fudge it sideways else { // Default to left this.picker.addClass('datepicker-orient-left'); if (offset.left < 0) left -= offset.left - visualPadding; else if (offset.left + calendarWidth > windowWidth) left = windowWidth - calendarWidth - visualPadding; } // auto y orientation is best-situation: top or bottom, no fudging, // decision based on which shows more of the calendar var yorient = this.o.orientation.y, top_overflow, bottom_overflow; if (yorient === 'auto'){ top_overflow = -scrollTop + offset.top - calendarHeight; bottom_overflow = scrollTop + windowHeight - (offset.top + height + calendarHeight); if (Math.max(top_overflow, bottom_overflow) === bottom_overflow) yorient = 'top'; else yorient = 'bottom'; } this.picker.addClass('datepicker-orient-' + yorient); if (yorient === 'top') top += height; else top -= calendarHeight + parseInt(this.picker.css('padding-top')); this.picker.css({ top: top, left: left, zIndex: zIndex }); }, _allow_update: true, update: function(){ if (!this._allow_update) return; var oldDates = this.dates.copy(), dates = [], fromArgs = false; if (arguments.length){ $.each(arguments, $.proxy(function(i, date){ if (date instanceof Date) date = this._local_to_utc(date); dates.push(date); }, this)); fromArgs = true; } else { dates = this.isInput ? this.element.val() : this.element.data('date') || this.element.find('input').val(); if (dates && this.o.multidate) dates = dates.split(this.o.multidateSeparator); else dates = [dates]; delete this.element.data().date; } dates = $.map(dates, $.proxy(function(date){ return DPGlobal.parseDate(date, this.o.format, this.o.language); }, this)); dates = $.grep(dates, $.proxy(function(date){ return ( date < this.o.startDate || date > this.o.endDate || !date ); }, this), true); this.dates.replace(dates); if (this.dates.length) this.viewDate = new Date(this.dates.get(-1)); else if (this.viewDate < this.o.startDate) this.viewDate = new Date(this.o.startDate); else if (this.viewDate > this.o.endDate) this.viewDate = new Date(this.o.endDate); if (fromArgs){ // setting date by clicking this.setValue(); } else if (dates.length){ // setting date by typing if (String(oldDates) !== String(this.dates)) this._trigger('changeDate'); } if (!this.dates.length && oldDates.length) this._trigger('clearDate'); this.fill(); }, fillDow: function(){ var dowCnt = this.o.weekStart, html = '<tr>'; if (this.o.calendarWeeks){ var cell = '<th class="cw">&nbsp;</th>'; html += cell; this.picker.find('.datepicker-days thead tr:first-child').prepend(cell); } while (dowCnt < this.o.weekStart + 7){ html += '<th class="dow">'+dates[this.o.language].daysMin[(dowCnt++)%7]+'</th>'; } html += '</tr>'; this.picker.find('.datepicker-days thead').append(html); }, fillMonths: function(){ var html = '', i = 0; while (i < 12){ html += '<span class="month">'+dates[this.o.language].monthsShort[i++]+'</span>'; } this.picker.find('.datepicker-months td').html(html); }, setRange: function(range){ if (!range || !range.length) delete this.range; else this.range = $.map(range, function(d){ return d.valueOf(); 
}); this.fill(); }, getClassNames: function(date){ var cls = [], year = this.viewDate.getUTCFullYear(), month = this.viewDate.getUTCMonth(), today = new Date(); if (date.getUTCFullYear() < year || (date.getUTCFullYear() === year && date.getUTCMonth() < month)){ cls.push('old'); } else if (date.getUTCFullYear() > year || (date.getUTCFullYear() === year && date.getUTCMonth() > month)){ cls.push('new'); } if (this.focusDate && date.valueOf() === this.focusDate.valueOf()) cls.push('focused'); // Compare internal UTC date with local today, not UTC today if (this.o.todayHighlight && date.getUTCFullYear() === today.getFullYear() && date.getUTCMonth() === today.getMonth() && date.getUTCDate() === today.getDate()){ cls.push('today'); } if (this.dates.contains(date) !== -1) cls.push('active'); if (date.valueOf() < this.o.startDate || date.valueOf() > this.o.endDate || $.inArray(date.getUTCDay(), this.o.daysOfWeekDisabled) !== -1){ cls.push('disabled'); } if (this.range){ if (date > this.range[0] && date < this.range[this.range.length-1]){ cls.push('range'); } if ($.inArray(date.valueOf(), this.range) !== -1){ cls.push('selected'); } } return cls; }, fill: function(){ var d = new Date(this.viewDate), year = d.getUTCFullYear(), month = d.getUTCMonth(), startYear = this.o.startDate !== -Infinity ? this.o.startDate.getUTCFullYear() : -Infinity, startMonth = this.o.startDate !== -Infinity ? this.o.startDate.getUTCMonth() : -Infinity, endYear = this.o.endDate !== Infinity ? this.o.endDate.getUTCFullYear() : Infinity, endMonth = this.o.endDate !== Infinity ? this.o.endDate.getUTCMonth() : Infinity, todaytxt = dates[this.o.language].today || dates['en'].today || '', cleartxt = dates[this.o.language].clear || dates['en'].clear || '', tooltip; this.picker.find('.datepicker-days thead th.datepicker-switch') .text(dates[this.o.language].months[month]+' '+year); this.picker.find('tfoot th.today') .text(todaytxt) .toggle(this.o.todayBtn !== false); this.picker.find('tfoot th.clear') .text(cleartxt) .toggle(this.o.clearBtn !== false); this.updateNavArrows(); this.fillMonths(); var prevMonth = UTCDate(year, month-1, 28), day = DPGlobal.getDaysInMonth(prevMonth.getUTCFullYear(), prevMonth.getUTCMonth()); prevMonth.setUTCDate(day); prevMonth.setUTCDate(day - (prevMonth.getUTCDay() - this.o.weekStart + 7)%7); var nextMonth = new Date(prevMonth); nextMonth.setUTCDate(nextMonth.getUTCDate() + 42); nextMonth = nextMonth.valueOf(); var html = []; var clsName; while (prevMonth.valueOf() < nextMonth){ if (prevMonth.getUTCDay() === this.o.weekStart){ html.push('<tr>'); if (this.o.calendarWeeks){ // ISO 8601: First week contains first thursday. // ISO also states week starts on Monday, but we can be more abstract here. 
var // Start of current week: based on weekstart/current date ws = new Date(+prevMonth + (this.o.weekStart - prevMonth.getUTCDay() - 7) % 7 * 864e5), // Thursday of this week th = new Date(Number(ws) + (7 + 4 - ws.getUTCDay()) % 7 * 864e5), // First Thursday of year, year from thursday yth = new Date(Number(yth = UTCDate(th.getUTCFullYear(), 0, 1)) + (7 + 4 - yth.getUTCDay())%7*864e5), // Calendar week: ms between thursdays, div ms per day, div 7 days calWeek = (th - yth) / 864e5 / 7 + 1; html.push('<td class="cw">'+ calWeek +'</td>'); } } clsName = this.getClassNames(prevMonth); clsName.push('day'); if (this.o.beforeShowDay !== $.noop){ var before = this.o.beforeShowDay(this._utc_to_local(prevMonth)); if (before === undefined) before = {}; else if (typeof(before) === 'boolean') before = {enabled: before}; else if (typeof(before) === 'string') before = {classes: before}; if (before.enabled === false) clsName.push('disabled'); if (before.classes) clsName = clsName.concat(before.classes.split(/\s+/)); if (before.tooltip) tooltip = before.tooltip; } clsName = $.unique(clsName); html.push('<td class="'+clsName.join(' ')+'"' + (tooltip ? ' title="'+tooltip+'"' : '') + '>'+prevMonth.getUTCDate() + '</td>'); if (prevMonth.getUTCDay() === this.o.weekEnd){ html.push('</tr>'); } prevMonth.setUTCDate(prevMonth.getUTCDate()+1); } this.picker.find('.datepicker-days tbody').empty().append(html.join('')); var months = this.picker.find('.datepicker-months') .find('th:eq(1)') .text(year) .end() .find('span').removeClass('active'); $.each(this.dates, function(i, d){ if (d.getUTCFullYear() === year) months.eq(d.getUTCMonth()).addClass('active'); }); if (year < startYear || year > endYear){ months.addClass('disabled'); } if (year === startYear){ months.slice(0, startMonth).addClass('disabled'); } if (year === endYear){ months.slice(endMonth+1).addClass('disabled'); } html = ''; year = parseInt(year/10, 10) * 10; var yearCont = this.picker.find('.datepicker-years') .find('th:eq(1)') .text(year + '-' + (year + 9)) .end() .find('td'); year -= 1; var years = $.map(this.dates, function(d){ return d.getUTCFullYear(); }), classes; for (var i = -1; i < 11; i++){ classes = ['year']; if (i === -1) classes.push('old'); else if (i === 10) classes.push('new'); if ($.inArray(year, years) !== -1) classes.push('active'); if (year < startYear || year > endYear) classes.push('disabled'); html += '<span class="' + classes.join(' ') + '">'+year+'</span>'; year += 1; } yearCont.html(html); }, updateNavArrows: function(){ if (!this._allow_update) return; var d = new Date(this.viewDate), year = d.getUTCFullYear(), month = d.getUTCMonth(); switch (this.viewMode){ case 0: if (this.o.startDate !== -Infinity && year <= this.o.startDate.getUTCFullYear() && month <= this.o.startDate.getUTCMonth()){ this.picker.find('.prev').css({visibility: 'hidden'}); } else { this.picker.find('.prev').css({visibility: 'visible'}); } if (this.o.endDate !== Infinity && year >= this.o.endDate.getUTCFullYear() && month >= this.o.endDate.getUTCMonth()){ this.picker.find('.next').css({visibility: 'hidden'}); } else { this.picker.find('.next').css({visibility: 'visible'}); } break; case 1: case 2: if (this.o.startDate !== -Infinity && year <= this.o.startDate.getUTCFullYear()){ this.picker.find('.prev').css({visibility: 'hidden'}); } else { this.picker.find('.prev').css({visibility: 'visible'}); } if (this.o.endDate !== Infinity && year >= this.o.endDate.getUTCFullYear()){ this.picker.find('.next').css({visibility: 'hidden'}); } else { 
this.picker.find('.next').css({visibility: 'visible'}); } break; } }, click: function(e){ e.preventDefault(); var target = $(e.target).closest('span, td, th'), year, month, day; if (target.length === 1){ switch (target[0].nodeName.toLowerCase()){ case 'th': switch (target[0].className){ case 'datepicker-switch': this.showMode(1); break; case 'prev': case 'next': var dir = DPGlobal.modes[this.viewMode].navStep * (target[0].className === 'prev' ? -1 : 1); switch (this.viewMode){ case 0: this.viewDate = this.moveMonth(this.viewDate, dir); this._trigger('changeMonth', this.viewDate); break; case 1: case 2: this.viewDate = this.moveYear(this.viewDate, dir); if (this.viewMode === 1) this._trigger('changeYear', this.viewDate); break; } this.fill(); break; case 'today': var date = new Date(); date = UTCDate(date.getFullYear(), date.getMonth(), date.getDate(), 0, 0, 0); this.showMode(-2); var which = this.o.todayBtn === 'linked' ? null : 'view'; this._setDate(date, which); break; case 'clear': var element; if (this.isInput) element = this.element; else if (this.component) element = this.element.find('input'); if (element) element.val("").change(); this.update(); this._trigger('changeDate'); if (this.o.autoclose) this.hide(); break; } break; case 'span': if (!target.is('.disabled')){ this.viewDate.setUTCDate(1); if (target.is('.month')){ day = 1; month = target.parent().find('span').index(target); year = this.viewDate.getUTCFullYear(); this.viewDate.setUTCMonth(month); this._trigger('changeMonth', this.viewDate); if (this.o.minViewMode === 1){ this._setDate(UTCDate(year, month, day)); } } else { day = 1; month = 0; year = parseInt(target.text(), 10)||0; this.viewDate.setUTCFullYear(year); this._trigger('changeYear', this.viewDate); if (this.o.minViewMode === 2){ this._setDate(UTCDate(year, month, day)); } } this.showMode(-1); this.fill(); } break; case 'td': if (target.is('.day') && !target.is('.disabled')){ day = parseInt(target.text(), 10)||1; year = this.viewDate.getUTCFullYear(); month = this.viewDate.getUTCMonth(); if (target.is('.old')){ if (month === 0){ month = 11; year -= 1; } else { month -= 1; } } else if (target.is('.new')){ if (month === 11){ month = 0; year += 1; } else { month += 1; } } this._setDate(UTCDate(year, month, day)); } break; } } if (this.picker.is(':visible') && this._focused_from){ $(this._focused_from).focus(); } delete this._focused_from; }, _toggle_multidate: function(date){ var ix = this.dates.contains(date); if (!date){ this.dates.clear(); } else if (ix !== -1){ this.dates.remove(ix); } else { this.dates.push(date); } if (typeof this.o.multidate === 'number') while (this.dates.length > this.o.multidate) this.dates.remove(0); }, _setDate: function(date, which){ if (!which || which === 'date') this._toggle_multidate(date && new Date(date)); if (!which || which === 'view') this.viewDate = date && new Date(date); this.fill();
element = this.element; } else if (this.component){ element = this.element.find('input'); } if (element){ element.change(); } if (this.o.autoclose && (!which || which === 'date')){ this.hide(); } }, moveMonth: function(date, dir){ if (!date) return undefined; if (!dir) return date; var new_date = new Date(date.valueOf()), day = new_date.getUTCDate(), month = new_date.getUTCMonth(), mag = Math.abs(dir), new_month, test; dir = dir > 0 ? 1 : -1; if (mag === 1){ test = dir === -1 // If going back one month, make sure month is not current month // (eg, Mar 31 -> Feb 31 == Feb 28, not Mar 02) ? function(){ return new_date.getUTCMonth() === month; } // If going forward one month, make sure month is as expected // (eg, Jan 31 -> Feb 31 == Feb 28, not Mar 02) : function(){ return new_date.getUTCMonth() !== new_month; }; new_month = month + dir; new_date.setUTCMonth(new_month); // Dec -> Jan (12) or Jan -> Dec (-1) -- limit expected date to 0-11 if (new_month < 0 || new_month > 11) new_month = (new_month + 12) % 12; } else { // For magnitudes >1, move one month at a time... for (var i=0; i < mag; i++) // ...which might decrease the day (eg, Jan 31 to Feb 28, etc)... new_date = this.moveMonth(new_date, dir); // ...then reset the day, keeping it in the new month new_month = new_date.getUTCMonth(); new_date.setUTCDate(day); test = function(){ return new_month !== new_date.getUTCMonth(); }; } // Common date-resetting loop -- if date is beyond end of month, make it // end of month while (test()){ new_date.setUTCDate(--day); new_date.setUTCMonth(new_month); } return new_date; }, moveYear: function(date, dir){ return this.moveMonth(date, dir*12); }, dateWithinRange: function(date){ return date >= this.o.startDate && date <= this.o.endDate; }, keydown: function(e){ if (this.picker.is(':not(:visible)')){ if (e.keyCode === 27) // allow escape to hide and re-show picker this.show(); return; } var dateChanged = false, dir, newDate, newViewDate, focusDate = this.focusDate || this.viewDate; switch (e.keyCode){ case 27: // escape if (this.focusDate){ this.focusDate = null; this.viewDate = this.dates.get(-1) || this.viewDate; this.fill(); } else this.hide(); e.preventDefault(); break; case 37: // left case 39: // right if (!this.o.keyboardNavigation) break; dir = e.keyCode === 37 ? -1 : 1; if (e.ctrlKey){ newDate = this.moveYear(this.dates.get(-1) || UTCToday(), dir); newViewDate = this.moveYear(focusDate, dir); this._trigger('changeYear', this.viewDate); } else if (e.shiftKey){ newDate = this.moveMonth(this.dates.get(-1) || UTCToday(), dir); newViewDate = this.moveMonth(focusDate, dir); this._trigger('changeMonth', this.viewDate); } else { newDate = new Date(this.dates.get(-1) || UTCToday()); newDate.setUTCDate(newDate.getUTCDate() + dir); newViewDate = new Date(focusDate); newViewDate.setUTCDate(focusDate.getUTCDate() + dir); } if (this.dateWithinRange(newDate)){ this.focusDate = this.viewDate = newViewDate; this.setValue(); this.fill(); e.preventDefault(); } break; case 38: // up case 40: // down if (!this.o.keyboardNavigation) break; dir = e.keyCode === 38 ? 
-1 : 1; if (e.ctrlKey){ newDate = this.moveYear(this.dates.get(-1) || UTCToday(), dir); newViewDate = this.moveYear(focusDate, dir); this._trigger('changeYear', this.viewDate); } else if (e.shiftKey){ newDate = this.moveMonth(this.dates.get(-1) || UTCToday(), dir); newViewDate = this.moveMonth(focusDate, dir); this._trigger('changeMonth', this.viewDate); } else { newDate = new Date(this.dates.get(-1) || UTCToday()); newDate.setUTCDate(newDate.getUTCDate() + dir * 7); newViewDate = new Date(focusDate); newViewDate.setUTCDate(focusDate.getUTCDate() + dir * 7); } if (this.dateWithinRange(newDate)){ this.focusDate = this.viewDate = newViewDate; this.setValue(); this.fill(); e.preventDefault(); } break; case 32: // spacebar // Spacebar is used in manually typing dates in some formats. // As such, its behavior should not be hijacked. break; case 13: // enter focusDate = this.focusDate || this.dates.get(-1) || this.viewDate; this._toggle_multidate(focusDate); dateChanged = true; this.focusDate = null; this.viewDate = this.dates.get(-1) || this.viewDate; this.setValue(); this.fill(); if (this.picker.is(':visible')){ e.preventDefault(); if (this.o.autoclose) this.hide(); } break; case 9: // tab this.focusDate = null; this.viewDate = this.dates.get(-1) || this.viewDate; this.fill(); this.hide(); break; } if (dateChanged){ if (this.dates.length) this._trigger('changeDate'); else this._trigger('clearDate'); var element; if (this.isInput){ element = this.element; } else if (this.component){ element = this.element.find('input'); } if (element){ element.change(); } } }, showMode: function(dir){ if (dir){ this.viewMode = Math.max(this.o.minViewMode, Math.min(2, this.viewMode + dir)); } this.picker .find('>div') .hide() .filter('.datepicker-'+DPGlobal.modes[this.viewMode].clsName) .css('display', 'block'); this.updateNavArrows(); } }; var DateRangePicker = function(element, options){ this.element = $(element); this.inputs = $.map(options.inputs, function(i){ return i.jquery ? i[0] : i; }); delete options.inputs; $(this.inputs) .datepicker(options) .bind('changeDate', $.proxy(this.dateUpdated, this)); this.pickers = $.map(this.inputs, function(i){ return $(i).data('datepicker'); }); this.updateDates(); }; DateRangePicker.prototype = { updateDates: function(){ this.dates = $.map(this.pickers, function(i){ return i.getUTCDate(); }); this.updateRanges(); }, updateRanges: function(){ var range = $.map(this.dates, function(d){ return d.valueOf(); }); $.each(this.pickers, function(i, p){ p.setRange(range); }); }, dateUpdated: function(e){ // `this.updating` is a workaround for preventing infinite recursion // between `changeDate` triggering and `setUTCDate` calling. Until // there is a better mechanism. 
if (this.updating) return; this.updating = true; var dp = $(e.target).data('datepicker'), new_date = dp.getUTCDate(), i = $.inArray(e.target, this.inputs), l = this.inputs.length; if (i === -1) return; $.each(this.pickers, function(i, p){ if (!p.getUTCDate()) p.setUTCDate(new_date); }); if (new_date < this.dates[i]){ // Date being moved earlier/left while (i >= 0 && new_date < this.dates[i]){ this.pickers[i--].setUTCDate(new_date); } } else if (new_date > this.dates[i]){ // Date being moved later/right while (i < l && new_date > this.dates[i]){ this.pickers[i++].setUTCDate(new_date); } } this.updateDates(); delete this.updating; }, remove: function(){ $.map(this.pickers, function(p){ p.remove(); }); delete this.element.data().datepicker; } }; function opts_from_el(el, prefix){ // Derive options from element data-attrs var data = $(el).data(), out = {}, inkey, replace = new RegExp('^' + prefix.toLowerCase() + '([A-Z])'); prefix = new RegExp('^' + prefix.toLowerCase()); function re_lower(_,a){ return a.toLowerCase(); } for (var key in data) if (prefix.test(key)){ inkey = key.replace(replace, re_lower); out[inkey] = data[key]; } return out; } function opts_from_locale(lang){ // Derive options from locale plugins var out = {}; // Check if "de-DE" style date is available, if not language should // fallback to 2 letter code eg "de" if (!dates[lang]){ lang = lang.split('-')[0]; if (!dates[lang]) return; } var d = dates[lang]; $.each(locale_opts, function(i,k){ if (k in d) out[k] = d[k]; }); return out; } var old = $.fn.datepicker; $.fn.datepicker = function(option){ var args = Array.apply(null, arguments); args.shift(); var internal_return; this.each(function(){ var $this = $(this), data = $this.data('datepicker'), options = typeof option === 'object' && option; if (!data){ var elopts = opts_from_el(this, 'date'), // Preliminary otions xopts = $.extend({}, defaults, elopts, options), locopts = opts_from_locale(xopts.language), // Options priority: js args, data-attrs, locales, defaults opts = $.extend({}, defaults, locopts, elopts, options); if ($this.is('.input-daterange') || opts.inputs){ var ropts = { inputs: opts.inputs || $this.find('input').toArray() }; $this.data('datepicker', (data = new DateRangePicker(this, $.extend(opts, ropts)))); } else { $this.data('datepicker', (data = new Datepicker(this, opts))); } } if (typeof option === 'string' && typeof data[option] === 'function'){ internal_return = data[option].apply(data, args); if (internal_return !== undefined) return false; } }); if (internal_return !== undefined) return internal_return; else return this; }; var defaults = $.fn.datepicker.defaults = { autoclose: false, beforeShowDay: $.noop, calendarWeeks: false, clearBtn: false, daysOfWeekDisabled: [], endDate: Infinity, forceParse: true, format: 'dd/mm/yyyy', keyboardNavigation: true, language: 'en', minViewMode: 0, multidate: false, multidateSeparator: ',', orientation: "auto", rtl: false, startDate: -Infinity, startView: 0, todayBtn: false, todayHighlight: false, weekStart: 0 }; var locale_opts = $.fn.datepicker.locale_opts = [ 'format', 'rtl', 'weekStart' ]; $.fn.datepicker.Constructor = Datepicker; var dates = $.fn.datepicker.dates = { en: { days: ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"], daysShort: ["Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"], daysMin: ["Su", "Mo", "Tu", "We", "Th", "Fr", "Sa", "Su"], months: ["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", 
"December"], monthsShort: ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"], today: "Today", clear: "Clear" } }; var DPGlobal = { modes: [ { clsName: 'days', navFnc: 'Month', navStep: 1 }, { clsName: 'months', navFnc: 'FullYear', navStep: 1 }, { clsName: 'years', navFnc: 'FullYear', navStep: 10 }], isLeapYear: function(year){ return (((year % 4 === 0) && (year % 100 !== 0)) || (year % 400 === 0)); }, getDaysInMonth: function(year, month){ return [31, (DPGlobal.isLeapYear(year) ? 29 : 28), 31, 30, 31, 30, 31, 31, 30, 31, 30, 31][month]; }, validParts: /dd?|DD?|mm?|MM?|yy(?:yy)?/g, nonpunctuation: /[^ -\/:-@\[\u3400-\u9fff-`{-~\t\n\r]+/g, parseFormat: function(format){ // IE treats \0 as a string end in inputs (truncating the value), // so it's a bad format delimiter, anyway var separators = format.replace(this.validParts, '\0').split('\0'), parts = format.match(this.validParts); if (!separators || !separators.length || !parts || parts.length === 0){ throw new Error("Invalid date format."); } return {separators: separators, parts: parts}; }, parseDate: function(date, format, language){ if (!date) return undefined; if (date instanceof Date) return date; if (typeof format === 'string') format = DPGlobal.parseFormat(format); var part_re = /([\-+]\d+)([dmwy])/, parts = date.match(/([\-+]\d+)([dmwy])/g), part, dir, i; if (/^[\-+]\d+[dmwy]([\s,]+[\-+]\d+[dmwy])*$/.test(date)){ date = new Date(); for (i=0; i < parts.length; i++){ part = part_re.exec(parts[i]); dir = parseInt(part[1]); switch (part[2]){ case 'd': date.setUTCDate(date.getUTCDate() + dir); break; case 'm': date = Datepicker.prototype.moveMonth.call(Datepicker.prototype, date, dir); break; case 'w': date.setUTCDate(date.getUTCDate() + dir * 7); break; case 'y': date = Datepicker.prototype.moveYear.call(Datepicker.prototype, date, dir); break; } } return UTCDate(date.getUTCFullYear(), date.getUTCMonth(), date.getUTCDate(), 0, 0, 0); } parts = date && date.match(this.nonpunctuation) || []; date = new Date(); var parsed = {}, setters_order = ['yyyy', 'yy', 'M', 'MM', 'm', 'mm', 'd', 'dd'], setters_map = { yyyy: function(d,v){ return d.setUTCFullYear(v); }, yy: function(d,v){ return d.setUTCFullYear(2000+v); }, m: function(d,v){ if (isNaN(d)) return d; v -= 1; while (v < 0) v += 12; v %= 12; d.setUTCMonth(v); while (d.getUTCMonth() !== v) d.setUTCDate(d.getUTCDate()-1); return d; }, d: function(d,v){ return d.setUTCDate(v); } }, val, filtered; setters_map['M'] = setters_map['MM'] = setters_map['mm'] = setters_map['m']; setters_map['dd'] = setters_map['d']; date = UTCDate(date.getFullYear(), date.getMonth(), date.getDate(), 0, 0, 0); var fparts = format.parts.slice(); // Remove noop parts if (parts.length !== fparts.length){ fparts = $(fparts).filter(function(i,p){ return $.inArray(p, setters_order) !== -1; }).toArray(); } // Process remainder function match_part(){ var m = this.slice(0, parts[i].length), p = parts[i].slice(0, m.length); return m === p; } if (parts.length === fparts.length){ var cnt; for (i=0, cnt = fparts.length; i < cnt; i++){ val = parseInt(parts[i], 10); part = fparts[i]; if (isNaN(val)){ switch (part){ case 'MM': filtered = $(dates[language].months).filter(match_part); val = $.inArray(filtered[0], dates[language].months) + 1; break; case 'M': filtered = $(dates[language].monthsShort).filter(match_part); val = $.inArray(filtered[0], dates[language].monthsShort) + 1; break; } } parsed[part] = val; } var _date, s; for (i=0; i < setters_order.length; i++){ s = setters_order[i]; if (s in 
parsed && !isNaN(parsed[s])){ _date = new Date(date); setters_map[s](_date, parsed[s]); if (!isNaN(_date)) date = _date; } } } return date; }, formatDate: function(date, format, language){ if (!date) return ''; if (typeof format === 'string') format = DPGlobal.parseFormat(format); var val = { d: date.getUTCDate(), D: dates[language].daysShort[date.getUTCDay()], DD: dates[language].days[date.getUTCDay()], m: date.getUTCMonth() + 1, M: dates[language].monthsShort[date.getUTCMonth()], MM: dates[language].months[date.getUTCMonth()], yy: date.getUTCFullYear().toString().substring(2), yyyy: date.getUTCFullYear() }; val.dd = (val.d < 10 ? '0' : '') + val.d; val.mm = (val.m < 10 ? '0' : '') + val.m; date = []; var seps = $.extend([], format.separators); for (var i=0, cnt = format.parts.length; i <= cnt; i++){ if (seps.length) date.push(seps.shift()); date.push(val[format.parts[i]]); } return date.join(''); }, headTemplate: '<thead>'+ '<tr>'+ '<th class="prev">&laquo;</th>'+ '<th colspan="5" class="datepicker-switch"></th>'+ '<th class="next">&raquo;</th>'+ '</tr>'+ '</thead>', contTemplate: '<tbody><tr><td colspan="7"></td></tr></tbody>', footTemplate: '<tfoot>'+ '<tr>'+ '<th colspan="7" class="today"></th>'+ '</tr>'+ '<tr>'+ '<th colspan="7" class="clear"></th>'+ '</tr>'+ '</tfoot>' }; DPGlobal.template = '<div class="datepicker">'+ '<div class="datepicker-days">'+ '<table class="table table-condensed">'+ DPGlobal.headTemplate+ '<tbody></tbody>'+ DPGlobal.footTemplate+ '</table>'+ '</div>'+ '<div class="datepicker-months">'+ '<table class="table table-condensed">'+ DPGlobal.headTemplate+ DPGlobal.contTemplate+ DPGlobal.footTemplate+ '</table>'+ '</div>'+ '<div class="datepicker-years">'+ '<table class="table table-condensed">'+ DPGlobal.headTemplate+ DPGlobal.contTemplate+ DPGlobal.footTemplate+ '</table>'+ '</div>'+ '</div>'; $.fn.datepicker.DPGlobal = DPGlobal; /* DATEPICKER NO CONFLICT * =================== */ $.fn.datepicker.noConflict = function(){ $.fn.datepicker = old; return this; }; /* DATEPICKER DATA-API * ================== */ $(document).on( 'focus.datepicker.data-api click.datepicker.data-api', '[data-provide="datepicker"]', function(e){ var $this = $(this); if ($this.data('datepicker')) return; e.preventDefault(); // component click requires us to explicitly show it $this.datepicker('show'); } ); $(function(){ $('[data-provide="datepicker-inline"]').datepicker(); }); }(window.jQuery));
this.setValue(); this._trigger('changeDate'); var element; if (this.isInput){
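A compact Python sketch of the parseFormat logic above: split a format string such as "dd/mm/yyyy" into separator and part lists, using the same \0 delimiter trick and a regex mirroring validParts (the function name is illustrative, not part of the plugin):

import re

# Mirrors DPGlobal.validParts: dd?|DD?|mm?|MM?|yy(?:yy)?
VALID_PARTS = re.compile(r"dd?|DD?|mm?|MM?|yy(?:yy)?")

def parse_format(fmt: str) -> dict:
    # Replace each recognized part with \0, then split on it to get separators.
    separators = VALID_PARTS.sub("\0", fmt).split("\0")
    parts = VALID_PARTS.findall(fmt)
    if not parts:
        raise ValueError("Invalid date format.")
    return {"separators": separators, "parts": parts}

print(parse_format("dd/mm/yyyy"))
# {'separators': ['', '/', '/', ''], 'parts': ['dd', 'mm', 'yyyy']}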
netpoll_epoll.go
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build linux

package runtime

import "unsafe"

func epollcreate(size int32) int32
func epollcreate1(flags int32) int32

//go:noescape
func epollctl(epfd, op, fd int32, ev *epollevent) int32

//go:noescape
func epollwait(epfd int32, ev *epollevent, nev, timeout int32) int32

func closeonexec(fd int32)

var (
	epfd int32 = -1 // epoll descriptor
)

func netpollinit() {
	epfd = epollcreate1(_EPOLL_CLOEXEC)
	if epfd >= 0 {
		return
	}
	epfd = epollcreate(1024)
	if epfd >= 0 {
		closeonexec(epfd)
		return
	}
	println("runtime: epollcreate failed with", -epfd)
	throw("runtime: netpollinit failed")
}

func netpolldescriptor() uintptr {
	return uintptr(epfd)
}

func netpollopen(fd uintptr, pd *pollDesc) int32 {
	var ev epollevent
	ev.events = _EPOLLIN | _EPOLLOUT | _EPOLLRDHUP | _EPOLLET
	*(**pollDesc)(unsafe.Pointer(&ev.data)) = pd
	return -epollctl(epfd, _EPOLL_CTL_ADD, int32(fd), &ev)
func netpollclose(fd uintptr) int32 {
	var ev epollevent
	return -epollctl(epfd, _EPOLL_CTL_DEL, int32(fd), &ev)
}

func netpollarm(pd *pollDesc, mode int) {
	throw("runtime: unused")
}

// polls for ready network connections
// returns list of goroutines that become runnable
func netpoll(block bool) gList {
	if epfd == -1 {
		return gList{}
	}
	waitms := int32(-1)
	if !block {
		waitms = 0
	}
	var events [128]epollevent
retry:
	n := epollwait(epfd, &events[0], int32(len(events)), waitms)
	if n < 0 {
		if n != -_EINTR {
			println("runtime: epollwait on fd", epfd, "failed with", -n)
			throw("runtime: netpoll failed")
		}
		goto retry
	}
	var toRun gList
	for i := int32(0); i < n; i++ {
		ev := &events[i]
		if ev.events == 0 {
			continue
		}
		var mode int32
		if ev.events&(_EPOLLIN|_EPOLLRDHUP|_EPOLLHUP|_EPOLLERR) != 0 {
			mode += 'r'
		}
		if ev.events&(_EPOLLOUT|_EPOLLHUP|_EPOLLERR) != 0 {
			mode += 'w'
		}
		if mode != 0 {
			pd := *(**pollDesc)(unsafe.Pointer(&ev.data))
			netpollready(&toRun, pd, mode)
		}
	}
	if block && toRun.empty() {
		goto retry
	}
	return toRun
}
}
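netpollopen registers every descriptor edge-triggered for both read and write readiness. A minimal user-space sketch of the same epoll mask and readiness classification, using Python's standard select module (the listening socket is only illustrative setup):

import select
import socket

# Socket whose readiness we watch (illustrative).
srv = socket.socket()
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srv.bind(("127.0.0.1", 0))
srv.listen()
srv.setblocking(False)

ep = select.epoll()
# Same mask netpollopen installs: read + write + peer-close, edge-triggered.
mask = select.EPOLLIN | select.EPOLLOUT | select.EPOLLRDHUP | select.EPOLLET
ep.register(srv.fileno(), mask)

# One non-blocking pass, like netpoll(false) with waitms = 0.
for fd, events in ep.poll(timeout=0):
    # Classify the same way netpoll derives its 'r'/'w' mode.
    readable = events & (select.EPOLLIN | select.EPOLLRDHUP | select.EPOLLHUP | select.EPOLLERR)
    writable = events & (select.EPOLLOUT | select.EPOLLHUP | select.EPOLLERR)
    print(fd, "r" if readable else "", "w" if writable else "")

ep.close()
srv.close()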
wander_sim.py
#!/usr/bin/env python
import rospy
from geometry_msgs.msg import Twist
from sensor_msgs.msg import Joy
from sensor_msgs.msg import LaserScan
from math import radians
import numpy as np


class
(object):
    def __init__(self):
        self.running = False

    def callback(self, joy_msg):
        if joy_msg.buttons[0] == 1:
            self.running = not self.running
            rospy.loginfo(repr(joy_msg))

    def run(self):
        rospy.Subscriber('joy', Joy, self.callback)
        while (not rospy.is_shutdown()) and (not self.running):
            print("waiting for joy stick...")
        if self.running:
            empty_msg = Twist()
            cmd_vel_pub.publish(empty_msg)
            rate.sleep()


def scan_callback(scan):
    global g_range_ahead
    depths = []
    for dist in scan.ranges:
        if not np.isnan(dist):
            depths.append(dist)
    if len(depths) == 0:
        g_range_ahead = stop_thre
    else:
        g_range_ahead = min(depths)
    # g_range_ahead = msg.ranges[len(msg.ranges)/2]
    # print "range ahead: %0.2f" % g_range_ahead


sub = '/scan'
pub = '/cmd_vel_mux/input/teleop'

g_range_ahead = 1  # anything to start

# init_node must run before any publishers/subscribers are created,
# otherwise rospy raises an exception.
rospy.init_node('wander')
scan_sub = rospy.Subscriber(sub, LaserScan, scan_callback)
cmd_vel_pub = rospy.Publisher(pub, Twist, queue_size=1)  # 'cmd_vel/velocityramp'

state_change_time = rospy.Time.now()
driving_forward = True
rate = rospy.Rate(10)
stop_thre = rospy.get_param("stop_threshold")

joy_run = BehaviorSwitch()
joy_run.run()
steer = np.random.uniform(-1, 1)

while (not rospy.is_shutdown()) and joy_run.running:
    if driving_forward:
        if (g_range_ahead < stop_thre or rospy.Time.now() > state_change_time):
            driving_forward = False
            state_change_time = rospy.Time.now() + rospy.Duration(1)
    else:  # we're not driving_forward
        if rospy.Time.now() > state_change_time:
            driving_forward = True  # we're done spinning, time to go forward!
            state_change_time = rospy.Time.now() + rospy.Duration(30)

    twist = Twist()
    if driving_forward:
        twist.linear.x = 0.9
        twist.angular.z = steer
        # twist.linear.y = steer
    else:
        twist.angular.z = 0.9  # e.g. radians(45): 45 deg/s in rad/s
        if steer != 0:
            steer = np.random.normal(0.5, 0.2)
    cmd_vel_pub.publish(twist)
    rate.sleep()
BehaviorSwitch
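The control loop above alternates roughly 30 s of driving with 1 s of spinning, switching to the spin state early when an obstacle comes closer than stop_threshold. A ROS-free Python sketch of just that timer-based state machine (durations mirror the node; the function name and inputs are illustrative):

import random
import time

def wander_step(state, now, range_ahead, stop_threshold=0.8):
    # state is (driving_forward, deadline); returns the next state.
    driving_forward, deadline = state
    if driving_forward:
        if range_ahead < stop_threshold or now > deadline:
            return (False, now + 1.0)    # spin for ~1 s
    elif now > deadline:
        return (True, now + 30.0)        # drive forward for ~30 s
    return (driving_forward, deadline)

state = (True, time.time() + 30.0)
for _ in range(5):
    state = wander_step(state, time.time(), random.uniform(0.2, 3.0))
    print("forward" if state[0] else "spinning")
    time.sleep(0.1)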
validate_test.go
package model

import "testing"

func TestVerifyEmail(t *testing.T)
{
	cases := []struct {
		email      string
		shouldPass bool
	}{
		{"[email protected]", true},
		{"[email protected]", true},
		{"[email protected]", true},
		{"[email protected]", true},
		{"bad [email protected]", false},
		{"[email protected]", false},
		{"@example.com", false},
		{"plain", false},
		{"email.example.com", false},
	}

	for i, testCase := range cases {
		if verifyEmail(testCase.email) != testCase.shouldPass {
			t.Fatalf("(%d) %s expected %t", i, testCase.email, testCase.shouldPass)
		}
	}
}
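verifyEmail itself is not shown here. A comparable table-driven check in Python, with an assumed regex stand-in (is_valid_email is hypothetical, not the package's implementation):

import re
import unittest

# Hypothetical stand-in for the unshown verifyEmail helper.
EMAIL_RE = re.compile(r"^[^@\s]+@[^@\s]+\.[^@\s]+$")

def is_valid_email(addr):
    return EMAIL_RE.match(addr) is not None

class TestVerifyEmail(unittest.TestCase):
    def test_table(self):
        cases = [
            ("user@example.com", True),
            ("bad user@example.com", False),
            ("@example.com", False),
            ("plain", False),
            ("email.example.com", False),
        ]
        for i, (email, should_pass) in enumerate(cases):
            self.assertEqual(is_valid_email(email), should_pass,
                             "(%d) %s expected %s" % (i, email, should_pass))

if __name__ == "__main__":
    unittest.main()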
test_sale_product_attribute_value_config.py
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. from odoo import fields from odoo.addons.product.tests.test_product_attribute_value_config import TestProductAttributeValueSetup from odoo.tests import tagged class TestSaleProductAttributeValueSetup(TestProductAttributeValueSetup): def _setup_currency(self, currency_ratio=2): """Get or create a currency. This makes the test non-reliant on demo. With an easy currency rate, for a simple 2 ratio in the following tests. """ from_currency = self.computer.currency_id self._set_or_create_rate_today(from_currency, rate=1) to_currency = self._get_or_create_currency("my currency", "C") self._set_or_create_rate_today(to_currency, currency_ratio) return to_currency def _set_or_create_rate_today(self, currency, rate): """Get or create a currency rate for today. This makes the test non-reliant on demo data.""" name = fields.Date.today() currency_id = currency.id company_id = self.env.company.id CurrencyRate = self.env['res.currency.rate'] currency_rate = CurrencyRate.search([ ('company_id', '=', company_id), ('currency_id', '=', currency_id), ('name', '=', name), ]) if currency_rate: currency_rate.rate = rate else: CurrencyRate.create({ 'company_id': company_id, 'currency_id': currency_id, 'name': name, 'rate': rate, }) def _get_or_create_currency(self, name, symbol): """Get or create a currency based on name. This makes the test non-reliant on demo data.""" currency = self.env['res.currency'].search([('name', '=', name)]) return currency or currency.create({ 'name': name, 'symbol': symbol, }) @tagged('post_install', '-at_install') class TestSaleProductAttributeValueConfig(TestSaleProductAttributeValueSetup): def _setup_pricelist(self, currency_ratio=2): to_currency = self._setup_currency(currency_ratio) discount = 10 pricelist = self.env['product.pricelist'].create({ 'name': 'test pl', 'currency_id': to_currency.id, 'company_id': self.computer.company_id.id, }) pricelist_item = self.env['product.pricelist.item'].create({ 'min_quantity': 2, 'compute_price': 'percentage', 'percent_price': discount, 'pricelist_id': pricelist.id, }) return (pricelist, pricelist_item, currency_ratio, 1 - discount / 100) def test_01_is_combination_possible_archived(self): """The goal is to test the possibility of archived combinations. This test could not be put into product module because there was no model which had product_id as required and without cascade on delete. Here we have the sales order line in this situation. This is a necessary condition for `_create_variant_ids` to archive instead of delete the variants. 
""" def do_test(self): computer_ssd_256 = self._get_product_template_attribute_value(self.ssd_256) computer_ram_8 = self._get_product_template_attribute_value(self.ram_8) computer_hdd_1 = self._get_product_template_attribute_value(self.hdd_1) computer_hdd_2 = self._get_product_template_attribute_value(self.hdd_2) variant = self.computer._get_variant_for_combination(computer_ssd_256 + computer_ram_8 + computer_hdd_1) variant2 = self.computer._get_variant_for_combination(computer_ssd_256 + computer_ram_8 + computer_hdd_2) self.assertTrue(variant) self.assertTrue(variant2) # Create a dummy SO to prevent the variant from being deleted by # _create_variant_ids() because the variant is a related field that # is required on the SO line so = self.env['sale.order'].create({'partner_id': 1}) self.env['sale.order.line'].create({ 'order_id': so.id, 'name': "test", 'product_id': variant.id }) # additional variant to test correct ignoring when mismatch values self.env['sale.order.line'].create({ 'order_id': so.id, 'name': "test", 'product_id': variant2.id }) variant2.active = False # CASE: 1 not archived, 2 archived self.assertTrue(self.computer._is_combination_possible(computer_ssd_256 + computer_ram_8 + computer_hdd_1)) self.assertFalse(self.computer._is_combination_possible(computer_ssd_256 + computer_ram_8 + computer_hdd_2)) # CASE: both archived combination (without no_variant) variant.active = False self.assertFalse(self.computer._is_combination_possible(computer_ssd_256 + computer_ram_8 + computer_hdd_2)) self.assertFalse(self.computer._is_combination_possible(computer_ssd_256 + computer_ram_8 + computer_hdd_1)) # CASE: OK after attribute line removed self.computer_hdd_attribute_lines.unlink() self.assertTrue(self.computer._is_combination_possible(computer_ssd_256 + computer_ram_8)) # CASE: not archived (with no_variant) self.hdd_attribute.create_variant = 'no_variant' self._add_hdd_attribute_line() computer_hdd_1 = self._get_product_template_attribute_value(self.hdd_1) computer_hdd_2 = self._get_product_template_attribute_value(self.hdd_2) self.assertTrue(self.computer._is_combination_possible(computer_ssd_256 + computer_ram_8 + computer_hdd_1)) # CASE: archived combination found (with no_variant) variant = self.computer._get_variant_for_combination(computer_ssd_256 + computer_ram_8 + computer_hdd_1) variant.active = False self.assertFalse(self.computer._is_combination_possible(computer_ssd_256 + computer_ram_8 + computer_hdd_1)) # CASE: archived combination has different attributes (including no_variant) self.computer_ssd_attribute_lines.unlink() variant4 = self.computer._get_variant_for_combination(computer_ram_8 + computer_hdd_1) self.env['sale.order.line'].create({ 'order_id': so.id, 'name': "test", 'product_id': variant4.id }) self.assertTrue(self.computer._is_combination_possible(computer_ram_8 + computer_hdd_1)) # CASE: archived combination has different attributes (without no_variant) self.computer_hdd_attribute_lines.unlink() self.hdd_attribute.create_variant = 'always' self._add_hdd_attribute_line() computer_ssd_256 = self._get_product_template_attribute_value(self.ssd_256) computer_ram_8 = self._get_product_template_attribute_value(self.ram_8) computer_hdd_1 = self._get_product_template_attribute_value(self.hdd_1) computer_hdd_2 = self._get_product_template_attribute_value(self.hdd_2) variant5 = self.computer._get_variant_for_combination(computer_ram_8 + computer_hdd_1) self.env['sale.order.line'].create({ 'order_id': so.id, 'name': "test", 'product_id': variant5.id }) 
self.assertTrue(variant4 != variant5) self.assertTrue(self.computer._is_combination_possible(computer_ram_8 + computer_hdd_1)) computer_ssd_256_before = self._get_product_template_attribute_value(self.ssd_256) do_test(self) # CASE: add back the removed attribute and try everything again self.computer_ssd_attribute_lines = self.env['product.template.attribute.line'].create({ 'product_tmpl_id': self.computer.id, 'attribute_id': self.ssd_attribute.id, 'value_ids': [(6, 0, [self.ssd_256.id, self.ssd_512.id])], }) computer_ssd_256_after = self._get_product_template_attribute_value(self.ssd_256) self.assertEqual(computer_ssd_256_after, computer_ssd_256_before) self.assertEqual(computer_ssd_256_after.attribute_line_id, computer_ssd_256_before.attribute_line_id) do_test(self) def test_02_get_combination_info(self): # If using multi-company, company_id will be False, and this code should # still work. # The case with a company_id will be implicitly tested on website_sale. self.computer.company_id = False computer_ssd_256 = self._get_product_template_attribute_value(self.ssd_256) computer_ram_8 = self._get_product_template_attribute_value(self.ram_8) computer_hdd_1 = self._get_product_template_attribute_value(self.hdd_1) # CASE: no pricelist, no currency, with existing combination, with price_extra on attributes combination = computer_ssd_256 + computer_ram_8 + computer_hdd_1 computer_variant = self.computer._get_variant_for_combination(combination) res = self.computer._get_combination_info(combination) self.assertEqual(res['product_template_id'], self.computer.id) self.assertEqual(res['product_id'], computer_variant.id) self.assertEqual(res['display_name'], "Super Computer (256 GB, 8 GB, 1 To)") self.assertEqual(res['price'], 2222) self.assertEqual(res['list_price'], 2222) self.assertEqual(res['price_extra'], 222) # CASE: no combination, product given res = self.computer._get_combination_info(self.env['product.template.attribute.value'], computer_variant.id) self.assertEqual(res['product_template_id'], self.computer.id) self.assertEqual(res['product_id'], computer_variant.id) self.assertEqual(res['display_name'], "Super Computer (256 GB, 8 GB, 1 To)") self.assertEqual(res['price'], 2222) self.assertEqual(res['list_price'], 2222) self.assertEqual(res['price_extra'], 222) # CASE: using pricelist, quantity rule pricelist, pricelist_item, currency_ratio, discount_ratio = self._setup_pricelist() res = self.computer._get_combination_info(combination, add_qty=2, pricelist=pricelist) self.assertEqual(res['product_template_id'], self.computer.id) self.assertEqual(res['product_id'], computer_variant.id) self.assertEqual(res['display_name'], "Super Computer (256 GB, 8 GB, 1 To)") self.assertEqual(res['price'], 2222 * currency_ratio * discount_ratio) self.assertEqual(res['list_price'], 2222 * currency_ratio) self.assertEqual(res['price_extra'], 222 * currency_ratio) # CASE: no_variant combination, it's another variant now self.computer_ssd_attribute_lines.unlink() self.ssd_attribute.create_variant = 'no_variant' self._add_ssd_attribute_line() computer_ssd_256 = self._get_product_template_attribute_value(self.ssd_256) computer_ram_8 = self._get_product_template_attribute_value(self.ram_8) computer_hdd_1 = self._get_product_template_attribute_value(self.hdd_1) combination = computer_ssd_256 + computer_ram_8 + computer_hdd_1 computer_variant_new = self.computer._get_variant_for_combination(combination) self.assertTrue(computer_variant_new) res = self.computer._get_combination_info(combination, add_qty=2, 
pricelist=pricelist) self.assertEqual(res['product_template_id'], self.computer.id) self.assertEqual(res['product_id'], computer_variant_new.id) self.assertEqual(res['display_name'], "Super Computer (8 GB, 1 To)") self.assertEqual(res['price'], 2222 * currency_ratio * discount_ratio) self.assertEqual(res['list_price'], 2222 * currency_ratio) self.assertEqual(res['price_extra'], 222 * currency_ratio) # CASE: dynamic combination, but the variant already exists self.computer_hdd_attribute_lines.unlink() self.hdd_attribute.create_variant = 'dynamic' self._add_hdd_attribute_line() computer_ssd_256 = self._get_product_template_attribute_value(self.ssd_256) computer_ram_8 = self._get_product_template_attribute_value(self.ram_8) computer_hdd_1 = self._get_product_template_attribute_value(self.hdd_1) combination = computer_ssd_256 + computer_ram_8 + computer_hdd_1 computer_variant_new = self.computer._create_product_variant(combination) self.assertTrue(computer_variant_new) res = self.computer._get_combination_info(combination, add_qty=2, pricelist=pricelist) self.assertEqual(res['product_template_id'], self.computer.id) self.assertEqual(res['product_id'], computer_variant_new.id) self.assertEqual(res['display_name'], "Super Computer (8 GB, 1 To)") self.assertEqual(res['price'], 2222 * currency_ratio * discount_ratio) self.assertEqual(res['list_price'], 2222 * currency_ratio) self.assertEqual(res['price_extra'], 222 * currency_ratio) # CASE: dynamic combination, no variant existing # Test invalidate_cache on product.template _create_variant_ids self._add_keyboard_attribute() combination += self._get_product_template_attribute_value(self.keyboard_excluded) res = self.computer._get_combination_info(combination, add_qty=2, pricelist=pricelist) self.assertEqual(res['product_template_id'], self.computer.id) self.assertEqual(res['product_id'], False) self.assertEqual(res['display_name'], "Super Computer (8 GB, 1 To, Excluded)") self.assertEqual(res['price'], (2222 - 5) * currency_ratio * discount_ratio) self.assertEqual(res['list_price'], (2222 - 5) * currency_ratio) self.assertEqual(res['price_extra'], (222 - 5) * currency_ratio) # CASE: pricelist set value to 0, no variant # Test invalidate_cache on product.pricelist write pricelist_item.percent_price = 100 res = self.computer._get_combination_info(combination, add_qty=2, pricelist=pricelist) self.assertEqual(res['product_template_id'], self.computer.id) self.assertEqual(res['product_id'], False) self.assertEqual(res['display_name'], "Super Computer (8 GB, 1 To, Excluded)") self.assertEqual(res['price'], 0) self.assertEqual(res['list_price'], (2222 - 5) * currency_ratio) self.assertEqual(res['price_extra'], (222 - 5) * currency_ratio) def test_03_get_combination_info_discount_policy(self): computer_ssd_256 = self._get_product_template_attribute_value(self.ssd_256) computer_ram_8 = self._get_product_template_attribute_value(self.ram_8) computer_hdd_1 = self._get_product_template_attribute_value(self.hdd_1) combination = computer_ssd_256 + computer_ram_8 + computer_hdd_1 pricelist, pricelist_item, currency_ratio, discount_ratio = self._setup_pricelist() pricelist.discount_policy = 'with_discount' # CASE: no discount, setting with_discount res = self.computer._get_combination_info(combination, add_qty=1, pricelist=pricelist) self.assertEqual(res['price'], 2222 * currency_ratio) self.assertEqual(res['list_price'], 2222 * currency_ratio) self.assertEqual(res['price_extra'], 222 * currency_ratio) self.assertEqual(res['has_discounted_price'], False) # CASE: 
discount, setting with_discount res = self.computer._get_combination_info(combination, add_qty=2, pricelist=pricelist) self.assertEqual(res['price'], 2222 * currency_ratio * discount_ratio) self.assertEqual(res['list_price'], 2222 * currency_ratio) self.assertEqual(res['price_extra'], 222 * currency_ratio) self.assertEqual(res['has_discounted_price'], False) # CASE: no discount, setting without_discount pricelist.discount_policy = 'without_discount' res = self.computer._get_combination_info(combination, add_qty=1, pricelist=pricelist) self.assertEqual(res['price'], 2222 * currency_ratio) self.assertEqual(res['list_price'], 2222 * currency_ratio) self.assertEqual(res['price_extra'], 222 * currency_ratio) self.assertEqual(res['has_discounted_price'], False) # CASE: discount, setting without_discount res = self.computer._get_combination_info(combination, add_qty=2, pricelist=pricelist) self.assertEqual(res['price'], 2222 * currency_ratio * discount_ratio) self.assertEqual(res['list_price'], 2222 * currency_ratio) self.assertEqual(res['price_extra'], 222 * currency_ratio) self.assertEqual(res['has_discounted_price'], True) def test_04_create_product_variant_non_dynamic(self): """The goal of this test is to make sure the create_product_variant does not create variant if the type is not dynamic. It can however return a variant if it already exists.""" computer_ssd_256 = self._get_product_template_attribute_value(self.ssd_256) computer_ram_8 = self._get_product_template_attribute_value(self.ram_8) computer_ram_16 = self._get_product_template_attribute_value(self.ram_16) computer_hdd_1 = self._get_product_template_attribute_value(self.hdd_1) self._add_exclude(computer_ram_16, computer_hdd_1) # CASE: variant is already created, it should return it combination = computer_ssd_256 + computer_ram_8 + computer_hdd_1 variant1 = self.computer._get_variant_for_combination(combination) self.assertEqual(self.computer._create_product_variant(combination), variant1) # CASE: variant does not exist, but template is non-dynamic, so it # should not create it Product = self.env['product.product'] variant1.unlink() self.assertEqual(self.computer._create_product_variant(combination), Product) def test_05_create_product_variant_dynamic(self): """The goal of this test is to make sure the create_product_variant does work with dynamic. If the combination is possible, it should create it. If it's not possible, it should not create it.""" self.computer_hdd_attribute_lines.unlink() self.hdd_attribute.create_variant = 'dynamic' self._add_hdd_attribute_line() computer_ssd_256 = self._get_product_template_attribute_value(self.ssd_256) computer_ram_8 = self._get_product_template_attribute_value(self.ram_8) computer_ram_16 = self._get_product_template_attribute_value(self.ram_16) computer_hdd_1 = self._get_product_template_attribute_value(self.hdd_1) self._add_exclude(computer_ram_16, computer_hdd_1) # CASE: variant does not exist, but combination is not possible # so it should not create it impossible_combination = computer_ssd_256 + computer_ram_16 + computer_hdd_1 Product = self.env['product.product'] self.assertEqual(self.computer._create_product_variant(impossible_combination), Product) # CASE: the variant does not exist, and the combination is possible, so # it should create it combination = computer_ssd_256 + computer_ram_8 + computer_hdd_1 variant = self.computer._create_product_variant(combination) self.assertTrue(variant)
# CASE: the variant already exists, so it should return it self.assertEqual(variant, self.computer._create_product_variant(combination)) def _add_keyboard_attribute(self): self.keyboard_attribute = self.env['product.attribute'].create({ 'name': 'Keyboard', 'sequence': 6, 'create_variant': 'dynamic', }) self.keyboard_included = self.env['product.attribute.value'].create({ 'name': 'Included', 'attribute_id': self.keyboard_attribute.id, 'sequence': 1, }) self.keyboard_excluded = self.env['product.attribute.value'].create({ 'name': 'Excluded', 'attribute_id': self.keyboard_attribute.id, 'sequence': 2, }) self.computer_keyboard_attribute_lines = self.env['product.template.attribute.line'].create({ 'product_tmpl_id': self.computer.id, 'attribute_id': self.keyboard_attribute.id, 'value_ids': [(6, 0, [self.keyboard_included.id, self.keyboard_excluded.id])], }) self.computer_keyboard_attribute_lines.product_template_value_ids[0].price_extra = 5 self.computer_keyboard_attribute_lines.product_template_value_ids[1].price_extra = -5
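The price assertions in these tests all reduce to the same arithmetic: the list price in the pricelist currency is the base price times the currency rate, and the discounted price additionally applies the 10% pricelist item that kicks in at quantity 2. A quick sanity check of those numbers, with values taken from _setup_pricelist (2222 = base price plus the 222 attribute price_extra):

# Sanity check of the combination-info arithmetic asserted above.
list_price = 2222            # template price + attribute extras
price_extra = 222
currency_ratio = 2           # rate of the created test currency
discount = 10                # percent_price on the pricelist item
discount_ratio = 1 - discount / 100.0

converted_list = list_price * currency_ratio    # 4444
discounted = converted_list * discount_ratio    # 3999.6
extra = price_extra * currency_ratio            # 444

assert discounted == 2222 * currency_ratio * discount_ratio
print(converted_list, discounted, extra)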
event_tester.py
from sys import argv, exit

from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QApplication, QMainWindow, QWidget


class MainWindow(QMainWindow):
    def __init__(self, parent=None):
        super(MainWindow, self).__init__(parent)
        self.setCentralWidget(CustomWidget(self))
        self.show()


class CustomWidget(QWidget):
    def __init__(self, parent=None):
        super(CustomWidget, self).__init__(parent)
        self.setFocusPolicy(Qt.StrongFocus)

    def mousePressEvent(self, event):
        print(event)


if __name__ == "__main__":
    app = QApplication(argv)
    ex = MainWindow()
    exit(app.exec_())
        print(event)

    def keyPressEvent(self, event):
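print(event) only shows the wrapper object; QMouseEvent and QKeyEvent expose the useful details. A sketch of richer handler bodies, assuming PyQt5's standard accessors (these are drop-in method bodies for CustomWidget, not new API):

from PyQt5.QtCore import Qt

def mousePressEvent(self, event):
    # button() is Qt.LeftButton / Qt.RightButton / ...; pos() is widget-local.
    print("mouse:", int(event.button()), event.pos().x(), event.pos().y())

def keyPressEvent(self, event):
    # key() is a Qt.Key_* code; text() is the printable character, if any.
    if event.key() == Qt.Key_Escape:
        print("escape pressed")
    print("key:", event.key(), repr(event.text()))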
tvm_test.go
package tvm import ( "encoding/base64" "encoding/json" "fmt" "github.com/move-ton/ever-client-go/domain" "github.com/move-ton/ever-client-go/gateway/client" "github.com/move-ton/ever-client-go/usecase/abi" "github.com/move-ton/ever-client-go/usecase/boc" "github.com/move-ton/ever-client-go/usecase/crypto" "github.com/move-ton/ever-client-go/usecase/net" "github.com/move-ton/ever-client-go/usecase/processing" "github.com/move-ton/ever-client-go/util" "github.com/stretchr/testify/assert" "io/ioutil" "math/big" "os" "strings" "testing" "time" ) func
(t *testing.T) { configConn := domain.NewDefaultConfig(domain.BaseCustomUrl, nil) clientMain, err := client.NewClientGateway(configConn) assert.Equal(t, nil, err) tvmUC := tvm{ config: configConn, client: clientMain, } type resultData struct { ID string `json:"id,omitempty"` AccTypeName string `json:"acc_type_name,omitempty"` } t.Run("TestExecuteMessage", func(t *testing.T) { fileAbi, err := os.Open("../samples/Subscription.abi.json") assert.Equal(t, nil, err) byteAbi, err := ioutil.ReadAll(fileAbi) assert.Equal(t, nil, err) eventsAbi := &domain.AbiContract{} err = json.Unmarshal(byteAbi, &eventsAbi) assert.Equal(t, nil, err) abiValue := domain.NewAbiContract(eventsAbi) fileTvc, err := os.Open("../samples/Subscription.tvc") assert.Equal(t, nil, err) byteTvc, err := ioutil.ReadAll(fileTvc) assert.Equal(t, nil, err) tvc := base64.StdEncoding.EncodeToString(byteTvc) cryptoUC := crypto.NewCrypto(tvmUC.config, tvmUC.client) keypair, err := cryptoUC.GenerateRandomSignKeys() assert.Equal(t, nil, err) walletAddress := "0:2222222222222222222222222222222222222222222222222222222222222222" // # Deploy message deploySet := domain.DeploySet{Tvc: tvc} callSet := domain.CallSet{FunctionName: "constructor", Input: json.RawMessage(`{"wallet":"` + walletAddress + `"}`)} signerKey := domain.NewSigner(domain.SignerKeys{keypair}) abiUC := abi.NewAbi(tvmUC.config, tvmUC.client) // # Get account deploy message deployMessage, err := abiUC.EncodeMessage(&domain.ParamsOfEncodeMessage{Abi: abiValue, Signer: signerKey, DeploySet: &deploySet, CallSet: &callSet}) assert.Equal(t, nil, err) // # Send grams fileAbiG, err := os.Open("../samples/Giver.abi.json") assert.Equal(t, nil, err) byteAbiG, err := ioutil.ReadAll(fileAbiG) assert.Equal(t, nil, err) eventsAbiG := &domain.AbiContract{} err = json.Unmarshal(byteAbiG, &eventsAbiG) assert.Equal(t, nil, err) giverAbi := domain.NewAbiContract(eventsAbiG) callSetN := domain.CallSet{} callSetN.FunctionName = "grant" callSetN.Input = json.RawMessage(`{"dest":"` + deployMessage.Address + `"}`) assert.Equal(t, nil, err) procUC := processing.NewProcessing(tvmUC.config, tvmUC.client) _, err = procUC.ProcessMessage(&domain.ParamsOfProcessMessage{ MessageEncodeParams: &domain.ParamsOfEncodeMessage{ Abi: giverAbi, Signer: domain.NewSigner(domain.SignerNone{}), Address: "0:b61cf024cda7dad90e556d0fafb72c08579d5ebf73a67737317d9f3fc73521c5", CallSet: &callSetN}, SendEvents: false}, nil) assert.Equal(t, nil, err) // # Deploy account _, err = procUC.ProcessMessage(&domain.ParamsOfProcessMessage{ MessageEncodeParams: &domain.ParamsOfEncodeMessage{ Abi: abiValue, Signer: signerKey, DeploySet: &deploySet, CallSet: &callSet, }, SendEvents: false}, nil) assert.Equal(t, nil, err) // # Get account data time.Sleep(time.Second * 5) netUC := net.NewNet(tvmUC.config, tvmUC.client) filter := fmt.Sprintf(`{"id":{"eq":"%s"}}`, deployMessage.Address) query := &domain.ParamsOfWaitForCollection{Collection: "accounts", Filter: json.RawMessage(filter), Result: "id, boc"} valueNet, err := netUC.WaitForCollection(query) assert.Equal(t, nil, err) // # Get account balance type resData struct { ID string `json:"id"` Boc string `json:"boc"` } resS := &resData{} err = json.Unmarshal(valueNet.Result, &resS) assert.Equal(t, nil, err) bocUC := boc.NewBoc(tvmUC.config, tvmUC.client) parsed, err := bocUC.ParseAccount(&domain.ParamsOfParse{Boc: resS.Boc}) assert.Equal(t, nil, err) type resBalance struct { Balance string `json:"balance"` } resB := &resBalance{} err = json.Unmarshal(parsed.Parsed, &resB) assert.Equal(t, nil, 
err) // # Run executor (unlimited balance should not affect account balance) subscribeParams := `{"subscriptionId":"0x1111111111111111111111111111111111111111111111111111111111111111", "pubkey":"0x2222222222222222222222222222222222222222222222222222222222222222", "to":"0:3333333333333333333333333333333333333333333333333333333333333333", "value":"0x123", "period":"0x456"}` callSetSP := domain.CallSet{FunctionName: "subscribe", Input: json.RawMessage(subscribeParams)} encodeMessage, err := abiUC.EncodeMessage(&domain.ParamsOfEncodeMessage{ Abi: abiValue, Signer: signerKey, Address: deployMessage.Address, CallSet: &callSetSP, }) assert.Equal(t, nil, err) result, err := tvmUC.RunExecutor(&domain.ParamsOfRunExecutor{ Message: encodeMessage.Message, Account: domain.AccountForExecutor{ domain.AccountForExecutorAccount{ resS.Boc, util.BoolToPointerBool(true)}}, Abi: abiValue, ReturnUpdatedAccount: util.BoolToPointerBool(true)}) assert.Equal(t, nil, err) // # Get account balance again parsed, err = bocUC.ParseAccount(&domain.ParamsOfParse{Boc: result.Account}) assert.Equal(t, nil, err) resB2 := &resBalance{} err = json.Unmarshal(parsed.Parsed, resB2) assert.Equal(t, nil, err) bal1 := new(big.Int) bal1, _ = bal1.SetString(resB.Balance[2:], 16) bal2 := new(big.Int) bal2, _ = bal2.SetString(resB2.Balance[2:], 16) assert.Equal(t, -1, bal1.Cmp(bal2)) // # Run executor in standard mode (limited balance) result, err = tvmUC.RunExecutor(&domain.ParamsOfRunExecutor{ Message: encodeMessage.Message, Account: domain.AccountForExecutor{ domain.AccountForExecutorAccount{ resS.Boc, util.BoolToPointerBool(false)}}, Abi: abiValue, ReturnUpdatedAccount: util.BoolToPointerBool(true)}) assert.Equal(t, nil, err) type resTrans struct { InMsg string `json:"in_msg"` } resTransS := &resTrans{} err = json.Unmarshal(result.Transaction, &resTransS) assert.Equal(t, nil, err) assert.Equal(t, encodeMessage.MessageID, resTransS.InMsg) resCmp := result.Fees.TotalAccountFees.Cmp(big.NewInt(0)) assert.Equal(t, 1, resCmp) // # Check subscription callSet = domain.CallSet{FunctionName: "getSubscription", Input: json.RawMessage(`{"subscriptionId":"0x1111111111111111111111111111111111111111111111111111111111111111"}`)} encodedMessage, err := abiUC.EncodeMessage(&domain.ParamsOfEncodeMessage{ Abi: abiValue, Address: deployMessage.Address, Signer: signerKey, CallSet: &callSet, }) assert.Equal(t, nil, err) result2, err := tvmUC.RunTvm(&domain.ParamsOfRunTvm{Message: encodedMessage.Message, Account: result.Account, Abi: abiValue}) assert.Equal(t, nil, err) type resObjStr struct { Value0 struct { PubKey string `json:"pubkey"` } `json:"value0"` } resObj := &resObjStr{} err = json.Unmarshal(result2.Decoded.Output, resObj) assert.Equal(t, nil, err) assert.Equal(t, "0x2222222222222222222222222222222222222222222222222222222222222222", resObj.Value0.PubKey) }) clientMain.Destroy() configConn.Network.Endpoints = domain.GetDevNetBaseUrls() clientMain, err = client.NewClientGateway(configConn) assert.Equal(t, nil, err) defer clientMain.Destroy() tvmUC.config = configConn tvmUC.client = clientMain t.Run("TestRunGet", func(t *testing.T) { electorAddress := "-1:3333333333333333333333333333333333333333333333333333333333333333" electorСode := 
"te6ccgECXgEAD04AART/APSkE/S88sgLAQIBIAMCAFGl//8YdqJoegJ6AhE3Sqz4FXkgTio4EPgS+SAs+BR5IHF4E3kgeBSYQAIBSBcEEgGvDuDKmc/+c4wU4tUC3b34gbdFp4dI3KGnJ9xALfcqyQAGIAoFAgEgCQYCAVgIBwAzs+A7UTQ9AQx9AQwgwf0Dm+hk/oAMJIwcOKABbbCle1E0PQFIG6SMG3g2zwQJl8GbYT/jhsigwf0fm+lIJ0C+gAwUhBvAlADbwICkTLiAbPmMDGBUAUm5h12zwQNV8Fgx9tjhRREoAg9H5vpTIhlVIDbwIC3gGzEuZsIYXQIBIBALAgJyDQwBQqss7UTQ9AUgbpJbcODbPBAmXwaDB/QOb6GT+gAwkjBw4lQCAWoPDgGHuq7UTQ9AUgbpgwcFRwAG1TEeDbPG2E/44nJIMH9H5vpSCOGAL6ANMfMdMf0//T/9FvBFIQbwJQA28CApEy4gGz5jAzhUACO4ftRND0BSBukjBwlNDXCx/igCASAUEQIBWBMSAl+vS22eCBqvgsGPtsdPqIlAEHo/N9KQR0cBbZ43g6kIN4EoAbeBAUiZcQDZiXM2EMBdWwInrA6A7Z5Bg/oHN9DHQW2eSRg28UAWFQJTtkhbZ5Cf7bHTqiJQYP6PzfSkEdGAW2eKQg3gSgBt4EBSJlxANmJczYQwFhUCSts8bYMfjhIkgBD0fm+lMiGVUgNvAgLeAbPmMDMD0Ns8bwgDbwREQQIo2zwQNV8FgCD0Dm+hkjBt4ds8bGFdWwICxRkYASqqgjGCEE5Db2SCEM5Db2RZcIBA2zxWAgHJMRoSAW4a85Q1ufW1LEXymEEC7IZbucuD3mjLjoAesLeX8QB6AAhIIRsCAUgdHAHdQxgCT4M26SW3Dhcfgz0NcL//go+kQBpAK9sZJbcOCAIvgzIG6TXwNw4PANMDIC0IAo1yHXCx/4I1EToVy5k18GcOBcocE8kTGRMOKAEfgz0PoAMAOgUgKhcG0QNBAjcHDbPMj0APQAAc8Wye1Uf4UwIBIB8eA3k2zx/jzIkgCD0fG+lII8jAtMfMPgju1MUvbCPFTFUFUTbPBSgVHYTVHNY2zwDUFRwAd6RMuIBs+ZsYW6zgXUhcA5MAds8bFGTXwNw4QL0BFExgCD0Dm+hk18EcOGAQNch1wv/gCL4MyHbPIAk+DNY2zyxjhNwyMoAEvQA9AABzxbJ7VTwJjB/4F8DcIFQgIAAYIW6SW3CVAfkAAbriAgEgMCICASAlIwOnTbPIAi+DP5AFMBupNfB3DgIo4vUySAIPQOb6GOINMfMSDTH9P/MFAEuvK5+CNQA6DIyx9YzxZABIAg9EMCkxNfA+KSbCHif4rmIG6SMHDeAds8f4XSRcAJYjgCD0fG+lII48AtM/0/9TFbqOLjQD9AT6APoAKKsCUZmhUCmgBMjLPxbL/xL0AAH6AgH6AljPFlQgBYAg9EMDcAGSXwPikTLiAbMCASApJgP1AHbPDT4IyW5k18IcOBw+DNulF8I8CLggBH4M9D6APoA+gDTH9FTYbmUXwzwIuAElF8L8CLgBpNfCnDgIxBJUTJQd/AkIMAAILMrBhBbEEoQOU3d2zwjjhAxbFLI9AD0AAHPFsntVPAi4fANMvgjAaCmxCm2CYAQ+DPQgVFMnArqAENch1wsPUnC2CFMToIASyMsHUjDLH8sfGMsPF8sPGss/E/QAyXD4M9DXC/9TGNs8CfQEUFOgKKAJ+QAQSRA4QGVwbds8QDWAIPRDA8j0ABL0ABL0AAHPFsntVH8oWgBGghBOVlNUcIIAxP/IyxAVy/+DHfoCFMtqE8sfEss/zMlx+wAD9yAEPgz0NMP0w8x0w/RcbYJcG1/jkEpgwf0fG+lII4yAvoA0x/TH9P/0//RA6MEyMt/FMofUkDL/8nQURq2CMjLHxPL/8v/QBSBAaD0QQOkQxORMuIBs+YwNFi2CFMBuZdfB21wbVMR4G2K5jM0pVySbxHkcCCK5jY2WyKAvLSoBXsAAUkO5ErGXXwRtcG1TEeBTAaWSbxHkbxBvEHBTAG1tiuY0NDQ2UlW68rFQREMTKwH+Bm8iAW8kUx2DB/QOb6HyvfoAMdM/MdcL/1OcuY5dUTqoqw9SQLYIUUShJKo7LqkEUZWgUYmgghCOgSeKI5KAc5KAU+LIywfLH1JAy/9SoMs/I5QTy/8CkTPiVCKogBD0Q3AkyMv/Gss/UAX6AhjKAEAagwf0QwgQRRMUkmwx4iwBIiGOhUwA2zwKkVviBKQkbhUXSwFIAm8iAW8QBKRTSL6OkFRlBts8UwK8lGwiIgKRMOKRNOJTNr4TLgA0cAKOEwJvIiFvEAJvESSoqw8StggSoFjkMDEAZAOBAaD0km+lII4hAdN/URm2CAHTHzHXC/8D0x/T/zHXC/9BMBRvBFAFbwIEkmwh4rMUAANpwhIB6YZp0CmGybF0xQ4xcJ/WJasNDpUScmQJHtHvtlFfVnQACSA3MgTjpwF9IgDSSa+Bv/AQ67JBg19Jr4G+8G2eCBqvgoFpj6mJwBB6BzfQya+DP3CQa4WP/BHQkGCAya+DvnARbZ42ERn8Ee2eBcGF/KGZQYTQLFQA0wEoBdQNUCgD1CgEUBBBjtAoBlzJr4W98CoKAaoc25PAXUE2MwSk2zzJAts8UbODB/QOb6GUXw6A+uGBAUDXIfoAMFIIqbQfGaBSB7yUXwyA+eBRW7uUXwuA+OBtcFMHVSDbPAb5AEYJgwf0U5RfCoD34UZQEDcQJzVbQzQDIts8AoAg9EPbPDMQRRA0WNs8Wl1cADSAvMjKBxjL/xbMFMsfEssHy/8B+gIB+gLLHwA8gA34MyBuljCDI3GDCJ/Q0wcBwBryifoA+gD6ANHiAgEgOTgAHbsAH/BnoaQ/pD+kP64UPwR/2A6GmBgLjYSS+B8H0gGBDjgEdCGIDtnnAA6Y+Q4ABHQi2A7Z5waZ+RQQgnObol3UdCmQgR7Z5wEUEII7K6El1FdXTjoUeju2wtfKSxXibKZ8Z1s63gQ/coRQXeBsJHrAnPPrB7PzAAaOhDQT2zzgIoIQTkNvZLqPGDRUUkTbPJaCEM5Db2SShB/iQDNwgEDbPOAighDudk9LuiOCEO52T2+6UhCxTUxWOwSWjoYzNEMA2zzgMCKCEFJnQ3C6jqZUQxXwHoBAIaMiwv+XW3T7AnCDBpEy4gGCEPJnY1CgA0REcAHbPOA0IYIQVnRDcLrjAjMggx6wR1Y9PAEcjomEH0AzcIBA2zzhXwNWA6IDgwjXGCDTH9MP0x/T/9EDghBWdENQuvKlIds8MNMHgCCzErDAU/Kp0x8BghCOgSeKuvKp0//TPzBFZvkR8qJVAts8ghDWdFJAoEAzcIBA2zxFPlYEUNs8U5OAIPQOb6E7CpNfCn7hCds8NFtsIkk3GNs8MiHBAZMYXwjgIG5dW0I/AiqSMDSOiUNQ2zwxFaBQROJFE0RG2zxAXAKa0Ns8NDQ0U0WDB/QOb6GTXwZw4dP/0z/6ANIA0VIWqbQfFqBSULYIUVWhAs
jL/8s/AfoCEsoAQEWDB/RDI6sCAqoCErYIUTOhREPbPFlBSwAu0gcBwLzyidP/1NMf0wfT//oA+gDTH9EDvlMjgwf0Dm+hlF8EbX/h2zwwAfkAAts8UxW9mV8DbQJzqdQAApI0NOJTUIAQ9A5voTGUXwdtcOD4I8jLH0BmgBD0Q1QgBKFRM7IkUDME2zxANIMH9EMBwv+TMW1x4AFyRkRDAByALcjLBxTMEvQAy//KPwAe0wcBwC3yidT0BNP/0j/RARjbPDJZgBD0Dm+hMAFGACyAIvgzINDTBwHAEvKogGDXIdM/9ATRAqAyAvpEcPgz0NcL/+1E0PQEBKRavbEhbrGSXwTg2zxsUVIVvQSzFLGSXwPg+AABkVuOnfQE9AT6AEM02zxwyMoAE/QA9ABZoPoCAc8Wye1U4lRIA0QBgCD0Zm+hkjBw4ds8MGwzIMIAjoQQNNs8joUwECPbPOISW0pJAXJwIH+OrSSDB/R8b6Ugjp4C0//TPzH6ANIA0ZQxUTOgjodUGIjbPAcD4lBDoAORMuIBs+YwMwG68rtLAZhwUwB/jrcmgwf0fG+lII6oAtP/0z8x+gDSANGUMVEzoI6RVHcIqYRRZqBSF6BLsNs8CQPiUFOgBJEy4gGz5jA1A7pTIbuw8rsSoAGhSwAyUxKDB/QOb6GU+gAwoJEw4sgB+gICgwf0QwBucPgzIG6TXwRw4NDXC/8j+kQBpAK9sZNfA3Dg+AAB1CH7BCDHAJJfBJwB0O0e7VMB8QaC8gDifwLWMSH6RAGkjo4wghD////+QBNwgEDbPODtRND0BPQEUDODB/Rmb6GOj18EghD////+QBNwgEDbPOE2BfoA0QHI9AAV9AABzxbJ7VSCEPlvcyRwgBjIywVQBM8WUAT6AhLLahLLH8s/yYBA+wBWVhTEphKDVdBJFPEW0/xcbn16xYfvSOeP/puknaDtlqylDccABSP6RO1E0PQEIW4EpBSxjocQNV8FcNs84ATT/9Mf0x/T/9QB0IMI1xkB0YIQZUxQdMjLH1JAyx9SMMsfUmDL/1Igy//J0FEV+RGOhxBoXwhx2zzhIYMPuY6HEGhfCHbbPOAHVVVVTwRW2zwxDYIQO5rKAKEgqgsjuY6HEL1fDXLbPOBRIqBRdb2OhxCsXwxz2zzgDFRVVVAEwI6HEJtfC3DbPOBTa4MH9A5voSCfMPoAWaAB0z8x0/8wUoC9kTHijocQm18LdNs84FMBuY6HEJtfC3XbPOAg8qz4APgjyFj6AssfFMsfFsv/GMv/QDiDB/RDEEVBMBZwcFVVVVECJts8yPQAWM8Wye1UII6DcNs84FtTUgEgghDzdEhMWYIQO5rKAHLbPFYAKgbIyx8Vyx9QA/oCAfoC9ADKAMoAyQAg0NMf0x/6APoA9ATSANIA0QEYghDub0VMWXCAQNs8VgBEcIAYyMsFUAfPFlj6AhXLahPLH8s/IcL/kssfkTHiyQH7AARU2zwH+kQBpLEhwACxjogFoBA1VRLbPOBTAoAg9A5voZQwBaAB4w0QNUFDXVxZWAEE2zxcAiDbPAygVQUL2zxUIFOAIPRDW1oAKAbIyx8Vyx8Ty//0AAH6AgH6AvQAAB7TH9Mf0//0BPoA+gD0BNEAKAXI9AAU9AAS9AAB+gLLH8v/ye1UACDtRND0BPQE9AT6ANMf0//R" electorData := "te6cckICAdwAAQAAXWYAAANP5zNFdL1WHOmhM8muxAeRTL3uvNJvs927E6v1fF69v/Dcp40YdRDZlwABAAIAAwEtXqwPR16r70dgkYTnKgAHGBnmNy4oAJAABAIBIADdAN4Bd6Beqw50XqyPRwAAgADQmeTXYgPIpl73Xmk32e7didX6vi9e3/huU8aMOohsy7jBWbxZxZADD75EMrDvIAD9AgEgAAUABgIBIAAHAAgCASAAJwAoAgEgACkAKgIBIAAJAAoCASAAWwBcAgEgAAsADAIBIAANAA4CASAAHQAeAgEgAA8AEAIBIAAVABYA3r6Qf1spdPq/bwqhp4mDQLP3bEuicrlaPku4CcHVKbaZdjax+CCkAF6rl8MAArMzliLRF9KX8B8jXdOp1h4kl8Es1rd+Wg2gVJmsqLcSoCkGdN4j+eSPKxKEbLLoxk8tLP9ttT28kx/w+iR/jTmNZQIBIAARABICASAAEwAUAN2+QmbGo3ETwCSfeDPz5r7UHt0Yn1NxPT3qTuMXrVRKl8xtY/BBSAC9Vy+MAAVmZyxFoi+lL+A+RrunU6w8SS+CWa1u/LQbQKkzWVFuJUBSsDZ/mI6bp28e90jhPvkXOqNHObIGmtABjoVh9DHWv9sA3b4e5pHb3M+xe6cvAv7AhM1zTFmuaqorSftD4jZ9r+dvmNrH4IKQAXquX3gACszOWItEX0pfwHyNd06nWHiSXwSzWt35aDaBUmayotxKgKbo4P3UNm8JKNoBwOEZFcTQhfHniCkSJVahkCptiFaA7gDdviMRh2NrVSlqcu7snEZIKVBulgAPnApzCKN/fvBft4/Y2sfggpABeq5frAAKzM5Yi0RfSl/AfI13TqdYeJJfBLNa3floNoFSZrKi3EqApac/HSfJVt17KIcYY2fQhe4jXJ1WswaMOy7Uwu/Z7JL6AgFiABcAGAIBIAAZABoA3b3pOjXf2g8qttV0zputlBzWsVmpquHc/d6qWko1cQKHsbWPwQUgAvVcv5gAFZmcsRaIvpS/gPka7p1OsPEkvglmtbvy0G0CpM1lRbiVAUpQ9MxbmC+nql/IndJ1YfwX72II+spmVwaA2YQZQDuYTADdvdaezDIUicJFsC7dafjvy4+QyEabBhvXpSSfTaMjWi8xtY/BBSAC9Vy/uAAVmZyxFoi+lL+A+RrunU6w8SS+CWa1u/LQbQKkzWVFuJUBSVXvjTwFo2kKXKKo9hxBHh3usmapE9y0c0nTvQfZnKs0AgEgABsAHADdvk76i6Dsoqy3y5tiNwGSP0Mb3hSGnkjYFkt2ofiQ77qMbWPwQUgAvVcv5gAFZmcsRaIvpS/gPka7p1OsPEkvglmtbvy0G0CpM1lRbiVAU/i3C/J0bWQ20cXVLECSH+ZKq5pw5kJ0Uvdev0Lo9GK/AN2+AfGvpOUCoOzBRSyFTH9PdIEu4nn93taJCGkHU6Fx01jax+CCkAF6rl/MAArMzliLRF9KX8B8jXdOp1h4kl8Es1rd+Wg2gVJmsqLcSoCnf/KF2b1XSss5qd+AIbRiHmVOdn+F1O+alxlrTv3uB7YA3b4StUEBU8pK39mx+0y87Ejv6XHvpxF+vJtd62F76ZKOGNrH4IKQAXquX6wACszOWItEX0pfwHyNd06nWHiSXwSzWt35aDaBUmayotxKgKSofeDxK2rp/r6+6mAJyz7uQG057B0zwwBX0CX5+vQqDgIBIAAfACACAUgAIwAkAN6+uGAMVwwZ7xuRvyzYNwnXGJnCRr+rtbCNxw/jK1yB99Y2sfggpABeq5feAAKzM5Yi0RfSl/AfI13TqdYeJJfBLNa3floNoFSZrKi3EqApG0E
bsEGDGJmFkajst/1D312DWYN3Cugr7AFWaYqOiGcCASAAIQAiAN2+TTbMdz9PfDC1QwM39pFJ203JsV5nwyq7K/S8Ktqh4kxtY/BBSAC9Vy+GAAVmZyxFoi+lL+A+RrunU6w8SS+CWa1u/LQbQKkzWVFuJUBS0AeCSiRJ21mXNr1N8KycQOFhwaNRy0JqYKeCl7fx1FcA3b5oIcEmxkc2EzIzFOA8b+6u65iqCTdKgpLyz+uxdOnejG1j8EFIAL1XL9YABWZnLEWiL6Uv4D5Gu6dTrDxJL4JZrW78tBtAqTNZUW4lQFP6kJRf82LDczM1rRBSKuHgLYqsKg241roAUNRsv5/ZuwIBIAAlACYA3b5bAPci8hDQku7Qjt0ZHNan/zY1I/f81P210bwGD1Aq7G1j8EFIAL1XL9YABWZnLEWiL6Uv4D5Gu6dTrDxJL4JZrW78tBtAqTNZUW4lQFKUQMw0vTqYFFEMQnm/8ON/oMVSwSBx4ketavEhZheTTQDdvgclhdc9Ft5EUIyaWQ0cJX9D73TyFecQs6hCHmOAlFSY2sfggpABeq5fLAAKzM5Yi0RfSl/AfI13TqdYeJJfBLNa3floNoFSZrKi3EqApByAMC9j9dKoUOWrPu71v6Mfbud+bx7E/uqf/5buhSr6AN2+LQ+nBgkFjaufFVAl7tLxdoDJJvuZ6vlNmpbCTtwjKtjax+CCkAF6rl/cAArMzliLRF9KX8B8jXdOp1h4kl8Es1rd+Wg2gVJmsqLcSoCnxNGsjPV4EU+cKd5i0l+YgwRKVFQ4Di1FSpk92vRPSsYCASAAfwCAAgEgAKsArAIBIAArACwCASAARQBGAgEgAC0ALgIBIAA9AD4CASAALwAwAgFIADUANgIBIAAxADICASAAMwA0AN2+YDbay0cJc44b1pP62If/gewdpF+YdJLN7ghvKU6SRQxtY/BBSAC9Vy/OAAVmZyxFoi+lL+A+RrunU6w8SS+CWa1u/LQbQKkzWVFuJUBShl4PyIiOEUiKAhTkg1/iDi+JqvER1TFEAzFXsaE+HecA3b57y7CyH81aMjfymoJtv2xh02aI92XjTVxbHHyZfcOz7G71VqmgAL1XI24AHgAAuLpB+mhHbBc9MOtbl3+Cc2/UwVd8nHPamFSwW86XbNIWJS2s6WUyz9p2btsuu3Pd5F9gzaZYy0XL98bWajKfKwDdvmsfIYDQWs72jAC5xcCEDYSr1TQLf1gf611TrjBUUSJsbWPwQUgAvVcv1gAFZmcsRaIvpS/gPka7p1OsPEkvglmtbvy0G0CpM1lRbiVAUti5Vs1vbuFsZvSxn28hm8s2BA1H9jQU+/FzpwLSlRgnAN2+bTBFEudLKuo688P1+rSCi2emppN+FoZkVfbDYnHgPWxtY/BBSAC9Vy/OAAVmZyxFoi+lL+A+RrunU6w8SS+CWa1u/LQbQKkzWVFuJUBT3TTsjvmOUwguvo215O+d0IFbX7hzOE0yVj4GLVhs3ZMA3b56PfFIFT9+OUoL4ORLAJHYK8DG91QQrJOu9zUgRAAHzG1j8EFIAL1XL+4ABWZnLEWiL6Uv4D5Gu6dTrDxJL4JZrW78tBtAqTNZUW4lQFKwaq5a4ST1Ip76EooN91OBNtSi6FF8TjoCeS/emukDHQIBIAA3ADgCAW4AOQA6AgN7YAA7ADwA3b1EPeT5HaiQ5OHjiL0ztYEjlIAPKiQv2caIilvuCPy0xtY/BBSAC9Vy/EAAVmZyxFoi+lL+A+RrunU6w8SS+CWa1u/LQbQKkzWVFuJUBT3J3Ga8PK2tzmbMIvVVb8nheLKQ1xsrfPB43j5UniupUADdvVqlKDFWz0Y0lzGESfH7Zf2CPgIBhG6jzPZiWXQj9kbG1j8EFIAL1XL94ABWZnLEWiL6Uv4D5Gu6dTrDxJL4JZrW78tBtAqTNZUW4lQFK74jGYIQJLeaD912xfQ5X2fVHg02uB2zmvN0nvoGc7TwAN28ymbW9IM+KR+ifpBG9y7VPsKQqLkuuJ3JZCBQGIDFMxtY/BBSAC9Vy/WAAVmZyxFoi+lL+A+RrunU6w8SS+CWa1u/LQbQKkzWVFuJUBT1ycTbsoBM0Lt5+p0N6GGlEcSMkffmPopPecSQtYYhA8AA3bzv+GzHoVGKryu9KryxHUiPHYGgDBqLMqn8Qbrl7LK7G1j8EFIAL1XL5YABWZnLEWiL6Uv4D5Gu6dTrDxJL4JZrW78tBtAqTNZUW4lQFK8G1bXoMiYq3AJCEvFq7E9XHMwEdHzkTDzJMQo1xmFjQAIBIAA/AEACAVgAQQBCAN6+qJO71km/Lh55ywhAJWOM3XkG7rykDv7uf9vVSMyWORY2sfggpABeq5frAAKzM5Yi0RfSl/AfI13TqdYeJJfBLNa3floNoFSZrKi3EqApUfgjRFEOTeWnk7C2HPzxdzfXBQdJJvPZ4S8bAFvGUYsA3r6VRrx7UST22D1sWmK4iQpIuTMWjhQcASKUMabAxJl4Bjax+CCkAF6rl/MAArMzliLRF9KX8B8jXdOp1h4kl8Es1rd+Wg2gVJmsqLcSoCkgx1AcdSu3pUcn5p1FEMJBhIf5vPggTfc9X1MRapr5MgDdvlfU1HM3RAErG2Sqrzm0QyoA3nRiEWWt3vxLHIWCAo8sbWPwQUgAvVcvjAAFZmcsRaIvpS/gPka7p1OsPEkvglmtbvy0G0CpM1lRbiVAU3C4lFmQADV6jqD/4twMEc9mn3brdGI1ob4TFnp7DATtAgFuAEMARADdvY7muikCcVgEx2nDhFtrOjeALkYujfY7oZ+CepLbvaBjax+CCkAF6rl+8AArMzliLRF9KX8B8jXdOp1h4kl8Es1rd+Wg2gVJmsqLcSoClHd5m0Wre2EgQq5dlC4Dg+D2fr7f1bO4EYIdjV5hyi24AN29nVVthNHZ8kpznCYA7HIlbMAJINha06Lts+DXIUZ4nWNrH4IKQAXquXywACszOWItEX0pfwHyNd06nWHiSXwSzWt35aDaBUmayotxKgKcrYOz16+9GrMy+C4fY7uXRIqOlGw5Hep4zyPPpTaEbPgCASAARwBIAgEgAFMAVAIBIABJAEoCASAATwBQAgFIAEsATAIBagBNAE4A3b48gws+k1yr7nF9k1HNQcJYrMyOzYpiSx19WQn8XXr5GNrH4IKQAXquX7wACszOWItEX0pfwHyNd06nWHiSXwSzWt35aDaBUmayotxKgKXwAk8qTnBBhbfjTZaRNGK6zdMB96gqs7ndkH31vJuOOgDdvidMnN7DzdphpuPp1C6jjzZ8ZshpNZpx8edQg1/fNskY2sfggpABeq5fzAAKzM5Yi0RfSl/AfI13TqdYeJJfBLNa3floNoFSZrKi3EqApGM+TzCxGVFHsNPKTeO/97UzK8r/Xt8HIUYTTum5RwsqAN29ytpXW0igKYkUEkVOwiyYi9snEWoxcctHHODCBRMCkbG1j8EFIAL1XL84ABWZnLEWiL6Uv4D5Gu6dTrDxJL4JZrW78tBtAqTNZUW4lQFIb/DLGwkCFBKJfjqhff
M1FYoZsdYvvgdzH2QlWERQnGwA3b3MJk+CXgxEgw9MXs5XtKr7qBDOx0beBMN/XOu4ctCtsbWPwQUgAvVcvzgAFZmcsRaIvpS/gPka7p1OsPEkvglmtbvy0G0CpM1lRbiVAU0CiSMqZ+vGndMwtTs8i0aoiwEnl8usuoh5kbN68/5MHAIBIABRAFIA3r6cxwhZh2EGshtZi6mhDJkyJZw29EreuVoXjmf2r9L3pjax+CCkAF6rl94AArMzliLRF9KX8B8jXdOp1h4kl8Es1rd+Wg2gVJmsqLcSoCnO6gD+V25MG3ilZabwot+Q0+IzDk7XXdDJFezRWXrPSwDdvldg+wHEFUxJXI+/yn8itkdMzX8Ev+UHdYDYKX1D1U6MbWPwQUgAvVcvfgAFZmcsRaIvpS/gPka7p1OsPEkvglmtbvy0G0CpM1lRbiVAUinJsF+DWAxps7CaPLKywoPuXt/Il+Iy29K3G15JIFN7AN2+RGrMAR+p8OILYToFjnKKXIow/3W0vPylW0asTJms6ixtY/BBSAC9Vy/EAAVmZyxFoi+lL+A+RrunU6w8SS+CWa1u/LQbQKkzWVFuJUBTsb00C2hUK6crEIGKdGrf4PXhlt9S4k5FPLwlipP/rPMCASAAVQBWAN++6U6Vrnd6M4GSrnRn8dVp4n9+nRAS8tJGc8Ux+QRD9/sbWPwQUgAvVcv1gAFZmcsRaIvpS/gPka7p1OsPEkvglmtbvy0G0CpM1lRbiVAU99Bamlm8stiDBiH9HXbEUQI654jg9q+0DR52APrU3dPAAgEgAFcAWADevqxE7d93M5DNtC+T+EVOqcfKRaqJSDRt+PZCpZzkTEQmNrH4IKQAXquX9wACszOWItEX0pfwHyNd06nWHiSXwSzWt35aDaBUmayotxKgKbIMjfOMTZAGzuBPiNau0znMn1iv91HpOxaqr78/mC4sAgEgAFkAWgDdvmnkXBvFxJQOH9j4Ou+vUxFPOs1vPFrKhiFSxyydumbsbWPwQUgAvVcvngAFZmcsRaIvpS/gPka7p1OsPEkvglmtbvy0G0CpM1lRbiVAU35wkH2+hU9ZYBz71o0Q4U3gwfgUQeVHVYQB2MO+8WRlAN2+LhUIm75Q9c40t/Mcu3js50IYEynIRq/PHb0iEQcd/xjax+CCkAF6rl+IAArMzliLRF9KX8B8jXdOp1h4kl8Es1rd+Wg2gVJmsqLcSoCnyzU52ex8xslKEHlnp0pjb+0t770vRho3TrVckIIrBt4A3b4u3SlpTdp09t4nZYTdepWnU2gvWrCsS+DDda+gZaO8WNrH4IKQAXquX4gACszOWItEX0pfwHyNd06nWHiSXwSzWt35aDaBUmayotxKgKd/6rdK2MBrDXu+7qc4jXS2gDjOe3wMEqaO2MqU4JrefgIBIABdAF4CASAAawBsAgEgAF8AYAIBIABnAGgCASAAYQBiAN6+hJUh55OwKwNs5pjDr5UelUjNW4YrcE+lzJ6AsXGjxhY2sfggpABeq5frAAKzM5Yi0RfSl/AfI13TqdYeJJfBLNa3floNoFSZrKi3EqAp7KKZkQVV+16IfuCrq+uLL8C2SNLa5TikXhCldC5dai0CAW4AYwBkAgFIAGUAZgDdvbBHog7Wkek3b38vYNZXEpDjTvThuFRn3MPXwM9/rpBjax+CCkAF6rl94AArMzliLRF9KX8B8jXdOp1h4kl8Es1rd+Wg2gVJmsqLcSoCkT/B16+kZ+u+WqI9qhKUU3t2BU2j2jdatLrEUxT+B6FoAN29p1QcN3tYoM/EypVHMelx9tyfpoBuqhcJ0BHT0yWTzmNrH4IKQAXquX5wACszOWItEX0pfwHyNd06nWHiSXwSzWt35aDaBUmayotxKgKc7C5LM6SUBEWhhZrWk3fUjv4yoJ1krsCpZt/dKlIbCzgA3b31KWnZ6AsijiNZ0HVlnBfd2cOc8AaCDqAce8rSpxLasbWPwQUgAvVcv3gAFZmcsRaIvpS/gPka7p1OsPEkvglmtbvy0G0CpM1lRbiVAU8k/+IKrSrdBJoGD3gZWcjwH/c4z8jjBlqXfUiI57L/NADdvfxMiuUqBXtI+xH5ANsLZVP68IJ8FJtMfFo2Jz3a2UkxtY/BBSAC9Vy/OAAVmZyxFoi+lL+A+RrunU6w8SS+CWa1u/LQbQKkzWVFuJUBThp6SsxRZqGrm+/NC8brIty1jFaORJUFQyWewSA25e8MAgJyAGkAagDevoJ3COTOgaC76zFezgJLpJXz4/q1+DopQbdzGlitMhYGNrH4IKQAXquXywACszOWItEX0pfwHyNd06nWHiSXwSzWt35aDaBUmayotxKgKcHuS1iSMjytaH5vXhLfRsOTN8s7+AGvZLnqAFp/qwnAAN29r0/uagNaCen/CdZaZXaImbBHl8/wjcLGSuEc2U0ZaGNrH4IKQAXquXuwACszOWItEX0pfwHyNd06nWHiSXwSzWt35aDaBUmayotxKgKbGbxx3eBYSfexqbKwVIP+TbRGiojxDaJ7cMn1kCpx+egA3b2HAY+cDJMCsng+te2st2zK47XFovY1W1tRr9GhgHX4Y2sfggpABeq5frAAKzM5Yi0RfSl/AfI13TqdYeJJfBLNa3floNoFSZrKi3EqApyjhL/D4jmkSg1cYkAR2OnjoFPp1EJQvpiPQNea9gRZWAIBIABtAG4CASAAdwB4AgEgAG8AcAIBIABzAHQCASAAcQByAN2+YlLYZdTJoTuWif4XMwh7kwqih44XP1qFvWZxAqS/cIxtY/BBSAC9Vy+eAAVmZyxFoi+lL+A+RrunU6w8SS+CWa1u/LQbQKkzWVFuJUBScdcTl97mpu7C9eav/zKdryh4vCraz0cbAgnx5oNJEv0A3b4WdQPgPV4w8OEI5QV9UrzWrTb3qAlFhjUw8e0k35+aWNrH4IKQAXquX6wACszOWItEX0pfwHyNd06nWHiSXwSzWt35aDaBUmayotxKgKfoenpK4rGeteml/1HmYvwYCkr3YRZOzKneFlcjfQV7mgDdviR+Mv+HOJaaMM37edWODITdBErhoWECSozX4PKrVmkY2sfggpABeq5fnAAKzM5Yi0RfSl/AfI13TqdYeJJfBLNa3floNoFSZrKi3EqApYk+IFq2CsobugJe+iYM4udWjm1lnMizb3mURRxJ8AB6AgN+ZgB1AHYA3b5tXryV8sdBlVKuxToBMgwjPIMc/u7x60q6g4EgtudJ7G1j8EFIAL1XL+4ABWZnLEWiL6Uv4D5Gu6dTrDxJL4JZrW78tBtAqTNZUW4lQFL2u62y4H0+3QgDD+uwqXkOR2DKId+5YdkdfQw0rASVyQDcvJhgqjTduioW5PQnHhdx9h8einoRb7v6YvDlNblVWeY2sfggpABeq5fPAAKzM5Yi0RfSl/AfI13TqdYeJJfBLNa3floNoFSZrKi3EqApEsAOq7ypJ7PwRTbzZAhmSdUvtOzgaysSrmlFWP69di0A3Lyi1rNdDWcON/zVM/8XwhFuCs6tcZGU5G1HiUSzMQjmN
rH4IKQAXquXywACszOWItEX0pfwHyNd06nWHiSXwSzWt35aDaBUmayotxKgKQgkL3FtVdmTmlCY7SAJiPaifS9xYyk4TDUJMlulK2fuAgEgAHkAegIBIAB9AH4A3b5yCo7ejwPtTyayDzf0f8UrjLrXIxX1t4al25tqsVoSjG1j8EFIAL1XL9YABWZnLEWiL6Uv4D5Gu6dTrDxJL4JZrW78tBtAqTNZUW4lQFOpNyCZZ1+zxArsLn0Iu6IYLr5uxplVdzPkUUZImbwijwICcQB7AHwA3b1dK5vyl9pIHXV72hCW4WSpDALQ2ylOo+SBXbpAZcnkxtY/BBSAC9Vy+WAAVmZyxFoi+lL+A+RrunU6w8SS+CWa1u/LQbQKkzWVFuJUBT3FaBnuTMH8jj3hlPLCdGj5shy8zn47Cc0xBO7knQs2EADdvWoB4jfZ7Og/ysSaRTHtcGHVpMMONJFQPwF/N8E9sMzUdrCB6AAL1XKv4ABgABD3+vvcAyB6NguRmql8pH8uQqFUn2eJgXiFjCPTlRMInQaPYiFLdyyXIPb3Wbr2r8uEXhb03UBSAibXsE994D7QAN2+RNKFfmeJKQu2dk0ny6sE4RaI4L7iWJxJXj3QjomA88xtY/BBSAC9Vy+WAAVmZyxFoi+lL+A+RrunU6w8SS+CWa1u/LQbQKkzWVFuJUBSi8AKZkkSJe3IFUWa+4S9ojvDLMxRUs9iSFQV5hBkjMMA3b55uYIP0D3oczk6wHRL+XTrlvHj9Ddfp1lnPGpDr/k5zG1j8EFIAL1XL8QABWZnLEWiL6Uv4D5Gu6dTrDxJL4JZrW78tBtAqTNZUW4lQFNeihtUeHcJYj9d6+yF47bjWtmflYGGmz27PgL7MQK2BwIBIACBAIICASAAlQCWAgEgAIMAhAIBIACLAIwCASAAhQCGAgFIAIcAiADevrCP8rIU1QnTeB1zYafMtfT7l2+OOGzjyQgrytiAXRPWNrH4IKQAXquX4gACszOWItEX0pfwHyNd06nWHiSXwSzWt35aDaBUmayotxKgKVZ/K3qZb/a3xuIvo6stKvtIGXhaEzcq0YnbLWK+2jzUAN6+haD/9EZpyUFHXrPz/9bgZe6Uz7/cuCCHd0TW+WR6XQYtYgQrbgBeq49SAAIAANLHkhi0LPH/dEKW4Ez94YWZEQdNGiVBr1Tmwo3EOMKbfrkSSaeDNzKHp4D4+W6PzbGslifEv0cGlYS27hXlNysA3b5OAeEH53jy44aRvQav3G1kqjsZtv1JacnRVcqxrPRGLG1j8EFIAL1XL9YABWZnLEWiL6Uv4D5Gu6dTrDxJL4JZrW78tBtAqTNZUW4lQFNlZnzZRqh/B4DZmm2goAsgaAIHrf4DTqojl2iQK6L6sQIBWACJAIoA3b3EYivlYMMhek1b7wd7qo3J8I7eFLyDF5FZONxIdGl4MbWPwQUgAvVcvfgAFZmcsRaIvpS/gPka7p1OsPEkvglmtbvy0G0CpM1lRbiVAU5Gn7vck2QG+VD7suV0vZLHARG/Xl1O5swBTV85xY0XbADdveQgcGLUxzBHOBg9bSDVSlOi0o+wMoiMoQENJzAO4ZQxtY/BBSAC9Vy/EAAVmZyxFoi+lL+A+RrunU6w8SS+CWa1u/LQbQKkzWVFuJUBSxhaUTGYPUhxqv4Ys5NThMLlqvFB7/mBfmpDpwfh8qPMAN++3yjClP1REidw3uElKizqWekCP8OrWJMbcWXR27N2EAsbWPwQUgAvVcvzgAFZmcsRaIvpS/gPka7p1OsPEkvglmtbvy0G0CpM1lRbiVAU5fIfOhbin9y1ERfkS13Y7A0kR2C6lEssGw0MDh//ee9AAgEgAI0AjgIBZgCPAJACASAAkQCSAN293sh9YpNe1oiqSIIIKil0jMbUakLt2leL2CJTkP1FwTG1j8EFIAL1XL84ABWZnLEWiL6Uv4D5Gu6dTrDxJL4JZrW78tBtAqTNZUW4lQFKCjO0AucEk2dO30bJnk933CawAPkaYBzzoHVd2TV+xwwA3b3LxAWO5gCOcn9/WsP2ULYBPkDVf1oYLp0LqowWE02vMbWPwQUgAvVcvhgAFZmcsRaIvpS/gPka7p1OsPEkvglmtbvy0G0CpM1lRbiVAUhWd6IzwmMcNjW20wVXwTO/vSi1wwJM0tLbQ39vJURXpADdvnMxvMoeJ5C7SVYRvB9PLA7jqE0hA/Q9o/MoGLmxdq+MbWPwQUgAvVcvhgAFZmcsRaIvpS/gPka7p1OsPEkvglmtbvy0G0CpM1lRbiVAUhgy7I1Kj8OjV1Pzzd/gojFDf7Xud6qceSSdA0meXwmrAgFuAJMAlADdvasThQXSjDwtaFCcVBSr6TOrfekGENjMhO2vOA5zn0hjax+CCkAF6rl/MAArMzliLRF9KX8B8jXdOp1h4kl8Es1rd+Wg2gVJmsqLcSoCkYViq6Y5sAdJPimfPSEMUDS7RddACMZrcMVcnh5S706IAN29lYX01xxQ/1S2klXduqSjDq4xzeLQK6bUwPh/ryiPmmNrH4IKQAXquX5wACszOWItEX0pfwHyNd06nWHiSXwSzWt35aDaBUmayotxKgKVM1Sun7FHtPF3OKx0o5fQS82hhA1z0wDJdoCD9IMIOxgCASAAlwCYAgEgAKUApgIBWACZAJoCASAAnQCeAN2+Rasxx62AoxAJEX+icuC5MKf+XSYI2NwS3XS+txuuKcxtY/BBSAC9Vy+eAAVmZyxFoi+lL+A+RrunU6w8SS+CWa1u/LQbQKkzWVFuJUBSVM5MnUYmkJ/0axeSNW23o/zJ43xInGHjP4JDuZm/uJMCASAAmwCcAN2+OP/ex80D30CobZK3hhckDnNJ1HvR6xdeVC1J7N31WZjax+CCkAF6rl+sAArMzliLRF9KX8B8jXdOp1h4kl8Es1rd+Wg2gVJmsqLcSoCnApHQ57sn4vd4LQ8RmPk3ZpCWjJXPrz55940OWlAjS+YA3b4nVJHKpNDtPkcYbfgZc1zaEQGTBEYj7hv7zJEexY0YmNrH4IKQAXquXywACszOWItEX0pfwHyNd06nWHiSXwSzWt35aDaBUmayotxKgKXEHW+sa/cHmaERfKWHX4oKouXFqHrcyV4gKweFkL+W3gICcACfAKACAWYAoQCiAN29q9uRipn3GSsOvnRfBCF5kdIHfcQ//nWVZ4L1XHyYBWNrH4IKQAXquXxgACszOWItEX0pfwHyNd06nWHiSXwSzWt35aDaBUmayotxKgKc8meXjN9Q7XJCbzw6nblxSpLjFcBAQsC546XcS3E/F9gA3b2/YM+tLxDsQg1GYNmKQ6EQWoZ6rGOickB18VW5kf01Y2sfggpABeq5feAAKzM5Yi0RfSl/AfI13TqdYeJJfBLNa3floNoFSZrKi3EqApg/GS46B3w0ga+VhLGImF8NJV+yayEZ9U2UB8j4KvqVqADdvc8oHiH7+xXbOVkadYqIXGZ3ppHbbw+p1FBrnGSw53ExtY/BBSAC9Vy/mAAVmZyxFoi+lL+A+RrunU6w8SS+CWa1
u/LQbQKkzWVFuJUBTRwty+CYt7XjqLcNVFpbkpAMQb7f1wyJrn0HJp1JxNh8AgEgAKMApADdvY3RVEesXDsKye2euuOzLP482lRCv854Q0Q6NTcB6zRjax+CCkAF6rl+8AArMzliLRF9KX8B8jXdOp1h4kl8Es1rd+Wg2gVJmsqLcSoCkpwVe2HF4wh9rdu0Xj0QSgyda33bikdxCnWFgjGrPKZIAN29qCYZhC0iV/sZCXyZC3eBjyNS44CbnBebO2aYm44Bw2NrH4IKQAXquX5wACszOWItEX0pfwHyNd06nWHiSXwSzWt35aDaBUmayotxKgKdQr4KO3y6JkVrBeEpvLpW1L5i3oeWpQ2ujplZtAcrsfgA3775zQC9nc5Ku/ddBp3KfxATmZ5m1pvHhVM9og9cZes60xtY/BBSAC9Vy+eAAVmZyxFoi+lL+A+RrunU6w8SS+CWa1u/LQbQKkzWVFuJUBTMKB9LIexfdzFurQmCv3lKBdG0yKYSxFemN5Qn0GjzD8ACASAApwCoAN6+he1cWkirrcW/SoUYX3gapg7r5+8gZC9mDH6Q1IGYTPY2sfggpABeq5frAAKzM5Yi0RfSl/AfI13TqdYeJJfBLNa3floNoFSZrKi3EqApYJXA4Z3XjL6YTBnRR3tEWTHR5ghohWUHQLB0Vz+SRD4CASAAqQCqAN2+VugN7jaNSi43BF7BOqG+NJC3ddBl7tthRq1khsLZUcxtY/BBSAC9Vy/WAAVmZyxFoi+lL+A+RrunU6w8SS+CWa1u/LQbQKkzWVFuJUBSsMbPcNg3DPhbk16XuvtonyRjr1+OqCzwQSW4D5cdMMUA3b5bg7TN8g0M2fFe/K+bTIoDGHOjYFHT2o0EyHKDkoBpDG1j8EFIAL1XL3YABWZnLEWiL6Uv4D5Gu6dTrDxJL4JZrW78tBtAqTNZUW4lQFKYPrwso0eQo3pbn5ho1lT1c1YnkUcQ/SQVXVXlZkt6HwIBIACtAK4CASAAvQC+AgEgAK8AsAIBIAC1ALYCASAAsQCyAgFIALMAtADevqZgF37BWMBWdrOWuqtF+PimP3Sg6vGnz+ARx+6gzYpGNAFBlpgAXqvKNwACszOlYiJxt3l+yYwp5/va9LNJcwy5PnVdg/CPI9RlXRQMsX7YTBeeZ86uXcpTKooQbWait0XwBmP+ikDeJiz5inFtAN6+k2BYN26H9kgc4xsOCII1tL6d8AFFyXCBxFoozOZMaEY2sfggpABeq5fvAAKzM5Yi0RfSl/AfI13TqdYeJJfBLNa3floNoFSZrKi3EqAp/2o/nJ5enXZwxcTSTW2vXSE4iVhNYq2YKow0LR4DwskA3b5fqqH55lU9TV3lK88RyHKc6J4Sc1a/UXIBKu/E6UjvzG1j8EFIAL1XL9YABWZnLEWiL6Uv4D5Gu6dTrDxJL4JZrW78tBtAqTNZUW4lQFK0+8bR/zxqkDeCY6C4zfQAG2iHPliU0yx7CjquZ34vzQDdvn/ArVPT9fpI1R9GBLh7DhtY1gPtFbkI/gZO/VkgyKXsbWPwQUgAvVcvzgAFZmcsRaIvpS/gPka7p1OsPEkvglmtbvy0G0CpM1lRbiVAU9aMq8m/y0+U7Iss6ZfJXEizr7K/zfj/V1Fub8+N4gOfAgFiALcAuAIBIAC5ALoA3b4I3NdicudKcUwG4fwDT96QqpvAyAIZHPlC347i5rLEmNrH4IKQAXquXvwACszOWItEX0pfwHyNd06nWHiSXwSzWt35aDaBUmayotxKgKRc5Zvqfnq2BDc8Bp3x1LPVOjKziXjY5gc1JMnRITDCRgDdvhwAtWHLWKHb8TK53HEcuzsrgtQgg12BxinU6z0MfMcY2sfggpABeq5fiAAKzM5Yi0RfSl/AfI13TqdYeJJfBLNa3floNoFSZrKi3EqApW0lMbTIAZK6oDV1PC8XExd1C6PXCvOoJ/2tkA8aNObeAgEgALsAvADevrC1wDHs6drf0jxjpB5OfxrkE4sVf/dYjCEIOFPVhXiWNrH4IKQAXquXuwACszOWItEX0pfwHyNd06nWHiSXwSzWt35aDaBUmayotxKgKQ87BfJiwd/TRQnLYgPneeuz7bkRUy9GJ8avxnRMCkarAN2+SPdYqx0kt7D2K1PRgO++bKb834pauEAyAJgYrh+gTMxtY/BBSAC9Vy/mAAVmZyxFoi+lL+A+RrunU6w8SS+CWa1u/LQbQKkzWVFuJUBTjf0Sj4bEAVQqAVk6HoQ6eAksjDCxXJsUZwGZsJjAZQUA3b5WLqbZDrMrnCieLu4488/TrVuVVM2ZzJKPkaL0bHRbrG1j8EFIAL1XL+4ABWZnLEWiL6Uv4D5Gu6dTrDxJL4JZrW78tBtAqTNZUW4lQFLqVORz3yQ4elYr2tIc/scWACAXlmtAeM86OT5yvLAncQIBIAC/AMACASAAxwDIAgFIAMEAwgIBIADFAMYCA3kgAMMAxADdvkeI/zhdOlsPs+P5gGpd5Ron5kXwAHqEIXZm0lzv+zEMbWPwQUgAvVcvzgAFZmcsRaIvpS/gPka7p1OsPEkvglmtbvy0G0CpM1lRbiVAUu/4w+NE+/wVM6ab4bdGh0aLa+2C7o1tr+qaNsqJ1xLFAN29H2uP6r0dIyLzSN8p+zl+Lzd9rYNshYlLF4pp/ajOkY2sfggpABeq5frAAKzM5Yi0RfSl/AfI13TqdYeJJfBLNa3floNoFSZrKi3EqApWNz4ghtuorqRLmwE5+BkJ8TmVoGqY+260TTmJGYdx/uAA3b0dV2Q1iFgQJtn/yhYTygACH+QY9drMkDE0/lPCIDEFjax+CCkAF6rl+sAArMzliLRF9KX8B8jXdOp1h4kl8Es1rd+Wg2gVJmsqLcSoClFxISGaUgYSEqrq6B6w8or87NHI8FKuldgXSyiZ0o95YADevrImmw6pNARqWTmb2CT2p/rkx9aWuxY+G9I1y8IaorVWNrH4IKQAXquX8wACszOWItEX0pfwHyNd06nWHiSXwSzWt35aDaBUmayotxKgKb+ShpK72zo8MXJr8lmIwjtEZt1ukMWekpIXAAe8FcSBAN6+n40gO+usfWKYQMoKcEy/+SYH1r9Ti8matl+tpqezxlY2sfggpABeq5frAAKzM5Yi0RfSl/AfI13TqdYeJJfBLNa3floNoFSZrKi3EqApKQrGy4YfeDDuuqRKMEnYStDcuZcEU32y/wumfSU+8z0CASAAyQDKAgEgANUA1gIBIADLAMwA3r6v5EJMnfIRsdLpL3qRiJqsZDxgXeRY+o8q+QU0uIVlRoy/eTKUAF6rj8IAArMzT1MKlC8+vcC2Ajh2KH/FEAe2roVG9GkrOoeHZT3UFQTZa12VV9UhyTBCo464QncxPQLLe87mknNCVw4u0P37aAIBWADNAM4CASAA0QDSAN29/OUYWNTkYZ8Zwx2CbGgDxu7fy6UPQzNoOXAgHky/kLG1j8EFIAL1XL84ABWZnLEWiL6Uv4D5Gu6dTrDxJL4JZrW78tBtAqTNZUW4lQFK5GwjGUhg/a+SCBA
tRqu/SzSeNIoF6POshkZzcbjG/4wCA3xoAM8A0ADbvHjrUIWS2Isk5KBW4CFPHlPPoBWsk+7q26I5GmQ1L4xtY/BBSAC9Vy/WAAVmZyxFoi+lL+A+RrunU6w8SS+CWa1u/LQbQKkzWVFuJUBSKcpCjPTL9o4PfJEyRJKxy3/ogMau7Opt9uLFv6tLn40A27xRAasHCX9euQc89soF/vzbgjmHFO/so5KR1QjrRv7sbWPwQUgAvVcv3gAFZmcsRaIvpS/gPka7p1OsPEkvglmtbvy0G0CpM1lRbiVAUvBSrR+YxiHbdbGASxr0x730wK/vr7PzIilpTwdzXbHDAgN8uADTANQA3b4wO7ZKNBZ7JyZ/VXvTfnM2qMGJuFLgz8GSTYJm4CI4WNrH4IKQAXquX7wACszOWItEX0pfwHyNd06nWHiSXwSzWt35aDaBUmayotxKgKTkWYdKSbz3ISHQbX5DnoW5OtG8EBG2KHyI/wC7kjO/9gDcvIQRaLsiPwPMAfWTRHTlauOsAwegSKsWcybH1lXCXbZO+HOzggBeq5ANAAizM9c6nDWCJkwPOEmiF0i8TgsDaGUT07VFTGptCqw14Ybls9NjdBfOiBFQcYC1ETwo4e0pHBZR9/ntV72ljnuTpkEA3LyjBcxEQE2uiajztXfPlMNnqyio68gaXFUdOTA+JUwmNrH4IKQAXquX6wACszOWItEX0pfwHyNd06nWHiSXwSzWt35aDaBUmayotxKgKV98ti11IqKE2BD1uMLocEFTDFQ5VoSQMm+4eX1dstv0AgEgANcA2ADevo7UN58csTFXs01QMBplq0fcNFL0zQ4qLY4LM6BzUPQ2NrH4IKQAXquX3gACszOWItEX0pfwHyNd06nWHiSXwSzWt35aDaBUmayotxKgKXB+lPQAbnFs2g5tBkyrUimFVB94oF0mMmaHEhRnA6KuAN2+VExuvO3j8f7sbw6g/8XAPaSvY01i7lyQZwwNDFUBtIxtY/BBSAC9Vy/uAAVmZyxFoi+lL+A+RrunU6w8SS+CWa1u/LQbQKkzWVFuJUBSnFiEAg9OIQXoXMRoJuc025RmJ1/A0KLcvVuYw0ssjXUCAVgA2QDaAgFYANsA3ADdveaKyrcjNR5KLkX3NzXjXtW1jZdgYD0d0hiEi8q6jfextY/BBSAC9Vy/EAAVmZyxFoi+lL+A+RrunU6w8SS+CWa1u/LQbQKkzWVFuJUBS0th6A25mHH5k8rlp2ic+t6k4VKWUSIYvPwgp6YoBvykAN29TiDjVQ/kqMu1GTF71jZILLaULSmLTVc+s9avnTyKOsbWPwQUgAvVcvfgAFZmcsRaIvpS/gPka7p1OsPEkvglmtbvy0G0CpM1lRbiVAU/gno2R877xEExBHk6iJHrigJrCknHYdPsgj/aiEjMCxAA3b1N5qNHZPwuBUQ3+jE+e2oKfz4BE2Tc/gpDH1D3JLxcxtY/BBSAC9Vy/uAAVmZyxFoi+lL+A+RrunU6w8SS+CWa1u/LQbQKkzWVFuJUBT84f0859R9KhE+7Y7cyw155fnbiAvw8mxATRsTi1Cvk0AIBIADfAOACASAA6wDsAgEgAOEA4gIBIADnAOgAT793B9naosSjiq3teMIT0fmIQcza5TagoZ0F7VMYuV3qNseN+OI+s1ACASAA4wDkAE+/G0PcutMMQGDkS6T2Oabu0jPMovm+F92knW+17kqS8sWLsq8Zo9QgAgFqAOUA5gBNvkts1TqQU5P4t2oiVUTZzpK9b/mbdrZUvHThuSGMT7eMXZV4zR6hAE2+Wpo0Lh+Tbj1B5efCuUntUQVaW4cQ/bGlNDQr/wT0xQxdlXjNHqECASAA6QDqAE+/YE2JilfN8ihjj0PMuE4B3ToDZ9CY1lCxZ9FO+hy17yrFvAgKyGkwAE+/DFaMbMT6oxNn1pC2BJIZHGgadFgCWW1xTG1J84KPX72Lsq8Zo9QgAE+/BjfhLp8uKbgycWNBnDMe7czcgrK46yFT4Q98q0HsEw2LN+7RruYgAgEgAO0A7gIBIAD3APgCAUgA7wDwAgEgAPEA8gBPvsU8cKos11fNewFvZzE9V1xIi2xWS/j2qQl+6cJEeVwDF2VeM0eoQABPvu7carjBNhL/nxUuhzWLeF+23UptW4grXKWdUeVRNKz7C57gi9oAQAIBIADzAPQCAW4A9QD2AE++3PC5xF59j4ljGR9p5vN6qRZKb/k8pQDFIppQSttjffsQ7wy0bxJAAE++xjLEtSD5XNM/Mu4fTXC5O3WqIgkuDw7/5ilYQcMAvJMJGDClOwBAAE++VnSFeIBtvuhv2bNKYLvOqTJaSv8G74cJk8wqrguCPe4C27Tl0x4pAE2+XascoCz2tfhC7ZrU+GMOgwCR1Tpfsx+Ld47ArgOJEKxdlXjNHqECASAA+QD6AE+/cp+9u4LB7hybjqpLL+eSTWd/xRre4vgi2XRJYfB3tRLewLp63EFwAE+/CseoFwjrTUBY8HR8nEtiS2JiQHMzUiOggdVmpMEaE7GStSmVI84gAgEgAPsA/ABPvtY8kMWhZ4/7ohS3AmfvDCzIiDpo0SoNeqc2FG4hxhTbFtiu/n+GwABPvtCv34aQjXGWWKe+WwYEBV00NS/niDBKsM9waIHDODDTF2VeM0eoQAIBIAD+AP8CASABAAEBAgEgAR4BHwIBIAEgASECASABAgEDAgEgAVIBUwIBIAEEAQUCASABBgEHAgEgAQwBDQIBWAEIAQkCAVgBCgELAJ6+bmMp4yV2I2LttkHB39R8+TXob1JGK6c1tbz8D0Of1VLEWiL6Uv4D5Gu6dTrDxJL4JZrW78tBtAqTNZUW4lQFIASGrjunc69sbWPwQUgAAJ6+eblJQFsVo6QvvL30zPVYC4F9ddE5lCPduwbohFiTfBLEWiL6Uv4D5Gu6dTrDxJL4JZrW78tBtAqTNZUW4lQFIASGrjunc69sbWPwQUgAAJ6+cCVU6azqmiK3rp0gOU5Ie4sdFaSzDuPz1X5ruCXaOXLEWiL6Uv4D5Gu6dTrDxJL4JZrW78tBtAqTNZUW4lQFIASGrjunc69sbWPwQUgAAJ6+dGAvCSTUpuMDcONP7VGeRNrAFjs4DddyimsS7qi2CUuLpB+mhHbBc9MOtbl3+Cc2/UwVd8nHPamFSwW86XbNIASXSe1HuyosbvVWqaAAAgEgAQ4BDwIBIAEUARUCASABEAERAgFiARIBEwCevn5CAJRUEtc5iMDEdKX6iCAstAqZUpwsJpy22tN0Iz/yxFoi+lL+A+RrunU6w8SS+CWa1u/LQbQKkzWVFuJUBSAEhq47p3OvbG1j8EFIAACevklqgvYblINA1QatQqnCI9E0U//Uz9tsFXuyG3PGPBsSxFoi+lL+A+RrunU6w8SS+CWa1u/LQbQKkzWVFuJUBSAEhq47p3OvbG1j8EFIAACdvdO2CPRtjUN2fPH1doxyClPsgPev7sisAdvaB8pBusbLEWiL6Uv4D5Gu6dTrDxJL4JZrW78tBtAqTNZUW4lQFIASGrjunc69sb
WPwQUgAgCdvffcK6dhtD2qi5f8iL3zi5CLca8vxT+88rWB+CWhxMNLEWiL6Uv4D5Gu6dTrDxJL4JZrW78tBtAqTNZUW4lQFIASGrjunc69sbWPwQUgAgIBIAEWARcCASABGAEZAJ6+QLwWaCDkKb035DVHppKzGcRik4QZIC7t0PLwnyKJGxLEWiL6Uv4D5Gu6dTrDxJL4JZrW78tBtAqTNZUW4lQFIASGrjunc69sbWPwQUgAAJ6+dUnZMXnrrqcJC/JB8cQcm96F55YFunPEw1+NzrupeJLEWiL6Uv4D5Gu6dTrDxJL4JZrW78tBtAqTNZUW4lQFIASGrjunc69sbWPwQUgAAgFqARoBGwIBIAEcAR0Anb2JlbDwncYxEe+ycLoKbBaqY+K3ktTSaGy5gH9UhLeLliLRF9KX8B8jXdOp1h4kl8Es1rd+Wg2gVJmsqLcSoCkAJDVx3Tude2NrH4IKQAQAnb2yR3HXo663UETtZ4xOi/LSuRnwOF3zkjlV9t9UEUYSliLRF9KX8B8jXdOp1h4kl8Es1rd+Wg2gVJmsqLcSoCkAJDVx3Tude2NrH4IKQAQAnb4R9aS8zEqz8hkPkaO7pNpaU0HpIHW5VReC1T/WuIJl5Yi0RfSl/AfI13TqdYeJJfBLNa3floNoFSZrKi3EqApACQ1cd07nXtjax+CCkAEAnb4q99OYNmGrYK7C9a5TWjh5APTsAagONRpPORPkuhqHZYi0RfSl/AfI13TqdYeJJfBLNa3floNoFSZrKi3EqApACQ1cd07nXtjax+CCkAECASABfAF9AgEgAaIBowIBIAEiASMCASABNgE3AgEgASQBJQIBIAEuAS8CASABJgEnAgFIASgBKQCfvqyxxGjD4Vrbo4DnJc2y2tCwB2RTkofJbpSth//aUhspYi0RfSl/AfI13TqdYeJJfBLNa3floNoFSZrKi3EqApACQ1cd07nXtjax+CCkAEAAn76+acIpX5kJBeJesy8djw6ficlhRsJbMUXOcLv2hWKbKWItEX0pfwHyNd06nWHiSXwSzWt35aDaBUmayotxKgKQAkNXHdO517Y2sfggpABAAgEgASoBKwIBWAEsAS0Anb4QSi+B6H8Fzq6EliUUuPcB9/1TgzglmgqH1GS6mAHiZYi0RfSl/AfI13TqdYeJJfBLNa3floNoFSZrKi3EqApACQ1cd07nXtjax+CCkAEAnb4xiYEkWucw/eYfeHPmgBXLeDE0eqQ8NqSgXQ7DVfqcJYi0RfSl/AfI13TqdYeJJfBLNa3floNoFSZrKi3EqApACQ1cd07nXtjax+CCkAEAnb3rmImGO9WOyszNnbAXnr6eGWZr6LzFuk7jtjQNwXoJSxFoi+lL+A+RrunU6w8SS+CWa1u/LQbQKkzWVFuJUBSAEhq47p3OvbG1j8EFIAIAnb3le8S/TLLT187Ds/yl5krNnAvNYft+SLH2/zGKmk+MyxFoi+lL+A+RrunU6w8SS+CWa1u/LQbQKkzWVFuJUBSAEhq47p3OvbG1j8EFIAIAn779S6dRP2e/3oxVbWHZh8W2uMXtRZs5yz+dce3gdZjB9LEWiL6Uv4D5Gu6dTrDxJL4JZrW78tBtAqTNZUW4lQFIASGrjunc69sbWPwQUgAgAgEgATABMQIBWAEyATMCASABNAE1AJ2+OAfMXdY4rYsjTpTOsNzwdypNtqcVWMpVaBiNXfLQGOWItEX0pfwHyNd06nWHiSXwSzWt35aDaBUmayotxKgKQAkNXHdO517Y2sfggpABAJ2+LQ5h/wNz8P9RMAqlvUByKsBNKk2yTBvv9elQpBz3sOWItEX0pfwHyNd06nWHiSXwSzWt35aDaBUmayotxKgKQAkNXHdO517Y2sfggpABAJ6+ZYUnMVY4iMsUNIBJlseSIrWt3IU46A5Z4lhNE6xEDLLEWiL6Uv4D5Gu6dTrDxJL4JZrW78tBtAqTNZUW4lQFIASGrjunc69sbWPwQUgAAJ6+Tn4y/y6kG7kow2SqEgDpIicmM9ydEPfm+wUsSlVHuxYqxiP3epnykbPKSw76XfIylV26slUOAKPJpK5vXSGY4AOczFJSxwPsV09GsGwAAgEgATgBOQIBIAFCAUMCASABOgE7AgFYATwBPQCfvo5WaFBwIyGoa64JRXzUmnQ61hCE7nY4X1NGDm5LHGLpYi0RfSl/AfI13TqdYeJJfBLNa3floNoFSZrKi3EqApACQ1cd07nXtjax+CCkAEAAn76e2OSNUQAHecqRX0HzH5JGR5S1R+tfeEtEgZ3ZcOuiuWItEX0pfwHyNd06nWHiSXwSzWt35aDaBUmayotxKgKQAkNXHdO517Y2sfggpABAAgEgAT4BPwCevmLnqSh512cCpau42jdhhhwrjfoz333PiFllg0FFAOSyxFoi+lL+A+RrunU6w8SS+CWa1u/LQbQKkzWVFuJUBSAEhq47p3OvbG1j8EFIAAIBWAFAAUEAnb4OOzSDFNmi0QjI48B72ERa6qJLdUS8EblpNHj7qkq8pYi0RfSl/AfI13TqdYeJJfBLNa3floNoFSZrKi3EqApACQ1cd07nXtjax+CCkAEAnb2ZT2mi1pPgVfj/G1DMCUEvAqij4FCVnbSC84evOiRxliLRF9KX8B8jXdOp1h4kl8Es1rd+Wg2gVJmsqLcSoCkAJDVx3Tude2NrH4IKQAQAnb2A53kKZxk/tJMsWBJE6AUmVMJaucWpJptaBuWZxzqkliLRF9KX8B8jXdOp1h4kl8Es1rd+Wg2gVJmsqLcSoCkAJDVx3Tude2NrH4IKQAQCASABRAFFAgEgAUwBTQIBIAFGAUcAn76GCm5wtenPSmicSQuru0zYNPn7fSr0mTcxe2LGfCa/2WItEX0pfwHyNd06nWHiSXwSzWt35aDaBUmayotxKgKQAkNXHdO517Y2sfggpABAAJ6+V7HTvrE3CeheblFFEbXEOWLZSsTu5h2yJc124UqlzpLEWiL6Uv4D5Gu6dTrDxJL4JZrW78tBtAqTNZUW4lQFIASGrjunc69sbWPwQUgAAgEgAUgBSQIBIAFKAUsAnb4T6+XLmY7Rdypwjy+iDLryoqqSNIO0CZNqXihCCMyApYi0RfSl/AfI13TqdYeJJfBLNa3floNoFSZrKi3EqApACQ1cd07nXtjax+CCkAEAnb3q0HbgQzrl/2ZhJcdjnEbcf9x11GAzUfFGSdBX5xtsyxFoi+lL+A+RrunU6w8SS+CWa1u/LQbQKkzWVFuJUBSAEhq47p3OvbG1j8EFIAIAnb3eP0PjdYf+adZMpZHC1OcTUXLa+ssiDc3bFD1MZH5PSxFoi+lL+A+RrunU6w8SS+CWa1u/LQbQKkzWVFuJUBSAEhq47p3OvbG1j8EFIAICASABTgFPAgFIAVABUQCevlG8mHoe6K9IBd5gp5OBvbmHuqKO2fxJMIcRF47Vbc7SxFoi+lL+A+RrunU6w8SS+CWa1u/LQbQKkzWVFuJUBSAEhq47p3OvbG1j8EFIAACevmRtVmqa/71dinJHY
KJ1u2Oxp78OPaxbwopjfCidU7jSxFoi+lL+A+RrunU6w8SS+CWa1u/LQbQKkzWVFuJUBSAEhq47p3OvbG1j8EFIAACdvjrodu5c4hSH/3JNYkn2ThPMuZ2Ft99ttHT2TQPZeYXliLRF9KX8B8jXdOp1h4kl8Es1rd+Wg2gVJmsqLcSoCkAJDVx3Tude2NrH4IKQAQCdvj6IvLeKBnRgbwShVBvoPCPt0zLpGll9YPaV20ecdhSliLRF9KX8B8jXdOp1h4kl8Es1rd+Wg2gVJmsqLcSoCkAJDVx3Tude2NrH4IKQAQIBIAFUAVUCASABYAFhAgEgAVYBVwIBIAFcAV0CASABWAFZAJ++ubcyc7ye6BWKGb52gKxHxDu2aAAd5VSg1Ku/DbJTwyliLRF9KX8B8jXdOp1h4kl8Es1rd+Wg2gVJmsqLcSoCkAJDVx3Tude2NrH4IKQAQACevn83cXX33tWI1k+Y8Pfv+K2z8DOVg/HnfgiKcLcKPJcSxFoi+lL+A+RrunU6w8SS+CWa1u/LQbQKkzWVFuJUBSAEhq47p3OvbG1j8EFIAAIBagFaAVsAnb2H52+ntHrc5HjLwyatJV+Sr6JBhbMg+qhK1LzUZS9EliLRF9KX8B8jXdOp1h4kl8Es1rd+Wg2gVJmsqLcSoCkAJDVx3Tude2NrH4IKQAQAnb2f4h7EXrMMTteBXETNBde5Ep4duUytKt0QvW+0EGpSliLRF9KX8B8jXdOp1h4kl8Es1rd+Wg2gVJmsqLcSoCkAJDVx3Tude2NrH4IKQAQAn766sEJ8bPnkkLMgNXWfATXyXdkHJpeeFlsvUA8yZcqnCWItEX0pfwHyNd06nWHiSXwSzWt35aDaBUmayotxKgKQAkNXHdO517Y2sfggpABAAgFIAV4BXwCdvjtfrEEHRrTWkGp6ebpXxJSMWSfDwxD+kKMOKUh4bh6liLRF9KX8B8jXdOp1h4kl8Es1rd+Wg2gVJmsqLcSoCkAJDVx3Tude2NrH4IKQAQCdvjf0bFoyHSj5kWmwZFYQ1ItvJp0saqpNYPs0fnf4VtcliLRF9KX8B8jXdOp1h4kl8Es1rd+Wg2gVJmsqLcSoCkAJDVx3Tude2NrH4IKQAQIBIAFiAWMCAVgBdAF1AgEgAWQBZQIBSAFoAWkAnr5VBCDZmC+cCRzwweZnJjkbUT6KtzKhRktSJOk1SCW28sRaIvpS/gPka7p1OsPEkvglmtbvy0G0CpM1lRbiVAUgBIauO6dzr2xtY/BBSAACASABZgFnAJ2+A+NX+tdbh8r5n4FVTU9yGG3VLbWOysC/OxWi8FsOGqHv9fe4BkD0bBcjNVL5SP5chUKpPs8TAvELGEenKiYRABWsye3wqheaC9uoIogBAJ2+O4+TyXuwvtRi2/oClk+EBKVDi51t4CP8yhFyxIB6mCWItEX0pfwHyNd06nWHiSXwSzWt35aDaBUmayotxKgKQAkNXHdO517Y2sfggpABAgEgAWoBawIBSAFwAXEAnb33p7DiPfcc2LjJLIIzfHmOT1xmpMyBwFLOJrMrdVmvyxFoi+lL+A+RrunU6w8SS+CWa1u/LQbQKkzWVFuJUBSAEhq47p3OvbG1j8EFIAICAVgBbAFtAgFIAW4BbwCdvXVges2GXK62rvBF6Qg4wCWLoSjnkjlRM4yBLI+MZjEsRaIvpS/gPka7p1OsPEkvglmtbvy0G0CpM1lRbiVAUgBIauO6dzr2xtY/BBSACACdvNo57o52BC7rx5b1sPkc7bXja+B3RYkAlQyVCAkmffOBNiYpXzfIoY49DzLhOAd06A2fQmNZQsWfRTvocte8qADxUJGxHW/TFshKjJ8AIACdvNuUk+wbrfaxBd1fX+GuMp+G4YzGJA6X+LLLDaO91pSxFoi+lL+A+RrunU6w8SS+CWa1u/LQbQKkzWVFuJUBSAEhq47p3OvbG1j8EFIAIAIBIAFyAXMAnb2/iGqdwIeOXax4SVw3dvldlkV+imuwFCh1ev8WanUgliLRF9KX8B8jXdOp1h4kl8Es1rd+Wg2gVJmsqLcSoCkAJDVx3Tude2NrH4IKQAQAnb1hfBUYjs9cCclVwvUPGHBbhNujQVjHxW+l9tng9+8TLEWiL6Uv4D5Gu6dTrDxJL4JZrW78tBtAqTNZUW4lQFIASGrjunc69sbWPwQUgAgAnb1jj+oM61ro3A4sNhaWsJNK75mU+5PVsFU2zGZkVMv/LEWiL6Uv4D5Gu6dTrDxJL4JZrW78tBtAqTNZUW4lQFIASGrjunc69sbWPwQUgAgAnr53UUWiTPckKgYHpQ8KHS5kWobhsPQ6CRBt4HYfekITeudThrBEyYHnCTRC6ReJwWBtDKJ6dqipjU2hVYa8MNygBneJ6y/0HOycTGlp+AACASABdgF3AJ2+MBsj521A2UtXDF7zRcf9bcjAAAxeThDfCxsWVoU+mWWItEX0pfwHyNd06nWHiSXwSzWt35aDaBUmayotxKgKQAkNXHdO517Y2sfggpABAgEgAXgBeQCdvdARSXD8frvXXKxyVc5wNNeShsSH61UKXRT+V45pfYJLEWiL6Uv4D5Gu6dTrDxJL4JZrW78tBtAqTNZUW4lQFIASGrjunc69sbWPwQUgAgIBSAF6AXsAnb1XzBckAP5LGqx1ih6+nDg+HOsmJUmXNcFwgZkIGcXFLEWiL6Uv4D5Gu6dTrDxJL4JZrW78tBtAqTNZUW4lQFIASGrjunc69sbWPwQUgAgAnb1jHiLth3FHZXZ9rsxvsqHhQnhP0daxy1/IiXYwwvOJLEWiL6Uv4D5Gu6dTrDxJL4JZrW78tBtAqTNZUW4lQFIASGrjunc69sbWPwQUgAgCASABfgF/AgEgAY4BjwIBIAGAAYECASABhgGHAgEgAYIBgwCfvsc1YX5HWxzX8E3Jln158eblKJWMzzxqa6EvAHv/x9mMsRaIvpS/gPka7p1OsPEkvglmtbvy0G0CpM1lRbiVAUgBIauO6dzr2xtY/BBSACAAn76LVKbxqAi5VatxIJaKrcVJ8YQn/vdkTY3JadhkezJo+WItEX0pfwHyNd06nWHiSXwSzWt35aDaBUmayotxKgKQAkNXHdO517Y2sfggpABAAgEgAYQBhQCevm+FaE5lY7kHmm85bumVsdH3NOXkXfyMz4PHx5MF8UpSxFoi+lL+A+RrunU6w8SS+CWa1u/LQbQKkzWVFuJUBSAEhq47p3OvbG1j8EFIAACevlKwjSi5zmiQLZPi15z4iZgEUaS4WhLk2jGvFfhEE9ySxFoi+lL+A+RrunU6w8SS+CWa1u/LQbQKkzWVFuJUBSAEhq47p3OvbG1j8EFIAAIBYgGIAYkCASABigGLAJ2+PkyXKZBeBCRk/nYU9FwzaUp/tn8Cw2Q6WkWDbKRt7iWItEX0pfwHyNd06nWHiSXwSzWt35aDaBUmayotxKgKQAkNXHdO517Y2sfggpABAJ2+Mwlog7fDXLaMS0Va2eyuxyuCXGw7GnCDzuOFKC06fmWItEX0pfwHyNd06nWHiSXw
SzWt35aDaBUmayotxKgKQAkNXHdO517Y2sfggpABAJ++hTChVBi9Xay8VTwqTdD7nIN47Zqb2L49y89yQOYbVeliLRF9KX8B8jXdOp1h4kl8Es1rd+Wg2gVJmsqLcSoCkAJDVx3Tude2NrH4IKQAQAIBIAGMAY0Anr5FynTamn3sPDPR6jU0OuD/X3GacXd8GbB8rjPXBfoP0sRaIvpS/gPka7p1OsPEkvglmtbvy0G0CpM1lRbiVAUgBIauO6dzr2xtY/BBSAAAnr5TAU3FBoJkel1toM2R397MiFvV6KFROLYx0uGpEujnyephUoXn17gWwEcOxQ/4ogD21dCo3o0lZ1Dw7Ke6gqCABKO8OyhixkxwIifdGAACASABkAGRAgEgAZ4BnwIBIAGSAZMCASABlAGVAJ++k4eOpG7W2R5NvRcKCCtAFj5ki4xPFpvSUvzVHx6jfjliLRF9KX8B8jXdOp1h4kl8Es1rd+Wg2gVJmsqLcSoCkAJDVx3Tude2NrH4IKQAQACfvrKfve+6AtAyMJLjrUjewTnXoZG7pGaRRw+3nyYR+MFZYi0RfSl/AfI13TqdYeJJfBLNa3floNoFSZrKi3EqApACQ1cd07nXtjax+CCkAEACASABlgGXAgFIAZgBmQCevlwEebNCXtq+KZdDZEN3quMLI/NQ9QOL1YTEkm3wsOUWKsYj93qZ8pGzyksO+l3yMpVdurJVDgCjyaSub10hmOADnMxSUscD7FdPRrBsAACevnGplcJkHpF4l0ciZcHGL/E0LPM56zGyiVnQXl9N82HSxFoi+lL+A+RrunU6w8SS+CWa1u/LQbQKkzWVFuJUBSAEhq47p3OvbG1j8EFIAAIBIAGaAZsAnb4JqUxUU7jilAN7gqx790J1hKvd+VqxLx1jmGEdFM/xZYi0RfSl/AfI13TqdYeJJfBLNa3floNoFSZrKi3EqApACQ1cd07nXtjax+CCkAEAnb3N5OstoW5ApQqWAoKF3DKWdpKtKpA2VIruDQVdrlFeyxFoi+lL+A+RrunU6w8SS+CWa1u/LQbQKkzWVFuJUBSAEhq47p3OvbG1j8EFIAICAVgBnAGdAJ29TE3WWg5IlP9cfTW740Ga/T9EvI19trIUCsZ+XT3n5SxFoi+lL+A+RrunU6w8SS+CWa1u/LQbQKkzWVFuJUBSAEhq47p3OvbG1j8EFIAIAJ29U/3cAdJsCWqVXdfemUlrrMksxYvOAMsVDE1VH03WfSxFoi+lL+A+RrunU6w8SS+CWa1u/LQbQKkzWVFuJUBSAEhq47p3OvbG1j8EFIAIAJ++50DPAoiVYeY2k71OMOHTKY9Ny3uhILtdnHlufUMtctSxFoi+lL+A+RrunU6w8SS+CWa1u/LQbQKkzWVFuJUBSAEhq47p3OvbG1j8EFIAIAIBIAGgAaEAn76ODVkaN7k6DcHIXfSQu3cp6qORdEoDWetgr5mz6dNHuWItEX0pfwHyNd06nWHiSXwSzWt35aDaBUmayotxKgKQAkNXHdO517Y2sfggpABAAJ++qH3dOkyDPs8sSzuqQ3X+gZTUzoFPA9yck9FJ5awxYRliLRF9KX8B8jXdOp1h4kl8Es1rd+Wg2gVJmsqLcSoCkAJDVx3Tude2NrH4IKQAQAIBIAGkAaUCASABugG7AgEgAaYBpwIBIAGwAbECASABqAGpAgEgAawBrQCfvotTbDrxmlsFcFglwX4bDp4MYLoh/7JsQWgXq8P3dFJZYi0RfSl/AfI13TqdYeJJfBLNa3floNoFSZrKi3EqApACQ1cd07nXtjax+CCkAEACASABqgGrAJ6+ezK/M29WEAM1lmU4k8NzaxOD/8HfjMQ0wJklK6J3eJLEWiL6Uv4D5Gu6dTrDxJL4JZrW78tBtAqTNZUW4lQFIASGrjunc69sbWPwQUgAAJ6+TrNd7tEvMma/a+zBnzejBitNc2fJjXwqVOZsujXuMjLEWiL6Uv4D5Gu6dTrDxJL4JZrW78tBtAqTNZUW4lQFIASGrjunc69sbWPwQUgAAgJwAa4BrwCfvr7trQQQVJuETfQBsaKu7jpDs6v8H7w8xiqgO2Ng7UIdLHkhi0LPH/dEKW4Ez94YWZEQdNGiVBr1Tmwo3EOMKbAB4LPm+0rvFi1iBCtuAEAAnb2dFkTLouzWgFLi2UYOJ0Vyf44K7jOjEM/EFwA84+d0liLRF9KX8B8jXdOp1h4kl8Es1rd+Wg2gVJmsqLcSoCkAJDVx3Tude2NrH4IKQAQAnb2Re5BSLTanE1JblJxGxSevDfMVHzfP7KpscvsQL9WfliLRF9KX8B8jXdOp1h4kl8Es1rd+Wg2gVJmsqLcSoCkAJDVx3Tude2NrH4IKQAQCASABsgGzAgEgAbgBuQCfvrRKS3Dtn6NXOIzZsIQHa+94rdDUfcoUP5YLAh9pRq7ZYi0RfSl/AfI13TqdYeJJfBLNa3floNoFSZrKi3EqApACQ1cd07nXtjax+CCkAEACAVgBtAG1AJ2+Kdf+OWTQneBsjEY1U6m5nafPyrl7C5lvEQm6L41FouWItEX0pfwHyNd06nWHiSXwSzWt35aDaBUmayotxKgKQAkNXHdO517Y2sfggpABAgEgAbYBtwCdvcYZjUwtWi+m5bem02LaoykTdn173tYMvU/vivcD3DPLEWiL6Uv4D5Gu6dTrDxJL4JZrW78tBtAqTNZUW4lQFIASGrjunc69sbWPwQUgAgCdvckIX3tYLjFEp4kRkZIHkNI+ZZ7Sezcpl55RJ7E3Ea5LEWiL6Uv4D5Gu6dTrDxJL4JZrW78tBtAqTNZUW4lQFIASGrjunc69sbWPwQUgAgCfvqCVE3xbJbz7emfYVMFKhTJwHOBV0CtdnjrK5JjXg46JYi0RfSl/AfI13TqdYeJJfBLNa3floNoFSZrKi3EqApACQ1cd07nXtjax+CCkAEAAn76TTKqF9TyAWHIGMv4pyAvtfFGJt80gaUVdKEvMaQCveWItEX0pfwHyNd06nWHiSXwSzWt35aDaBUmayotxKgKQAkNXHdO517Y2sfggpABAAgEgAbwBvQIBIAHOAc8CASABvgG/AgEgAcoBywIBIAHAAcECASABxgHHAgFYAcIBwwIBSAHEAcUAnb304M4wrwWArVbiYV7Smcw4PPDEBg9UTeH3mZByu29+SxFoi+lL+A+RrunU6w8SS+CWa1u/LQbQKkzWVFuJUBSAEhq47p3OvbG1j8EFIAIAnb3GOLsjvaffNnrR4+iXX02Hg4kUNkhID57ZjoW3SKMTyxFoi+lL+A+RrunU6w8SS+CWa1u/LQbQKkzWVFuJUBSAEhq47p3OvbG1j8EFIAIAnb3Gt0tCrclinJvINr4A6kwP4EWjQ2oOIXAlpUAe5L7kyxFoi+lL+A+RrunU6w8SS+CWa1u/LQbQKkzWVFuJUBSAEhq47p3OvbG1j8EFIAIAnb31z775gJghyE2pQAayqx4zhzhwioWD/LbSfT+YqW9PyxFoi+lL+A+RrunU6w8SS+CWa1u/LQbQKkzWVFuJUBS
AEhq47p3OvbG1j8EFIAIAnr5fXx9kAGl/KTwyvHCw07pfeSa5gQNnhmNTkTCWt0hOksRaIvpS/gPka7p1OsPEkvglmtbvy0G0CpM1lRbiVAUgBIauO6dzr2xtY/BBSAACASAByAHJAJ2+AcSL+SFPIaRn8n3SuGGYXuW5RIId/UlsqDLRIdJhkWWItEX0pfwHyNd06nWHiSXwSzWt35aDaBUmayotxKgKQAkNXHdO517Y2sfggpABAJ2+C2/o61Z2+8ZL9KRQSpY8vs2zYc587H+rvH0zJtstiKWItEX0pfwHyNd06nWHiSXwSzWt35aDaBUmayotxKgKQAkNXHdO517Y2sfggpABAJ++jFrt3dQ00rJMmk9r43NlSXaPiemN7ttVXRDHnUTzapliLRF9KX8B8jXdOp1h4kl8Es1rd+Wg2gVJmsqLcSoCkAJDVx3Tude2NrH4IKQAQAIDe2ABzAHNAJ29ScEUElqYAOudxEEqJnk2cMMMpqFJz3PvY1zZr/ajASxFoi+lL+A+RrunU6w8SS+CWa1u/LQbQKkzWVFuJUBSAEhq47p3OvbG1j8EFIAIAJ29RNOMVp4gWZ3sUJGe9eHea/Wqc0VOawfRmc64fptliyxFoi+lL+A+RrunU6w8SS+CWa1u/LQbQKkzWVFuJUBSAEhq47p3OvbG1j8EFIAIAgEgAdAB0QIBIAHUAdUAn76cK/tB7OSwGRCYrXADqq/RLAi/u9pB/9AHjaIdd4RY+WItEX0pfwHyNd06nWHiSXwSzWt35aDaBUmayotxKgKQAkNXHdO517Y2sfggpABAAgFIAdIB0wCdvj4JTDLxtElLE/6yH4SlUStcLvQWqCAu/83aqKbjHKEliLRF9KX8B8jXdOp1h4kl8Es1rd+Wg2gVJmsqLcSoCkAJDVx3Tude2NrH4IKQAQCdvgk7u+HH79odPXo1V10BauXk/wgWFMlHqqVL45w0JmpliLRF9KX8B8jXdOp1h4kl8Es1rd+Wg2gVJmsqLcSoCkAJDVx3Tude2NrH4IKQAQIBIAHWAdcCASAB2AHZAJ6+RM3uJ1pLUi4DU74itY1lUigd8QK2wanjcSSR6dlvcrLEWiL6Uv4D5Gu6dTrDxJL4JZrW78tBtAqTNZUW4lQFIASGrjunc69sbWPwQUgAAJ6+V3dedI7G9ta1ljr9wtqVLttZrba3WYTsEcHaUaopJzLEWiL6Uv4D5Gu6dTrDxJL4JZrW78tBtAqTNZUW4lQFIASGrjunc69sbWPwQUgAAJ6+Q8mbdyjQOhCDZQMJwnJ2P4821w5hl6xoL+kmqUpKzzLEWiL6Uv4D5Gu6dTrDxJL4JZrW78tBtAqTNZUW4lQFIASGrjunc69sbWPwQUgAAgEgAdoB2wCdvj79gn4N1wIJKxUMwxTtF9uWTVnvbY245au9S6TLjWpliLRF9KX8B8jXdOp1h4kl8Es1rd+Wg2gVJmsqLcSoCkAJDVx3Tude2NrH4IKQAQCdvhLRzTRUd/Yp98Qg2iP6aiOQFiuzvHh5w5PgJt9NeZkliLRF9KX8B8jXdOp1h4kl8Es1rd+Wg2gVJmsqLcSoCkAJDVx3Tude2NrH4IKQAZzonb4=" abiUC := abi.NewAbi(tvmUC.config, tvmUC.client) electorAccount, err := abiUC.EncodeAccount(&domain.ParamsOfEncodeAccount{StateInit: domain.NewStateInitSource(domain.StateInitSourceStateInit{Code: electorСode, Data: electorData})}) assert.Equal(t, nil, err) valueRunGet, err := tvmUC.RunGet(&domain.ParamsOfRunGet{Account: electorAccount.Account, FunctionName: "participant_list"}) assert.Equal(t, nil, err) sample := 
`[[["0x0101b6d65a384b9c70deb49fd6c43ffc0f60ed22fcc3a4966f7043794a749228","60138000000000"],[["0x03de5d8590fe6ad191bf94d4136dfb630e9b3447bb2f1a6ae2d8e3e4cbee1d9f","61000000000000"],[["0x0558f90c0682d677b46005ce2e04206c255ea9a05bfac0ff5aea9d7182a28913","60138000000000"],[["0x07698228973a595751d79e1fafd5a4145b3d35349bf0b43322afb61b138f01eb","60138000000000"],[["0x09d1ef8a40a9fbf1ca505f072258048ec15e0637baa085649d77b9a90220003e","60138000000000"],[["0x0ac21ef27c8ed4487270f1c45e99dac091ca4007951217ece344452df7047e5a","60138000000000"],[["0x0aed529418ab67a31a4b98c224f8fdb2fec11f0100c23751e67b312cba11fb23","60138000000000"],[["0x0bd14cdade9067c523f44fd208dee5daa7d852151725d713b92c840a031018a6","60138000000000"],[["0x0bddff0d98f42a3155e577a5579623a911e3b03401835166553f88375cbd9657","60138000000000"],[["0x12893bbd649bf2e1e79cb084025638cdd7906eebca40efeee7fdbd548cc96391","60138000000000"],[["0x15546bc7b5124f6d83d6c5a62b8890a48b933168e141c01229431a6c0c499780","60138000000000"],[["0x1cbea6a399ba200958db255579cda2195006f3a3108b2d6ef7e258e42c101479","60138000000000"],[["0x1f8ee6ba2902715804c769c3845b6b3a37802e462e8df63ba19f827a92dbbda0","60138000000000"],[["0x1fdd556d84d1d9f24a739c2600ec72256cc00920d85ad3a2edb3e0d72146789d","60138000000000"],[["0x20f20c2cfa4d72afb9c5f64d4735070962b3323b3629892c75f56427f175ebe4","60138000000000"],[["0x219d32737b0f3769869b8fa750ba8e3cd9f19b21a4d669c7c79d420d7f7cdb24","60138000000000"],[["0x2615b4aeb69140531228248a9d84593117b64e22d462e3968e39c1840a260523","60138000000000"],[["0x26984c9f04bc1889061e98bd9caf6955f750219d8e8dbc0986feb9d770e5a15b","60138000000000"],[["0x28bb07d80e20aa624ae47dfe53f915b23a666bf825ff283bac06c14bea1eaa74","60138000000000"],[["0x2a23566008fd4f87105b09d02c739452e45187fbada5e7e52ada356264cd6751","60138000000000"],[["0x2dcc70859876106b21b598ba9a10c9932259c36f44adeb95a178e67f6afd2f7a","60138000000000"],[["0x30b854226ef943d738d2dfcc72ede3b39d08604ca7211abf3c76f488441c77fc","60138000000000"],[["0x31bb74a5a53769d3db789d961375ea569d4da0bd6ac2b12f830dd6be81968ef1","60138000000000"],[["0x334f22e0de2e24a070fec7c1d77d7a988a79d66b79e2d654310a963964edd337","60138000000000"],[["0x36c44eddf773390cdb42f93f8454ea9c7ca45aa8948346df8f642a59ce44c442","60138000000000"],[["0x3d29d2b5ceef46703255ce8cfe3aad3c4fefd3a2025e5a48ce78a63f20887eff","60138000000000"],[["0x41b047a20ed691e9376f7f2f60d6571290e34ef4e1b85467dcc3d7c0cf7fae90","60138000000000"],[["0x41e7541c377b58a0cfc4ca954731e971f6dc9fa6806eaa1709d011d3d32593ce","60138000000000"],[["0x426a52d3b3d016451c46b3a0eacb382fbbb38739e00d041d4038f795a54e25b5","60138000000000"],[["0x42f89915ca540af691f623f201b616caa7f5e104f8293698f8b46c4e7bb5b292","60138000000000"],[["0x4449521e793b02b036ce698c3af951e9548cd5b862b704fa5cc9e80b171a3c61","60138000000000"],[["0x492f4fee6a035a09e9ff09d65a65768899b04797cff08dc2c64ae11cd94d1968","60138000000000"],[["0x4947018f9c0c9302b2783eb5edacb76ccae3b5c5a2f6355b5b51afd1a18075f8","60138000000000"],[["0x4c27708e4ce81a0bbeb315ece024ba495f3e3fab5f83a2941b7731a58ad32160","60138000000000"],[["0x5059d40f80f578c3c384239415f54af35ab4dbdea0251618d4c3c7b4937e7e69","60138000000000"],[["0x5191f8cbfe1ce25a68c337ede75638321374112b868584092a335f83caad59a4","60138000000000"],[["0x531296c32ea64d09dcb44ff0b99843dc9855143c70b9fad42deb33881525fb84","60138000000000"],[["0x54c9860aa34ddba2a16e4f4271e1771f61f1e8a7a116fbbfa62f0e535b95559e","60138000000000"],[["0x54ce2d6b35d0d670e37fcd533ff17c2116e0acead719194e46d478944b33108e","60138000000000"],[["0x576af5e4af963a0caa957629d009906119e418e7f7778f5a55d41c0905b73a4f","6013800000
0000"],[["0x59905476f4781f6a79359079bfa3fe295c65d6b918afadbc352edcdb558ad094","60138000000000"],[["0x5a4e95cdf94bed240ebabded084b70b2548601686d94a751f240aedd2032e4f2","60138000000000"],[["0x5a7500f11becf6741fe5624d2298f6b830ead261871a48a81f80bf9be09ed866","180000000000000"],[["0x5c26942bf33c49485db3b2693e5d582708b44705f712c4e24af1ee84744c079e","60138000000000"],[["0x5fcdcc107e81ef4399c9d603a25fcba75cb78f1fa1bafd3acb39e3521d7fc9ce","60138000000000"],[["0x6107f5b2974fabf6f0aa1a7898340b3f76c4ba272b95a3e4bb809c1d529b6997","60138000000000"],[["0x647b9a476f733ec5ee9cbc0bfb021335cd3166b9aaa8ad27ed0f88d9f6bf9dbe","60138000000000"],[["0x658c461d8dad54a5a9cbbbb2711920a541ba58003e7029cc228dfdfbc17ede3f","60138000000000"],[["0x661336351b889e0124fbc19f9f35f6a0f6e8c4fa9b89e9ef527718bd6aa254be","60138000000000"],[["0x6852746bbfb41e556daae99d375b2839ad62b35355c3b9fbbd54b4946ae2050f","60138000000000"],[["0x68ad3d98642913848b605dbad3f1df971f21908d360c37af4a493e9b4646b45e","60138000000000"],[["0x6c07c6be93940a83b30514b21531fd3dd204bb89e7f77b5a2421a41d4e85c74d","60138000000000"],[["0x6d4ad504054f292b7f66c7ed32f3b123bfa5c7be9c45faf26d77ad85efa64a38","60138000000000"],[["0x6e77d45d07651565be5cdb11b80c91fa18def0a434f246c0b25bb50fc4877dd4","60138000000000"],[["0x738600c570c19ef1b91bf2cd83709d71899c246bfabb5b08dc70fe32b5c81f7d","60138000000000"],[["0x7469b663b9fa7be185aa1819bfb48a4eda6e4d8af33e1955d95fa5e156d50f12","60138000000000"],[["0x77410e09363239b0999198a701e37f75775cc55049ba541497967f5d8ba74ef4","60138000000000"],[["0x781c96175cf45b791142326964347095fd0fbdd3c8579c42cea108798e025152","60138000000000"],[["0x79b43e9c18241636ae7c554097bb4bc5da03249bee67abe5366a5b093b708cab","60138000000000"],[["0x7ad807b91790868497768476e8c8e6b53ff9b1a91fbfe6a7edae8de0307a8157","60138000000000"],[["0x8308ff2b214d509d3781d7361a7ccb5f4fb976f8e386ce3c9082bcad8805d13d","60138000000000"],[["0x845a0fff44669c941475eb3f3ffd6e065ee94cfbfdcb820877744d6f9647a5d0","49899000000000"],[["0x88700f083f3bc7971c348de8357ee36b2551d8cdb7ea4b4e4e8aae558d67a231","60138000000000"],[["0x8b08c457cac18642f49ab7de0ef7551b93e11dbc2979062f22b271b890e8d2f0","60138000000000"],[["0x8bc840e0c5a98e608e70307ada41aa94a745a51f6065111942021a4e601dc328","60138000000000"],[["0x93e518529faa2244ee1bdc24a5459d4b3d2047f8756b12636e2cba3b766ec201","60138000000000"],[["0x993d90fac526bdad11549104105452e9198da8d485dbb4af17b044a721fa8b82","60138000000000"],[["0x9997880b1dcc011ce4fefeb587eca16c027c81aafeb4305d3a1755182c269b5e","60138000000000"],[["0x9d998de650f13c85da4ab08de0fa7960771d4269081fa1ed1f9940c5cd8bb57c","60138000000000"],[["0x9fab138505d28c3c2d68509c5414abe933ab7de90610d8cc84edaf380e739f48","60138000000000"],[["0x9fd585f4d71c50ff54b69255ddbaa4a30eae31cde2d02ba6d4c0f87faf288f9a","60138000000000"],[["0xa42d598e3d6c051880488bfd139705c9853ff2e93046c6e096eba5f5b8dd714e","60138000000000"],[["0xa6e3ff7b1f340f7d02a1b64ade185c9039cd2751ef47ac5d7950b527b377d566","60138000000000"],[["0xa79d52472a9343b4f91c61b7e065cd736844064c11188fb86fef32447b163462","60138000000000"],[["0xa82bdb918a99f7192b0ebe745f04217991d2077dc43ffe75956782f55c7c9805","60138000000000"],[["0xa87f60cfad2f10ec420d4660d98a43a1105a867aac63a2724075f155b991fd35","60138000000000"],[["0xad1e503c43f7f62bb672b234eb1510b8ccef4d23b6de1f53a8a0d738c961cee2","60138000000000"],[["0xad8dd15447ac5c3b0ac9ed9ebae3b32cfe3cda5442bfce7843443a353701eb34","60138000000000"],[["0xade82619842d2257fb19097c990b77818f2352e3809b9c179b3b66989b8e01c3","60138000000000"],[["0xb739a017b3b9c9577eeba0d3b94fe2027333ccdad378f0aa67b441eb8cbd675a","6013
8000000000"],[["0xb85ed5c5a48abadc5bf4a85185f781aa60eebe7ef20642f660c7e90d481984cf","60138000000000"],[["0xbcb7406f71b46a5171b822f609d50df1a485bbae832f76db0a356b243616ca8e","60138000000000"],[["0xbedc1da66f906866cf8af7e57cda645018c39d1b028e9ed4682643941c940348","60138000000000"],[["0xc2660177ec158c05676b396baab45f8f8a63f74a0eaf1a7cfe011c7eea0cd8a4","57180000000000"],[["0xc536058376e87f6481ce31b0e088235b4be9df00145c97081c45a28cce64c684","60138000000000"],[["0xc8fd550fcf32a9ea6aef295e788e4394e744f0939ab5fa8b9009577e274a477e","60138000000000"],[["0xcbfe056a9e9fafd246a8fa3025c3d870dac6b01f68adc847f03277eac906452f","60138000000000"],[["0xd023735d89cb9d29c5301b87f00d3f7a42aa6f0320086473e50b7e3b8b9acb12","60138000000000"],[["0xd17002d5872d62876fc4cae771c472ececae0b50820d760718a753acf431f31c","60138000000000"],[["0xd847bac558e925bd87b15a9e8c077df36537e6fc52d5c2019004c0c570fd0266","60138000000000"],[["0xdab17536c875995ce144f17771c79e7e9d6adcaaa66cce64947c8d17a363a2dd","60138000000000"],[["0xdf0b5c031ece9dadfd23c63a41e4e7f1ae4138b157ff7588c21083853d585789","60138000000000"],[["0xe087dae3faaf4748c8bcd237ca7ece5f8bcddf6b60db216252c5e29a7f6a33a4","60138000000000"],[["0xe09755d90d62160409b67ff28584f2800087f9063d76b3240c4d3f94f0880c41","60138000000000"],[["0xe23c47f9c2e9d2d87d9f1fcc0352ef28d13f322f8003d4210bb33692e77fd988","60138000000000"],[["0xeb2269b0ea934046a59399bd824f6a7fae4c7d696bb163e1bd235cbc21aa2b55","60138000000000"],[["0xedf8d203bebac7d629840ca0a704cbff92607d6bf538bc99ab65fada6a7b3c65","60138000000000"],[["0xf179ca30b1a9c8c33e33863b04d8d0078dddbf974a1e8666d072e0403c997f21","60138000000000"],[["0xf199c75a842c96c459272502b7010a78f29e7d00ad649f7756dd11c8d321a97c","60138000000000"],[["0xf19a880d58384bfaf5c839e7b6502ff7e6dc11cc38a77f651c948ea8475a37f7","60138000000000"],[["0xf25841168bb223f03cc01f5934474e56ae3ac0307a048ab167326c7d655c25db","86829000000000"],[["0xf25e305cc44404dae89a8f3b577cf94c367ab28a8ebc81a5c551d39303e254c2","60138000000000"],[["0xf3c0eed928d059ec9c99fd55ef4df9ccdaa30626e14b833f064936099b8088e1","60138000000000"],[["0xf6fe4424c9df211b1d2e92f7a91889aac643c605de458fa8f2af90534b885654","154754000000000"],[["0xf8a26375e76f1f8ff763787507fe2e01ed257b1a6b1772e48338606862a80da4","60138000000000"],[["0xfb471071aa87f25465da8c98bdeb1b24165b4a1694c5a6ab9f59eb57ce9e451d","60138000000000"],[["0xfb66f351a3b27e1702a21bfd189f3db5053f9f0089b26e7f05218fa87b925e2e","60138000000000"],[["0xfbcd15956e466a3c945c8bee6e6bc6bdab6b1b2ec0c07a3ba431091795751bef","60138000000000"],[["0xfced4379f1cb13157b34d50301a65ab47dc3452f4cd0e2a2d8e0b33a07350f43","60138000000000"],null]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]` assert.Equal(t, sample, string(valueRunGet.Output)) valueRunGet, err = tvmUC.RunGet(&domain.ParamsOfRunGet{Account: electorAccount.Account, FunctionName: "compute_returned_stake", Input: strings.Split(electorAddress, ":")[1]}) assert.Equal(t, nil, err) assert.Equal(t, `["0"]`, string([]byte(valueRunGet.Output))) valueRunGet, err = tvmUC.RunGet(&domain.ParamsOfRunGet{Account: electorAccount.Account, FunctionName: "unknown_function"}) assert.NotEqual(t, nil, err) assert.Equal(t, json.RawMessage(nil), valueRunGet.Output) }) bocUC := boc.NewBoc(tvmUC.config, tvmUC.client) t.Run("TestRunExecutorAccNone", func(t *testing.T) { runParams := &domain.ParamsOfRunExecutor{ Message: "te6ccgEBAQEAXAAAs0gAV2lB0HI8/VEO/pBKDJJJeoOcIh+dL9JzpmRzM8PfdicAPGNEGwRWGaJsR6UYmnsFVC2llSo1ZZN5mgUnCiHf7ZaUBKgXyAAGFFhgAAAB69+UmQS/LjmiQA==", Account: 
			domain.AccountForExecutor{ValueEnumType: domain.AccountForExecutorNone{}},
			SkipTransactionCheck: util.BoolToPointerBool(true),
			ReturnUpdatedAccount: util.BoolToPointerBool(true),
		}
		result, err := tvmUC.RunExecutor(runParams)
		assert.Equal(t, nil, err)

		parsed, err := bocUC.ParseAccount(&domain.ParamsOfParse{Boc: result.Account})
		assert.Equal(t, nil, err)
		resultSt := &resultData{}
		err = json.Unmarshal(parsed.Parsed, resultSt)
		assert.Equal(t, nil, err)
		assert.Equal(t, "0:f18d106c11586689b11e946269ec1550b69654a8d5964de668149c28877fb65a", resultSt.ID)
		assert.Equal(t, "Uninit", resultSt.AccTypeName)
	})

	t.Run("TestRunExecutorAccUninit", func(t *testing.T) {
		cryptoUC := crypto.NewCrypto(tvmUC.config, tvmUC.client)
		keypair, err := cryptoUC.GenerateRandomSignKeys()
		assert.Equal(t, nil, err)

		fileAbi, err := os.Open("../samples/Hello.abi.json")
		assert.Equal(t, nil, err)
		byteAbi, err := ioutil.ReadAll(fileAbi)
		assert.Equal(t, nil, err)
		eventsAbi := &domain.AbiContract{}
		err = json.Unmarshal(byteAbi, &eventsAbi)
		assert.Equal(t, nil, err)
		abiValue := domain.NewAbiContract(eventsAbi)

		fileTvc, err := os.Open("../samples/Hello.tvc")
		assert.Equal(t, nil, err)
		byteTvc, err := ioutil.ReadAll(fileTvc)
		assert.Equal(t, nil, err)
		tvc := base64.StdEncoding.EncodeToString(byteTvc)

		signer := domain.NewSigner(domain.SignerKeys{keypair})
		deploySet := domain.DeploySet{Tvc: tvc}
		callSet := domain.CallSet{FunctionName: "constructor"}
		abiUI := abi.NewAbi(tvmUC.config, tvmUC.client)
		deployMessage, err := abiUI.EncodeMessage(&domain.ParamsOfEncodeMessage{
			Abi:       abiValue,
			Signer:    signer,
			DeploySet: &deploySet,
			CallSet:   &callSet,
		})
		assert.Equal(t, nil, err)

		result, err := tvmUC.RunExecutor(&domain.ParamsOfRunExecutor{
			Message:              deployMessage.Message,
			Account:              domain.AccountForExecutor{ValueEnumType: domain.AccountForExecutorUninit{}},
			ReturnUpdatedAccount: util.BoolToPointerBool(true),
		})
		assert.Equal(t, nil, err)

		// # Parse account
		parsed, err := bocUC.ParseAccount(&domain.ParamsOfParse{Boc: result.Account})
		assert.Equal(t, nil, err)
		resultSt := &resultData{}
		err = json.Unmarshal(parsed.Parsed, resultSt)
		assert.Equal(t, nil, err)
		assert.Equal(t, deployMessage.Address, resultSt.ID)
		assert.Equal(t, "Active", resultSt.AccTypeName)
	})

	t.Run("TestCache", func(t *testing.T) {
		fileBocText, err := os.Open("../samples/boc")
		assert.Equal(t, nil, err)
		byteBocText, err := ioutil.ReadAll(fileBocText)
		assert.Equal(t, nil, err)

		fileAbi, err := os.Open("../samples/boc.abi.json")
		assert.Equal(t, nil, err)
		abiByte, err := ioutil.ReadAll(fileAbi)
		assert.Equal(t, nil, err)
		eventsAbi := &domain.AbiContract{}
		err = json.Unmarshal(abiByte, &eventsAbi)
		assert.Equal(t, nil, err)
		abiValue := domain.NewAbiContract(eventsAbi)

		address := "0:8ecb78f3be4bd981ea182079c76519520008d56991d16da40a868170e2efb3a2"
		abiUC := abi.NewAbi(tvmUC.config, tvmUC.client)
		params := &domain.ParamsOfEncodeMessage{
			Abi:     abiValue,
			Signer:  domain.NewSigner(domain.SignerNone{}),
			Address: address,
			CallSet: &domain.CallSet{
				FunctionName: "listContenders",
			}}
		message, err := abiUC.EncodeMessage(params)
		assert.Equal(t, nil, err)

		paramsRvm := &domain.ParamsOfRunTvm{
			Message:              message.Message,
			Account:              string(byteBocText),
			Abi:                  abiValue,
			BocCache:             &domain.BocCacheType{ValueEnumType: domain.BocCacheTypeUnpinned{}},
			ReturnUpdatedAccount: util.BoolToPointerBool(true),
		}
		result, err := tvmUC.RunTvm(paramsRvm)
		assert.Equal(t, nil, err)

		type resData struct {
			IDS []string `json:"ids"`
		}
		res := &resData{}
		err = json.Unmarshal(result.Decoded.Output, res)
		assert.Equal(t, nil, err)

		fn := "getInfoFor"
		for _, val := range res.IDS {
			message, err = abiUC.EncodeMessage(&domain.ParamsOfEncodeMessage{
				Abi:     abiValue,
				Signer:  domain.NewSigner(domain.SignerNone{}),
				Address: address,
				CallSet: &domain.CallSet{
					FunctionName: fn,
					Input:        json.RawMessage(`{"id":` + val + `}`),
				},
			})
			assert.Equal(t, nil, err)
			_, err = tvmUC.RunTvm(&domain.ParamsOfRunTvm{
				Message:  message.Message,
				Account:  string(byteBocText),
				Abi:      abiValue,
				BocCache: &domain.BocCacheType{ValueEnumType: domain.BocCacheTypeUnpinned{}},
			})
			assert.Equal(t, nil, err)
		}
	})
}
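// The flow these subtests exercise, reduced to a minimal sketch (identifiers
// reuse the ones from the tests above; this is an illustration, not part of
// the suite): encode a message, execute it against an account state with
// RunExecutor (or read-only with RunTvm), then inspect the updated account
// via ParseAccount.
//
//	res, _ := tvmUC.RunExecutor(&domain.ParamsOfRunExecutor{
//		Message:              deployMessage.Message,
//		Account:              domain.AccountForExecutor{ValueEnumType: domain.AccountForExecutorUninit{}},
//		ReturnUpdatedAccount: util.BoolToPointerBool(true),
//	})
//	parsed, _ := bocUC.ParseAccount(&domain.ParamsOfParse{Boc: res.Account})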
highlight.pack.js
var hljs=new function(){function k(v){return v.replace(/&/gm,"&amp;").replace(/</gm,"&lt;").replace(/>/gm,"&gt;")}function t(v){return v.nodeName.toLowerCase()}function i(w,x){var v=w&&w.exec(x);return v&&v.index==0}function d(v){return Array.prototype.map.call(v.childNodes,function(w){if(w.nodeType==3){return b.useBR?w.nodeValue.replace(/\n/g,""):w.nodeValue}if(t(w)=="br"){return"\n"}return d(w)}).join("")}function r(w){var v=(w.className+" "+(w.parentNode?w.parentNode.className:"")).split(/\s+/);v=v.map(function(x){return x.replace(/^language-/,"")});return v.filter(function(x){return j(x)||x=="no-highlight"})[0]}function o(x,y){var v={};for(var w in x){v[w]=x[w]}if(y){for(var w in y){v[w]=y[w]}}return v}function u(x){var v=[];(function w(y,z){for(var A=y.firstChild;A;A=A.nextSibling){if(A.nodeType==3){z+=A.nodeValue.length}else{if(t(A)=="br"){z+=1}else{if(A.nodeType==1){v.push({event:"start",offset:z,node:A});z=w(A,z);v.push({event:"stop",offset:z,node:A})}}}}return z})(x,0);return v}function q(w,y,C){var x=0;var F="";var z=[];function B(){if(!w.length||!y.length){return w.length?w:y}if(w[0].offset!=y[0].offset){return(w[0].offset<y[0].offset)?w:y}return y[0].event=="start"?w:y}function A(H){function G(I){return" "+I.nodeName+'="'+k(I.value)+'"'}F+="<"+t(H)+Array.prototype.map.call(H.attributes,G).join("")+">"}function E(G){F+="</"+t(G)+">"}function v(G){(G.event=="start"?A:E)(G.node)}while(w.length||y.length){var D=B();F+=k(C.substr(x,D[0].offset-x));x=D[0].offset;if(D==w){z.reverse().forEach(E);do{v(D.splice(0,1)[0]);D=B()}while(D==w&&D.length&&D[0].offset==x);z.reverse().forEach(A)}else{if(D[0].event=="start"){z.push(D[0].node)}else{z.pop()}v(D.splice(0,1)[0])}}return F+k(C.substr(x))}function m(y){function v(z){return(z&&z.source)||z}function w(A,z){return RegExp(v(A),"m"+(y.cI?"i":"")+(z?"g":""))}function x(D,C){if(D.compiled){return}D.compiled=true;D.k=D.k||D.bK;if(D.k){var z={};function E(G,F){if(y.cI){F=F.toLowerCase()}F.split(" ").forEach(function(H){var I=H.split("|");z[I[0]]=[G,I[1]?Number(I[1]):1]})}if(typeof D.k=="string"){E("keyword",D.k)}else{Object.keys(D.k).forEach(function(F){E(F,D.k[F])})}D.k=z}D.lR=w(D.l||/\b[A-Za-z0-9_]+\b/,true);if(C){if(D.bK){D.b=D.bK.split(" ").join("|")}if(!D.b){D.b=/\B|\b/}D.bR=w(D.b);if(!D.e&&!D.eW){D.e=/\B|\b/}if(D.e){D.eR=w(D.e)}D.tE=v(D.e)||"";if(D.eW&&C.tE){D.tE+=(D.e?"|":"")+C.tE}}if(D.i){D.iR=w(D.i)}if(D.r===undefined){D.r=1}if(!D.c){D.c=[]}var B=[];D.c.forEach(function(F){if(F.v){F.v.forEach(function(G){B.push(o(F,G))})}else{B.push(F=="self"?D:F)}});D.c=B;D.c.forEach(function(F){x(F,D)});if(D.starts){x(D.starts,C)}var A=D.c.map(function(F){return F.bK?"\\.?\\b("+F.b+")\\b\\.?":F.b}).concat([D.tE]).concat([D.i]).map(v).filter(Boolean);D.t=A.length?w(A.join("|"),true):{exec:function(F){return null}};D.continuation={}}x(y)}function c(S,L,J,R){function v(U,V){for(var T=0;T<V.c.length;T++){if(i(V.c[T].bR,U)){return V.c[T]}}}function z(U,T){if(i(U.eR,T)){return U}if(U.eW){return z(U.parent,T)}}function A(T,U){return !J&&i(U.iR,T)}function E(V,T){var U=M.cI?T[0].toLowerCase():T[0];return V.k.hasOwnProperty(U)&&V.k[U]}function w(Z,X,W,V){var T=V?"":b.classPrefix,U='<span class="'+T,Y=W?"":"</span>";U+=Z+'">';return U+X+Y}function N(){var U=k(C);if(!I.k){return U}var T="";var X=0;I.lR.lastIndex=0;var V=I.lR.exec(U);while(V){T+=U.substr(X,V.index-X);var W=E(I,V);if(W){H+=W[1];T+=w(W[0],V[0])}else{T+=V[0]}X=I.lR.lastIndex;V=I.lR.exec(U)}return T+U.substr(X)}function F(){if(I.sL&&!f[I.sL]){return k(C)}var 
T=I.sL?c(I.sL,C,true,I.continuation.top):g(C);if(I.r>0){H+=T.r}if(I.subLanguageMode=="continuous"){I.continuation.top=T.top}return w(T.language,T.value,false,true)}function Q(){return I.sL!==undefined?F():N()}function P(V,U){var T=V.cN?w(V.cN,"",true):"";if(V.rB){D+=T;C=""}else{if(V.eB){D+=k(U)+T;C=""}else{D+=T;C=U}}I=Object.create(V,{parent:{value:I}})}function G(T,X){C+=T;if(X===undefined){D+=Q();return 0}var V=v(X,I);if(V){D+=Q();P(V,X);return V.rB?0:X.length}var W=z(I,X);if(W){var U=I;if(!(U.rE||U.eE)){C+=X}D+=Q();do{if(I.cN){D+="</span>"}H+=I.r;I=I.parent}while(I!=W.parent);if(U.eE){D+=k(X)}C="";if(W.starts){P(W.starts,"")}return U.rE?0:X.length}if(A(X,I)){throw new Error('Illegal lexeme "'+X+'" for mode "'+(I.cN||"<unnamed>")+'"')}C+=X;return X.length||1}var M=j(S);if(!M){throw new Error('Unknown language: "'+S+'"')}m(M);var I=R||M;var D="";for(var K=I;K!=M;K=K.parent){if(K.cN){D=w(K.cN,D,true)}}var C="";var H=0;try{var B,y,x=0;while(true){I.t.lastIndex=x;B=I.t.exec(L);if(!B){break}y=G(L.substr(x,B.index-x),B[0]);x=B.index+y}G(L.substr(x));for(var K=I;K.parent;K=K.parent){if(K.cN){D+="</span>"}}return{r:H,value:D,language:S,top:I}}catch(O){if(O.message.indexOf("Illegal")!=-1){return{r:0,value:k(L)}}else{throw O}}}function g(y,x){x=x||b.languages||Object.keys(f);var v={r:0,value:k(y)};var w=v;x.forEach(function(z){if(!j(z)){return}var A=c(z,y,false);A.language=z;if(A.r>w.r){w=A}if(A.r>v.r){w=v;v=A}});if(w.language){v.second_best=w}return v}function h(v){if(b.tabReplace){v=v.replace(/^((<[^>]+>|\t)+)/gm,function(w,z,y,x){return z.replace(/\t/g,b.tabReplace)})}if(b.useBR){v=v.replace(/\n/g,"<br>")}return v}function p(z){var y=d(z);var A=r(z);if(A=="no-highlight"){return}var v=A?c(A,y,true):g(y);var w=u(z);if(w.length){var x=document.createElementNS("http://www.w3.org/1999/xhtml","pre");x.innerHTML=v.value;v.value=q(w,u(x),y)}v.value=h(v.value);z.innerHTML=v.value;z.className+=" hljs "+(!A&&v.language||"");z.result={language:v.language,re:v.r};if(v.second_best){z.second_best={language:v.second_best.language,re:v.second_best.r}}}var b={classPrefix:"hljs-",tabReplace:null,useBR:false,languages:undefined};function s(v){b=o(b,v)}function l(){if(l.called){return}l.called=true;var v=document.querySelectorAll("pre code");Array.prototype.forEach.call(v,p)}function a(){addEventListener("DOMContentLoaded",l,false);addEventListener("load",l,false)}var f={};var n={};function e(v,x){var w=f[v]=x(this);if(w.aliases){w.aliases.forEach(function(y){n[y]=v})}}function j(v){return 
f[v]||f[n[v]]}this.highlight=c;this.highlightAuto=g;this.fixMarkup=h;this.highlightBlock=p;this.configure=s;this.initHighlighting=l;this.initHighlightingOnLoad=a;this.registerLanguage=e;this.getLanguage=j;this.inherit=o;this.IR="[a-zA-Z][a-zA-Z0-9_]*";this.UIR="[a-zA-Z_][a-zA-Z0-9_]*";this.NR="\\b\\d+(\\.\\d+)?";this.CNR="(\\b0[xX][a-fA-F0-9]+|(\\b\\d+(\\.\\d*)?|\\.\\d+)([eE][-+]?\\d+)?)";this.BNR="\\b(0b[01]+)";this.RSR="!|!=|!==|%|%=|&|&&|&=|\\*|\\*=|\\+|\\+=|,|-|-=|/=|/|:|;|<<|<<=|<=|<|===|==|=|>>>=|>>=|>=|>>>|>>|>|\\?|\\[|\\{|\\(|\\^|\\^=|\\||\\|=|\\|\\||~";this.BE={b:"\\\\[\\s\\S]",r:0};this.ASM={cN:"string",b:"'",e:"'",i:"\\n",c:[this.BE]};this.QSM={cN:"string",b:'"',e:'"',i:"\\n",c:[this.BE]};this.CLCM={cN:"comment",b:"//",e:"$"};this.CBLCLM={cN:"comment",b:"/\\*",e:"\\*/"};this.HCM={cN:"comment",b:"#",e:"$"};this.NM={cN:"number",b:this.NR,r:0};this.CNM={cN:"number",b:this.CNR,r:0};this.BNM={cN:"number",b:this.BNR,r:0};this.REGEXP_MODE={cN:"regexp",b:/\//,e:/\/[gim]*/,i:/\n/,c:[this.BE,{b:/\[/,e:/\]/,r:0,c:[this.BE]}]};this.TM={cN:"title",b:this.IR,r:0};this.UTM={cN:"title",b:this.UIR,r:0}}();hljs.registerLanguage("bash",function(b){var a={cN:"variable",v:[{b:/\$[\w\d#@][\w\d_]*/},{b:/\$\{(.*?)\}/}]};var d={cN:"string",b:/"/,e:/"/,c:[b.BE,a,{cN:"variable",b:/\$\(/,e:/\)/,c:[b.BE]}]};var c={cN:"string",b:/'/,e:/'/};return{l:/-?[a-z\.]+/,k:{keyword:"if then else elif fi for break continue while in do done exit return set declare case esac export exec",literal:"true false",built_in:"printf echo read cd pwd pushd popd dirs let eval unset typeset readonly getopts source shopt caller type hash bind help sudo",operator:"-ne -eq -lt -gt -f -d -e -s -l -a"},c:[{cN:"shebang",b:/^#![^\n]+sh\s*$/,r:10},{cN:"function",b:/\w[\w\d_]*\s*\(\s*\)\s*\{/,rB:true,c:[b.inherit(b.TM,{b:/\w[\w\d_]*/})],r:0},b.HCM,b.NM,d,c,a]}});hljs.registerLanguage("javascript",function(a){return{aliases:["js"],k:{keyword:"in if for while finally var new function do return void else break catch instanceof with throw case default try this switch continue typeof delete let yield const class",literal:"true false null undefined NaN Infinity",built_in:"eval isFinite isNaN parseFloat parseInt decodeURI decodeURIComponent encodeURI encodeURIComponent escape unescape Object Function Boolean Error EvalError InternalError RangeError ReferenceError StopIteration SyntaxError TypeError URIError Number Math Date String RegExp Array Float32Array Float64Array Int16Array Int32Array Int8Array Uint16Array Uint32Array Uint8Array Uint8ClampedArray ArrayBuffer DataView JSON Intl arguments require"},c:[{cN:"pi",b:/^\s*('|")use strict('|")/,r:10},a.ASM,a.QSM,a.CLCM,a.CBLCLM,a.CNM,{b:"("+a.RSR+"|\\b(case|return|throw)\\b)\\s*",k:"return throw case",c:[a.CLCM,a.CBLCLM,a.REGEXP_MODE,{b:/</,e:/>;/,r:0,sL:"xml"}],r:0},{cN:"function",bK:"function",e:/\{/,c:[a.inherit(a.TM,{b:/[A-Za-z$_][0-9A-Za-z$_]*/}),{cN:"params",b:/\(/,e:/\)/,c:[a.CLCM,a.CBLCLM],i:/["'\(]/}],i:/\[|%/},{b:/\$[(.]/},{b:"\\."+a.IR,r:0}]}});hljs.registerLanguage("lua",function(b){var a="\\[=*\\[";var e="\\]=*\\]";var c={b:a,e:e,c:["self"]};var d=[{cN:"comment",b:"--(?!"+a+")",e:"$"},{cN:"comment",b:"--"+a,e:e,c:[c],r:10}];return{l:b.UIR,k:{keyword:"and break do else elseif end false for if in local nil not or repeat return then true until while",built_in:"_G _VERSION assert collectgarbage dofile error getfenv getmetatable ipairs load loadfile loadstring module next pairs pcall print rawequal rawget rawset require select setfenv setmetatable tonumber tostring type unpack xpcall 
coroutine debug io math os package string table"},c:d.concat([{cN:"function",bK:"function",e:"\\)",c:[b.inherit(b.TM,{b:"([_a-zA-Z]\\w*\\.)*([_a-zA-Z]\\w*:)?[_a-zA-Z]\\w*"}),{cN:"params",b:"\\(",eW:true,c:d}].concat(d)},b.CNM,b.ASM,b.QSM,{cN:"string",b:a,e:e,c:[c],r:10}])}});hljs.registerLanguage("xml",function(a){var c="[A-Za-z0-9\\._:-]+";var d={b:/<\?(php)?(?!\w)/,e:/\?>/,sL:"php",subLanguageMode:"continuous"};var b={eW:true,i:/</,r:0,c:[d,{cN:"attribute",b:c,r:0},{b:"=",r:0,c:[{cN:"value",v:[{b:/"/,e:/"/},{b:/'/,e:/'/},{b:/[^\s\/>]+/}]}]}]};return{aliases:["html"],cI:true,c:[{cN:"doctype",b:"<!DOCTYPE",e:">",r:10,c:[{b:"\\[",e:"\\]"}]},{cN:"comment",b:"<!--",e:"-->",r:10},{cN:"cdata",b:"<\\!\\[CDATA\\[",e:"\\]\\]>",r:10},{cN:"tag",b:"<style(?=\\s|>|$)",e:">",k:{title:"style"},c:[b],starts:{e:"</style>",rE:true,sL:"css"}},{cN:"tag",b:"<script(?=\\s|>|$)",e:">",k:{title:"script"},c:[b],starts:{e:"<\/script>",rE:true,sL:"javascript"}},{b:"<%",e:"%>",sL:"vbscript"},d,{cN:"pi",b:/<\?\w+/,e:/\?>/,r:10},{cN:"tag",b:"</?",e:"/?>",c:[{cN:"title",b:"[^ /><]+",r:0},b]}]}});hljs.registerLanguage("markdown",function(a){return{c:[{cN:"header",v:[{b:"^#{1,6}",e:"$"},{b:"^.+?\\n[=-]{2,}$"}]},{b:"<",e:">",sL:"xml",r:0},{cN:"bullet",b:"^([*+-]|(\\d+\\.))\\s+"},{cN:"strong",b:"[*_]{2}.+?[*_]{2}"},{cN:"emphasis",v:[{b:"\\*.+?\\*"},{b:"_.+?_",r:0}]},{cN:"blockquote",b:"^>\\s+",e:"$"},{cN:"code",v:[{b:"`.+?`"},{b:"^( {4}|\t)",e:"$",r:0}]},{cN:"horizontal_rule",b:"^[-\\*]{3,}",e:"$"},{b:"\\[.+?\\][\\(\\[].+?[\\)\\]]",rB:true,c:[{cN:"link_label",b:"\\[",e:"\\]",eB:true,rE:true,r:0},{cN:"link_url",b:"\\]\\(",e:"\\)",eB:true,eE:true},{cN:"link_reference",b:"\\]\\[",e:"\\]",eB:true,eE:true,}],r:10},{b:"^\\[.+\\]:",e:"$",rB:true,c:[{cN:"link_reference",b:"\\[",e:"\\]",eB:true,eE:true},{cN:"link_url",b:"\\s",e:"$"}]}]}});hljs.registerLanguage("css",function(a){var b="[a-zA-Z-][a-zA-Z0-9_-]*";var c={cN:"function",b:b+"\\(",e:"\\)",c:["self",a.NM,a.ASM,a.QSM]};return{cI:true,i:"[=/|']",c:[a.CBLCLM,{cN:"id",b:"\\#[A-Za-z0-9_-]+"},{cN:"class",b:"\\.[A-Za-z0-9_-]+",r:0},{cN:"attr_selector",b:"\\[",e:"\\]",i:"$"},{cN:"pseudo",b:":(:)?[a-zA-Z0-9\\_\\-\\+\\(\\)\\\"\\']+"},{cN:"at_rule",b:"@(font-face|page)",l:"[a-z-]+",k:"font-face page"},{cN:"at_rule",b:"@",e:"[{;]",c:[{cN:"keyword",b:/\S+/},{b:/\s/,eW:true,eE:true,r:0,c:[c,a.ASM,a.QSM,a.NM]}]},{cN:"tag",b:b,r:0},{cN:"rules",b:"{",e:"}",i:"[^\\s]",r:0,c:[a.CBLCLM,{cN:"rule",b:"[^\\s]",rB:true,e:";",eW:true,c:[{cN:"attribute",b:"[A-Z\\_\\.\\-]+",e:":",eE:true,i:"[^\\s]",starts:{cN:"value",eW:true,eE:true,c:[c,a.NM,a.QSM,a.ASM,a.CBLCLM,{cN:"hexcolor",b:"#[0-9A-Fa-f]+"},{cN:"important",b:"!important"}]}}]}]}]}});hljs.registerLanguage("http",function(a){return{i:"\\S",c:[{cN:"status",b:"^HTTP/[0-9\\.]+",e:"$",c:[{cN:"number",b:"\\b\\d{3}\\b"}]},{cN:"request",b:"^[A-Z]+ (.*?) 
HTTP/[0-9\\.]+$",rB:true,e:"$",c:[{cN:"string",b:" ",e:" ",eB:true,eE:true}]},{cN:"attribute",b:"^\\w",e:": ",eE:true,i:"\\n|\\s|=",starts:{cN:"string",e:"$"}},{b:"\\n\\n",starts:{sL:"",eW:true}}]}});hljs.registerLanguage("php",function(b){var e={cN:"variable",b:"\\$+[a-zA-Z_\x7f-\xff][a-zA-Z0-9_\x7f-\xff]*"};var a={cN:"preprocessor",b:/<\?(php)?|\?>/};var c={cN:"string",c:[b.BE,a],v:[{b:'b"',e:'"'},{b:"b'",e:"'"},b.inherit(b.ASM,{i:null}),b.inherit(b.QSM,{i:null})]};var d={v:[b.BNM,b.CNM]};return{cI:true,k:"and include_once list abstract global private echo interface as static endswitch array null if endwhile or const for endforeach self var while isset public protected exit foreach throw elseif include __FILE__ empty require_once do xor return parent clone use __CLASS__ __LINE__ else break print eval new catch __METHOD__ case exception default die require __FUNCTION__ enddeclare final try switch continue endfor endif declare unset true false trait goto instanceof insteadof __DIR__ __NAMESPACE__ yield finally",c:[b.CLCM,b.HCM,{cN:"comment",b:"/\\*",e:"\\*/",c:[{cN:"phpdoc",b:"\\s@[A-Za-z]+"},a]},{cN:"comment",b:"__halt_compiler.+?;",eW:true,k:"__halt_compiler",l:b.UIR},{cN:"string",b:"<<<['\"]?\\w+['\"]?$",e:"^\\w+;",c:[b.BE]},a,e,{cN:"function",bK:"function",e:/[;{]/,i:"\\$|\\[|%",c:[b.UTM,{cN:"params",b:"\\(",e:"\\)",c:["self",e,b.CBLCLM,c,d]}]},{cN:"class",bK:"class interface",e:"{",i:/[:\(\$"]/,c:[{bK:"extends implements",r:10},b.UTM]},{bK:"namespace",e:";",i:/[\.']/,c:[b.UTM]},{bK:"use",e:";",c:[b.UTM]},{b:"=>"},c,d]}});hljs.registerLanguage("python",function(a){var f={cN:"prompt",b:/^(>>>|\.\.\.) /};var b={cN:"string",c:[a.BE],v:[{b:/(u|b)?r?'''/,e:/'''/,c:[f],r:10},{b:/(u|b)?r?"""/,e:/"""/,c:[f],r:10},{b:/(u|r|ur)'/,e:/'/,r:10},{b:/(u|r|ur)"/,e:/"/,r:10},{b:/(b|br)'/,e:/'/,},{b:/(b|br)"/,e:/"/,},a.ASM,a.QSM]};var d={cN:"number",r:0,v:[{b:a.BNR+"[lLjJ]?"},{b:"\\b(0o[0-7]+)[lLjJ]?"},{b:a.CNR+"[lLjJ]?"}]};var e={cN:"params",b:/\(/,e:/\)/,c:["self",f,d,b]};var c={e:/:/,i:/[${=;\n]/,c:[a.UTM,e]};return{k:{keyword:"and elif is global as in if from raise for except finally print import pass return exec else break not with class assert yield try while continue del or def lambda nonlocal|10 None True False",built_in:"Ellipsis NotImplemented"},i:/(<\/|->|\?)/,c:[f,d,b,a.HCM,a.inherit(c,{cN:"function",bK:"def",r:10}),a.inherit(c,{cN:"class",bK:"class"}),{cN:"decorator",b:/@/,e:/$/},{b:/\b(print|exec)\(/}]}});hljs.registerLanguage("sql",function(a){return{cI:true,i:/[<>]/,c:[{cN:"operator",b:"\\b(begin|end|start|commit|rollback|savepoint|lock|alter|create|drop|rename|call|delete|do|handler|insert|load|replace|select|truncate|update|set|show|pragma|grant|merge)\\b(?!:)",e:";",eW:true,k:{keyword:"all partial global month current_timestamp using go revoke smallint indicator end-exec disconnect zone with character assertion to add current_user usage input local alter match collate real then rollback get read timestamp session_user not integer bit unique day minute desc insert execute like ilike|2 level decimal drop continue isolation found where constraints domain right national some module transaction relative second connect escape close system_user for deferred section cast current sqlstate allocate intersect deallocate numeric public preserve full goto initially asc no key output collation group by union session both last language constraint column of space foreign deferrable prior connection unknown action commit view or first into float year primary cascaded except restrict set references 
names table outer open select size are rows from prepare distinct leading create only next inner authorization schema corresponding option declare precision immediate else timezone_minute external varying translation true case exception join hour default double scroll value cursor descriptor values dec fetch procedure delete and false int is describe char as at in varchar null trailing any absolute current_time end grant privileges when cross check write current_date pad begin temporary exec time update catalog user sql date on identity timezone_hour natural whenever interval work order cascade diagnostics nchar having left call do handler load replace truncate start lock show pragma exists number trigger if before after each row merge matched database",aggregate:"count sum min max avg"},c:[{cN:"string",b:"'",e:"'",c:[a.BE,{b:"''"}]},{cN:"string",b:'"',e:'"',c:[a.BE,{b:'""'}]},{cN:"string",b:"`",e:"`",c:[a.BE]},a.CNM]},a.CBLCLM,{cN:"comment",b:"--",e:"$"}]}});hljs.registerLanguage("handlebars",function(b){var a="each in with if else unless bindattr action collection debugger log outlet template unbound view yield";return{cI:true,sL:"xml",subLanguageMode:"continuous",c:[{cN:"expression",b:"{{",e:"}}",c:[{cN:"begin-block",b:"#[a-zA-Z- .]+",k:a},{cN:"string",b:'"',e:'"'},{cN:"end-block",b:"\\/[a-zA-Z- .]+",k:a},{cN:"variable",b:"[a-zA-Z-.]+",k:a}]}]}});hljs.registerLanguage("coffeescript",function(c){var b={keyword:"in if for while finally new do return else break catch instanceof throw try this switch continue typeof delete debugger super then unless until loop of by when and or is isnt not",literal:"true false null undefined yes no on off",reserved:"case default function var void with const let enum export import native __hasProp __extends __slice __bind __indexOf",built_in:"npm require console print module exports global window document"};var a="[A-Za-z$_][0-9A-Za-z$_]*";var f=c.inherit(c.TM,{b:a});var e={cN:"subst",b:/#\{/,e:/}/,k:b};var d=[c.BNM,c.inherit(c.CNM,{starts:{e:"(\\s*/)?",r:0}}),{cN:"string",v:[{b:/'''/,e:/'''/,c:[c.BE]},{b:/'/,e:/'/,c:[c.BE]},{b:/"""/,e:/"""/,c:[c.BE,e]},{b:/"/,e:/"/,c:[c.BE,e]}]},{cN:"regexp",v:[{b:"///",e:"///",c:[e,c.HCM]},{b:"//[gim]*",r:0},{b:"/\\S(\\\\.|[^\\n])*?/[gim]*(?=\\s|\\W|$)"}]},{cN:"property",b:"@"+a},{b:"`",e:"`",eB:true,eE:true,sL:"javascript"}];e.c=d;return{k:b,c:d.concat([{cN:"comment",b:"###",e:"###"},c.HCM,{cN:"function",b:"("+a+"\\s*=\\s*)?(\\(.*\\))?\\s*\\B[-=]>",e:"[-=]>",rB:true,c:[f,{cN:"params",b:"\\(",rB:true,c:[{b:/\(/,e:/\)/,k:b,c:["self"].concat(d)}]}]},{cN:"class",bK:"class",e:"$",i:/[:="\[\]]/,c:[{bK:"extends",eW:true,i:/[:="\[\]]/,c:[f]},f]},{cN:"attribute",b:a+":",e:":",rB:true,eE:true,r:0}])}});hljs.registerLanguage("json",function(a){var e={literal:"true false null"};var d=[a.QSM,a.CNM];var c={cN:"value",e:",",eW:true,eE:true,c:d,k:e};var b={b:"{",e:"}",c:[{cN:"attribute",b:'\\s*"',e:'"\\s*:\\s*',eB:true,eE:true,c:[a.BE],i:"\\n",starts:c}],i:"\\S"};var f={b:"\\[",e:"\\]",c:[a.inherit(c,{cN:null})],i:"\\S"};d.splice(d.length,0,b,f);return{c:d,k:e,i:"\\S"}});hljs.registerLanguage("django",function(a){var b={cN:"filter",b:/\|[A-Za-z]+\:?/,k:"truncatewords removetags linebreaksbr yesno get_digit timesince random striptags filesizeformat escape linebreaks length_is ljust rjust cut urlize fix_ampersands title floatformat capfirst pprint divisibleby add make_list unordered_list urlencode timeuntil urlizetrunc wordcount stringformat linenumbers slice date dictsort dictsortreversed default_if_none pluralize lower join center 
default truncatewords_html upper length phone2numeric wordwrap time addslashes slugify first escapejs force_escape iriencode last safe safeseq truncatechars localize unlocalize localtime utc timezone",c:[{cN:"argument",b:/"/,e:/"/},{cN:"argument",b:/'/,e:/'/}]};return{cI:true,sL:"xml",subLanguageMode:"continuous",c:[{cN:"template_comment",b:/\{%\s*comment\s*%}/,e:/\{%\s*endcomment\s*%}/},{cN:"template_comment",b:/\{#/,e:/#}/},{cN:"template_tag",b:/\{%/,e:/%}/,k:"comment endcomment load templatetag ifchanged endifchanged if endif firstof for endfor in ifnotequal endifnotequal widthratio extends include spaceless endspaceless regroup by as ifequal endifequal ssi now with cycle url filter endfilter debug block endblock else autoescape endautoescape csrf_token empty elif endwith static trans blocktrans endblocktrans get_static_prefix get_media_prefix plural get_current_language language get_available_languages get_current_language_bidi get_language_info get_language_info_list localize endlocalize localtime endlocaltime timezone endtimezone get_current_timezone verbatim",c:[b]},{cN:"variable",b:/\{\{/,e:/}}/,c:[b]}]}});hljs.registerLanguage("scss",function(a){var c="[a-zA-Z-][a-zA-Z0-9_-]*";var d={cN:"function",b:c+"\\(",e:"\\)",c:["self",a.NM,a.ASM,a.QSM]};var b={cN:"hexcolor",b:"#[0-9A-Fa-f]+"};var e={cN:"attribute",b:"[A-Z\\_\\.\\-]+",e:":",eE:true,i:"[^\\s]",starts:{cN:"value",eW:true,eE:true,c:[d,b,a.NM,a.QSM,a.ASM,a.CBLCLM,{cN:"important",b:"!important"}]}};return{cI:true,i:"[=/|']",c:[a.CLCM,a.CBLCLM,{cN:"function",b:c+"\\(",e:"\\)",c:["self",a.NM,a.ASM,a.QSM]},{cN:"id",b:"\\#[A-Za-z0-9_-]+",r:0},{cN:"class",b:"\\.[A-Za-z0-9_-]+",r:0},{cN:"attr_selector",b:"\\[",e:"\\]",i:"$"},{cN:"tag",b:"\\b(a|abbr|acronym|address|area|article|aside|audio|b|base|big|blockquote|body|br|button|canvas|caption|cite|code|col|colgroup|command|datalist|dd|del|details|dfn|div|dl|dt|em|embed|fieldset|figcaption|figure|footer|form|frame|frameset|(h[1-6])|head|header|hgroup|hr|html|i|iframe|img|input|ins|kbd|keygen|label|legend|li|link|map|mark|meta|meter|nav|noframes|noscript|object|ol|optgroup|option|output|p|param|pre|progress|q|rp|rt|ruby|samp|script|section|select|small|span|strike|strong|style|sub|sup|table|tbody|td|textarea|tfoot|th|thead|time|title|tr|tt|ul|var|video)\\b",r:0},{cN:"pseudo",b:":(visited|valid|root|right|required|read-write|read-only|out-range|optional|only-of-type|only-child|nth-of-type|nth-last-of-type|nth-last-child|nth-child|not|link|left|last-of-type|last-child|lang|invalid|indeterminate|in-range|hover|focus|first-of-type|first-line|first-letter|first-child|first|enabled|empty|disabled|default|checked|before|after|active)"},{cN:"pseudo",b:"::(after|before|choices|first-letter|first-line|repeat-index|repeat-item|selection|value)"},{cN:"attribute",b:"\\b(z-index|word-wrap|word-spacing|word-break|width|widows|white-space|visibility|vertical-align|unicode-bidi|transition-timing-function|transition-property|transition-duration|transition-delay|transition|transform-style|transform-origin|transform|top|text-underline-position|text-transform|text-shadow|text-rendering|text-overflow|text-indent|text-decoration-style|text-decoration-line|text-decoration-color|text-decoration|text-align-last|text-align|tab-size|table-layout|right|resize|quotes|position|pointer-events|perspective-origin|perspective|page-break-inside|page-break-before|page-break-after|padding-top|padding-right|padding-left|padding-bottom|padding|overflow-y|overflow-x|overflow-wrap|overflow|outline-width|outline-style|outline-offset|
outline-color|outline|orphans|order|opacity|object-position|object-fit|normal|none|nav-up|nav-right|nav-left|nav-index|nav-down|min-width|min-height|max-width|max-height|mask|marks|margin-top|margin-right|margin-left|margin-bottom|margin|list-style-type|list-style-position|list-style-image|list-style|line-height|letter-spacing|left|justify-content|initial|inherit|ime-mode|image-orientation|image-resolution|image-rendering|icon|hyphens|height|font-weight|font-variant-ligatures|font-variant|font-style|font-stretch|font-size-adjust|font-size|font-language-override|font-kerning|font-feature-settings|font-family|font|float|flex-wrap|flex-shrink|flex-grow|flex-flow|flex-direction|flex-basis|flex|filter|empty-cells|display|direction|cursor|counter-reset|counter-increment|content|column-width|column-span|column-rule-width|column-rule-style|column-rule-color|column-rule|column-gap|column-fill|column-count|columns|color|clip-path|clip|clear|caption-side|break-inside|break-before|break-after|box-sizing|box-shadow|box-decoration-break|bottom|border-width|border-top-width|border-top-style|border-top-right-radius|border-top-left-radius|border-top-color|border-top|border-style|border-spacing|border-right-width|border-right-style|border-right-color|border-right|border-radius|border-left-width|border-left-style|border-left-color|border-left|border-image-width|border-image-source|border-image-slice|border-image-repeat|border-image-outset|border-image|border-color|border-collapse|border-bottom-width|border-bottom-style|border-bottom-right-radius|border-bottom-left-radius|border-bottom-color|border-bottom|border|background-size|background-repeat|background-position|background-origin|background-image|background-color|background-clip|background-attachment|background|backface-visibility|auto|animation-timing-function|animation-play-state|animation-name|animation-iteration-count|animation-fill-mode|animation-duration|animation-direction|animation-delay|animation|align-self|align-items|align-content)\\b",i:"[^\\s]"},{cN:"value",b:"\\b(whitespace|wait|w-resize|visible|vertical-text|vertical-ideographic|uppercase|upper-roman|upper-alpha|underline|transparent|top|thin|thick|text|text-top|text-bottom|tb-rl|table-header-group|table-footer-group|sw-resize|super|strict|static|square|solid|small-caps|separate|se-resize|scroll|s-resize|rtl|row-resize|ridge|right|repeat|repeat-y|repeat-x|relative|progress|pointer|overline|outside|outset|oblique|nowrap|not-allowed|normal|none|nw-resize|no-repeat|no-drop|newspaper|ne-resize|n-resize|move|middle|medium|ltr|lr-tb|lowercase|lower-roman|lower-alpha|loose|list-item|line|line-through|line-edge|lighter|left|keep-all|justify|italic|inter-word|inter-ideograph|inside|inset|inline|inline-block|inherit|inactive|ideograph-space|ideograph-parenthesis|ideograph-numeric|ideograph-alpha|horizontal|hidden|help|hand|groove|fixed|ellipsis|e-resize|double|dotted|distribute|distribute-space|distribute-letter|distribute-all-lines|disc|disabled|default|decimal|dashed|crosshair|collapse|col-resize|circle|char|center|capitalize|break-word|break-all|bottom|both|bolder|bold|block|bidi-override|below|baseline|auto|always|all-scroll|absolute|table|table-cell)\\b"},{cN:"value",b:":",e:";",c:[b,a.NM,a.QSM,a.ASM,{cN:"important",b:"!important"}]},{cN:"at_rule",b:"@",e:"[{;]",k:"mixin include extend for if else each while charset import debug media page content font-face namespace warn",c:[d,a.QSM,a.ASM,b,a.NM,{cN:"preprocessor",b:"\\s[A-Za-z0-9_.-]+",r:0}]}]}});
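// Usage sketch for the API this pack exposes (method names taken from the
// assignments above; the snippet itself is illustrative, not part of the pack):
//
//   hljs.configure({tabReplace: '    '});    // optional, before the first highlight
//   hljs.initHighlightingOnLoad();           // highlights every <pre><code> block on load
//   var res = hljs.highlightAuto(someCode);  // or detect the language of a raw string;
//   // res.value is the highlighted HTML, res.language the detected language.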
i18n.py
# The MIT License (MIT)

# Copyright (c) 2021 Tom J. Sun

# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:

# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.

# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
import json
from os import listdir, walk
from os.path import isfile, join
import re
import shutil
SRC_DIR = '../src'
TRANSLATION_FILES_DIR = 'translations'


def find_translation_slugs():
    """Searches the src directory for all 'slugs' that should be translated
    by looking for matches of the pattern ( 'string' )
    """
    slugs = {}
    for (dirpath, _, filenames) in walk(SRC_DIR):
        for filename in filenames:
            if not filename.endswith('.py'):
                continue
            with open(join(dirpath, filename), 'r') as src_file:
                contents = src_file.read()
                for match in re.findall(r'\( \'(.+?)\' \)', contents):
                    slugs[match] = True
    return slugs


def load_translations(translation_file):
    """Loads translations from the given file and returns them as a map"""
    translations = json.load(translation_file)
    for slug, translation in list(translations.items()):
        del translations[slug]
        translations[slug.replace('\n', '\\n')] = translation.replace('\n', '\\n')
    return translations


def main():
    """Main handler"""
    slugs = find_translation_slugs()
    if sys.argv[1] == 'validate':
        translation_filenames = [
            f for f in listdir(TRANSLATION_FILES_DIR)
            if isfile(join(TRANSLATION_FILES_DIR, f))
        ]
        for translation_filename in translation_filenames:
            print('Validating %s...' % translation_filename)
            valid = True
            with open(join(TRANSLATION_FILES_DIR, translation_filename), 'r') as translation_file:
                translations = load_translations(translation_file)
                for slug in slugs:
                    if slug not in translations or translations[slug] == '':
                        print('Missing translation for "%s"' % slug)
                        valid = False
                for translation_slug in translations:
                    if translation_slug not in slugs:
                        print('Unnecessary translation for "%s"' % translation_slug)
                        valid = False
            if valid:
                print('OK')
    elif sys.argv[1] == 'new':
        locale = sys.argv[2]
        translations = {}
        for slug in slugs:
            translations[slug.replace('\\n', '\n')] = ''
        with open(join(TRANSLATION_FILES_DIR, '%s.json' % locale), 'w') as translation_file:
            translation_file.write(json.dumps(translations, sort_keys=True, indent=4))
    elif sys.argv[1] == 'translate':
        locale = sys.argv[2]
        output_dir = sys.argv[3]
        with open(join(TRANSLATION_FILES_DIR, '%s.json' % locale), 'r') as translation_file:
            translations = load_translations(translation_file)
        for (dirpath, _, filenames) in walk(output_dir):
            for filename in filenames:
                if not filename.endswith('.py'):
                    continue
                with open(join(dirpath, filename), 'r') as src_file:
                    contents = src_file.read()
                    for slug, translation in translations.items():
                        contents = contents.replace(
                            '( \'%s\' )' % slug,
                            '"""%s"""' % translation
                        )
                    with open(join(dirpath, filename + '.tmp'), 'w') as tmp_src_file:
                        tmp_src_file.write(contents)
                shutil.move(join(dirpath, filename + '.tmp'), join(dirpath, filename))


if __name__ == '__main__':
    main()
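# Usage sketch, inferred from the argv handling above (paths are the defaults
# defined at the top of this script):
#
#   python i18n.py validate                      # check translations/ against slugs found in ../src
#   python i18n.py new <locale>                  # write translations/<locale>.json with empty entries
#   python i18n.py translate <locale> <out_dir>  # replace ( 'slug' ) calls with translated strings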
llvm.rs
//! LLVM FFI functions.
//!
//! The reason we don't use rustc's llvm FFI is that rustc uses llvm 13 (at the time of writing),
//! while NVVM expects llvm 7 bitcode/supported things. And we don't use llvm-sys because this allows us
//! to only include what we need, as well as use safe references instead of pointers.
//!
//! Most of this code was taken from rustc_codegen_llvm with many things removed.

#![allow(non_camel_case_types, non_snake_case, non_upper_case_globals)]
// we have a lot of functions we linked to from cg_llvm that we don't use
// but likely will use in the future, so we ignore any unused functions
// in case we need them in the future for things like debug info or LTO.
#![allow(dead_code)]

use libc::{c_char, c_uint, size_t};
use libc::{c_int, c_ulonglong};
use std::ffi::{CStr, CString};
use std::fmt;
use std::hash::{Hash, Hasher};
use std::marker::PhantomData;
use std::ptr::{self};

use crate::builder::unnamed;

pub use debuginfo::*;

impl PartialEq for Value {
    fn eq(&self, other: &Self) -> bool {
        ptr::eq(self, other)
    }
}

impl Eq for Value {}

impl Hash for Value {
    fn hash<H: Hasher>(&self, hasher: &mut H) {
        (self as *const Self).hash(hasher);
    }
}

impl fmt::Debug for Value {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        unsafe {
            // ideally we'd print the type but the llvm 7 C api doesn't have a way to do this :(
            f.write_str("(")?;
            let ptr = LLVMPrintValueToString(self);
            let cstr = CString::from_raw(ptr);
            let string = cstr.to_string_lossy();
            f.write_str(&string)?;
            f.write_str(")")
        }
    }
}

impl LLVMRustResult {
    pub fn into_result(self) -> Result<(), ()> {
        match self {
            LLVMRustResult::Success => Ok(()),
            LLVMRustResult::Failure => Err(()),
        }
    }
}

#[derive(Copy, Clone, PartialEq)]
#[repr(C)]
pub enum CodeGenOptSize {
    CodeGenOptSizeNone = 0,
    CodeGenOptSizeDefault = 1,
    CodeGenOptSizeAggressive = 2,
}

pub use self::CodeGenOptSize::*;

#[derive(Copy, Clone)]
pub enum AttributePlace {
    ReturnValue,
    Argument(u32),
    Function,
}

impl AttributePlace {
    pub fn as_uint(self) -> c_uint {
        match self {
            AttributePlace::ReturnValue => 0,
            AttributePlace::Argument(i) => 1 + i,
            AttributePlace::Function => !0,
        }
    }
}

impl Attribute {
    pub fn apply_llfn(&self, idx: AttributePlace, llfn: &Value) {
        unsafe { LLVMRustAddFunctionAttribute(llfn, idx.as_uint(), *self) }
    }

    pub fn apply_callsite(&self, idx: AttributePlace, callsite: &Value) {
        unsafe { LLVMRustAddCallSiteAttribute(callsite, idx.as_uint(), *self) }
    }

    pub fn unapply_llfn(&self, idx: AttributePlace, llfn: &Value) {
        unsafe { LLVMRustRemoveFunctionAttributes(llfn, idx.as_uint(), *self) }
    }
}

/// Safe wrapper around `LLVMGetParam`, because segfaults are no fun.
pub(crate) fn get_param(llfn: &Value, index: c_uint) -> &Value { unsafe { assert!( index < LLVMCountParams(llfn), "out of bounds argument access: {} out of {} arguments", index, LLVMCountParams(llfn) ); LLVMGetParam(llfn, index) } } /// Safe wrapper for `LLVMGetValueName2` into a byte slice pub(crate) fn get_value_name(value: &Value) -> &[u8] { unsafe { let mut len = 0; let data = LLVMGetValueName2(value, &mut len); std::slice::from_raw_parts(data.cast(), len) } } /// Safe wrapper for `LLVMSetValueName2` from a byte slice pub(crate) fn set_value_name(value: &Value, name: &[u8]) { unsafe { let data = name.as_ptr().cast(); LLVMSetValueName2(value, data, name.len()); } } pub fn last_error() -> Option<String> { unsafe { let cstr = LLVMRustGetLastError(); if cstr.is_null() { None } else { let err = CStr::from_ptr(cstr).to_bytes(); let err = String::from_utf8_lossy(err).to_string(); libc::free(cstr as *mut _); Some(err) } } } pub(crate) fn SetUnnamedAddress<'a>(global: &'a Value, unnamed: UnnamedAddr) { unsafe { LLVMSetUnnamedAddress(global, unnamed); } } pub(crate) type Bool = c_uint; pub const True: Bool = 1 as Bool; pub const False: Bool = 0 as Bool; #[derive(Copy, Clone, PartialEq)] #[repr(C)] #[allow(dead_code)] // Variants constructed by C++. pub(crate) enum LLVMRustResult { Success, Failure, } /// LLVMRustLinkage #[derive(Copy, Clone, PartialEq)] #[repr(C)] pub(crate) enum Linkage { ExternalLinkage = 0, AvailableExternallyLinkage = 1, LinkOnceAnyLinkage = 2, LinkOnceODRLinkage = 3, WeakAnyLinkage = 4, WeakODRLinkage = 5, AppendingLinkage = 6, InternalLinkage = 7, PrivateLinkage = 8, ExternalWeakLinkage = 9, CommonLinkage = 10, } // LLVMRustVisibility #[repr(C)] #[derive(Copy, Clone, PartialEq)] pub(crate) enum Visibility { Default = 0, Hidden = 1, Protected = 2, } /// LLVMUnnamedAddr #[repr(C)] pub(crate) enum UnnamedAddr { No, Local, Global, } /// LLVMDLLStorageClass #[derive(Copy, Clone)] #[repr(C)] pub(crate) enum DLLStorageClass { #[allow(dead_code)] Default = 0, #[allow(dead_code)] DllExport = 2, // Function to be accessible from DLL. 
} /// Matches LLVMRustAttribute in LLVMWrapper.h /// Semantically a subset of the C++ enum llvm::Attribute::AttrKind, /// though it is not ABI compatible (since it's a C++ enum) #[repr(C)] #[derive(Copy, Clone, Debug)] pub(crate) enum Attribute { AlwaysInline = 0, Cold = 2, InlineHint = 3, MinSize = 4, NoAlias = 6, NoCapture = 7, NoInline = 8, NonNull = 9, NoReturn = 11, NoUnwind = 12, OptimizeForSize = 13, OptimizeNone = 14, ReadOnly = 15, SExt = 16, StructRet = 17, ZExt = 19, InReg = 20, ReadNone = 24, } /// LLVMIntPredicate #[derive(Copy, Clone)] #[repr(C)] pub(crate) enum IntPredicate { IntEQ = 32, IntNE = 33, IntUGT = 34, IntUGE = 35, IntULT = 36, IntULE = 37, IntSGT = 38, IntSGE = 39, IntSLT = 40, IntSLE = 41, } impl IntPredicate { pub fn from_generic(intpre: rustc_codegen_ssa::common::IntPredicate) -> Self { match intpre { rustc_codegen_ssa::common::IntPredicate::IntEQ => IntPredicate::IntEQ, rustc_codegen_ssa::common::IntPredicate::IntNE => IntPredicate::IntNE, rustc_codegen_ssa::common::IntPredicate::IntUGT => IntPredicate::IntUGT, rustc_codegen_ssa::common::IntPredicate::IntUGE => IntPredicate::IntUGE, rustc_codegen_ssa::common::IntPredicate::IntULT => IntPredicate::IntULT, rustc_codegen_ssa::common::IntPredicate::IntULE => IntPredicate::IntULE, rustc_codegen_ssa::common::IntPredicate::IntSGT => IntPredicate::IntSGT, rustc_codegen_ssa::common::IntPredicate::IntSGE => IntPredicate::IntSGE, rustc_codegen_ssa::common::IntPredicate::IntSLT => IntPredicate::IntSLT, rustc_codegen_ssa::common::IntPredicate::IntSLE => IntPredicate::IntSLE, } } } /// LLVMTypeKind #[allow(dead_code)] #[derive(Copy, Clone, PartialEq, Debug)] #[repr(C)] pub(crate) enum TypeKind { Void = 0, Half = 1, Float = 2, Double = 3, Label = 7, Integer = 8, Function = 9, Struct = 10, Array = 11, Pointer = 12, Vector = 13, Metadata = 14, Token = 16, ScalableVector = 17, BFloat = 18, } impl TypeKind { pub fn to_generic(self) -> rustc_codegen_ssa::common::TypeKind { match self { TypeKind::Void => rustc_codegen_ssa::common::TypeKind::Void, TypeKind::Half => rustc_codegen_ssa::common::TypeKind::Half, TypeKind::Float => rustc_codegen_ssa::common::TypeKind::Float, TypeKind::Double => rustc_codegen_ssa::common::TypeKind::Double, TypeKind::Label => rustc_codegen_ssa::common::TypeKind::Label, TypeKind::Integer => rustc_codegen_ssa::common::TypeKind::Integer, TypeKind::Function => rustc_codegen_ssa::common::TypeKind::Function, TypeKind::Struct => rustc_codegen_ssa::common::TypeKind::Struct, TypeKind::Array => rustc_codegen_ssa::common::TypeKind::Array, TypeKind::Pointer => rustc_codegen_ssa::common::TypeKind::Pointer, TypeKind::Vector => rustc_codegen_ssa::common::TypeKind::Vector, TypeKind::Metadata => rustc_codegen_ssa::common::TypeKind::Metadata, TypeKind::Token => rustc_codegen_ssa::common::TypeKind::Token, TypeKind::ScalableVector => rustc_codegen_ssa::common::TypeKind::ScalableVector, TypeKind::BFloat => rustc_codegen_ssa::common::TypeKind::BFloat, } } } /// LLVMMetadataType #[derive(Copy, Clone)] #[repr(C)] pub(crate) enum MetadataType { MD_range = 4, MD_invariant_load = 6, MD_nontemporal = 9, MD_nonnull = 11, } /// LLVMRustAsmDialect #[derive(Copy, Clone)] #[repr(C)] pub enum AsmDialect { Other, Att, Intel, } impl AsmDialect { pub fn from_generic(asm: rustc_ast::LlvmAsmDialect) -> Self { match asm { rustc_ast::LlvmAsmDialect::Att => AsmDialect::Att, rustc_ast::LlvmAsmDialect::Intel => AsmDialect::Intel, } } } /// LLVMRustDiagnosticKind #[derive(Copy, Clone)] #[repr(C)] #[allow(dead_code)] // Variants constructed by C++. 
pub(crate) enum DiagnosticKind { Other, InlineAsm, StackSize, DebugMetadataVersion, SampleProfile, OptimizationRemark, OptimizationRemarkMissed, OptimizationRemarkAnalysis, OptimizationRemarkAnalysisFPCommute, OptimizationRemarkAnalysisAliasing, OptimizationRemarkOther, OptimizationFailure, PGOProfile, Linker, Unsupported, } /// LLVMRustDiagnosticLevel #[derive(Copy, Clone)] #[repr(C)] #[allow(dead_code)] // Variants constructed by C++. pub(crate) enum DiagnosticLevel { Error, Warning, Note, Remark, } #[repr(C)] #[derive(Clone, Copy, Debug, PartialEq)] pub enum LLVMVerifierFailureAction { /// Print to stderr and abort the process. LLVMAbortProcessAction = 0, /// Print to stderr and return 1. LLVMPrintMessageAction = 1, /// Return 1 and print nothing. LLVMReturnStatusAction = 2, } /// LLVMRustPassKind #[derive(Copy, Clone, PartialEq, Debug)] #[repr(C)] #[allow(dead_code)] // Variants constructed by C++. pub(crate) enum PassKind { Other, Function, Module, } /// LLVMRustThinLTOData extern "C" { pub(crate) type ThinLTOData; } /// LLVMRustThinLTOBuffer extern "C" { pub(crate) type ThinLTOBuffer; } /// LLVMRustThinLTOModule #[repr(C)] pub(crate) struct ThinLTOModule { pub identifier: *const c_char, pub data: *const u8, pub len: usize, } extern "C" { type Opaque; } #[repr(C)] struct InvariantOpaque<'a> { _marker: PhantomData<&'a mut &'a ()>, _opaque: Opaque, } // Opaque pointer types extern "C" { pub(crate) type Module; } extern "C" { pub type Context; } extern "C" { pub(crate) type Type; } extern "C" { pub(crate) type Value; } extern "C" { pub(crate) type ConstantInt; } extern "C" { pub type Metadata; } extern "C" { pub(crate) type BasicBlock; } #[repr(C)] pub(crate) struct Builder<'a> { _inv: InvariantOpaque<'a>, } #[repr(C)] pub(crate) struct OperandBundleDef<'a>(InvariantOpaque<'a>); extern "C" { pub(crate) type ModuleBuffer; } #[repr(C)] pub struct PassManager<'a>(InvariantOpaque<'a>); extern "C" { pub type PassManagerBuilder; } extern "C" { pub type Pass; } extern "C" { pub type TargetMachine; } extern "C" { pub(crate) type MemoryBuffer; } /// LLVMRustChecksumKind #[derive(Copy, Clone)] #[repr(C)] pub enum ChecksumKind { None, MD5, SHA1, SHA256, } pub mod debuginfo { use super::{InvariantOpaque, Metadata}; use bitflags::bitflags; #[repr(C)] pub(crate) struct DIBuilder<'a>(InvariantOpaque<'a>); pub type DIDescriptor = Metadata; pub type DIScope = DIDescriptor; pub type DILocation = DIDescriptor; pub type DIFile = DIScope; pub type DILexicalBlock = DIScope; pub type DISubprogram = DIScope; pub type DINameSpace = DIScope; pub type DIType = DIDescriptor; pub type DIBasicType = DIType; pub type DIDerivedType = DIType; pub type DICompositeType = DIDerivedType; pub type DIVariable = DIDescriptor; pub type DIGlobalVariable = DIDescriptor; pub type DIArray = DIDescriptor; pub type DISubrange = DIDescriptor; pub type DIEnumerator = DIDescriptor; pub type DITemplateTypeParameter = DIDescriptor; // These values **must** match with LLVMRustDIFlags!! bitflags! 
{ #[repr(C)] #[derive(Default)] pub struct DIFlags: u32 { const FlagZero = 0; const FlagPrivate = 1; const FlagProtected = 2; const FlagPublic = 3; const FlagFwdDecl = (1 << 2); const FlagAppleBlock = (1 << 3); const FlagBlockByrefStruct = (1 << 4); const FlagVirtual = (1 << 5); const FlagArtificial = (1 << 6); const FlagExplicit = (1 << 7); const FlagPrototyped = (1 << 8); const FlagObjcClassComplete = (1 << 9); const FlagObjectPointer = (1 << 10); const FlagVector = (1 << 11); const FlagStaticMember = (1 << 12); const FlagLValueReference = (1 << 13); const FlagRValueReference = (1 << 14); const FlagExternalTypeRef = (1 << 15); const FlagIntroducedVirtual = (1 << 18); const FlagBitField = (1 << 19); const FlagNoReturn = (1 << 20); const FlagMainSubprogram = (1 << 21); } } } // These functions are kind of a hack for the future. They wrap LLVM 7 rust shim functions // and turn them into the API that the llvm 12 shim has. This way, if nvidia ever updates their // dinosaur llvm version, switching for us should be extremely easy. `Name` is assumed to be // a utf8 string pub(crate) unsafe fn LLVMRustGetOrInsertFunction<'a>( M: &'a Module, Name: *const c_char, NameLen: usize, FunctionTy: &'a Type, ) -> &'a Value
pub(crate) unsafe fn LLVMRustGetOrInsertGlobal<'a>( M: &'a Module, Name: *const c_char, NameLen: usize, FunctionTy: &'a Type, AddressSpace: c_uint, ) -> &'a Value { let str = std::str::from_utf8_unchecked(std::slice::from_raw_parts(Name.cast(), NameLen)); let cstring = CString::new(str).expect("str with nul"); __LLVMRustGetOrInsertGlobal(M, cstring.as_ptr(), FunctionTy, AddressSpace) } pub(crate) unsafe fn LLVMRustBuildCall<'a>( B: &Builder<'a>, Fn: &'a Value, Args: *const &'a Value, NumArgs: c_uint, Bundle: Option<&OperandBundleDef<'a>>, ) -> &'a Value { __LLVMRustBuildCall(B, Fn, Args, NumArgs, Bundle, unnamed()) } /// LLVMRustCodeGenOptLevel #[derive(Copy, Clone, PartialEq)] #[repr(C)] pub enum CodeGenOptLevel { Other, None, Less, Default, Aggressive, } /// LLVMRelocMode #[derive(Copy, Clone, PartialEq)] #[repr(C)] pub enum RelocMode { Default, Static, PIC, DynamicNoPic, ROPI, RWPI, ROPI_RWPI, } /// LLVMRustCodeModel #[derive(Copy, Clone)] #[repr(C)] pub enum CodeModel { Other, Small, Kernel, Medium, Large, None, } extern "C" { #[link_name = "LLVMRustBuildCall"] pub(crate) fn __LLVMRustBuildCall<'a>( B: &Builder<'a>, Fn: &'a Value, Args: *const &'a Value, NumArgs: c_uint, Bundle: Option<&OperandBundleDef<'a>>, Name: *const c_char, ) -> &'a Value; #[link_name = "LLVMRustGetOrInsertGlobal"] fn __LLVMRustGetOrInsertGlobal<'a>( M: &'a Module, Name: *const c_char, T: &'a Type, AddressSpace: c_uint, ) -> &'a Value; // see comment on function before this extern block #[link_name = "LLVMRustGetOrInsertFunction"] fn __LLVMRustGetOrInsertFunction<'a>( M: &'a Module, Name: *const c_char, FunctionTy: &'a Type, ) -> &'a Value; // dont trace these functions or cargo will error, see init.rs pub(crate) fn LLVMStartMultithreaded() -> Bool; pub(crate) fn LLVMInitializeNVPTXTargetInfo(); pub(crate) fn LLVMInitializeNVPTXTarget(); pub(crate) fn LLVMInitializeNVPTXTargetMC(); pub(crate) fn LLVMInitializeNVPTXAsmPrinter(); pub(crate) fn LLVMInitializePasses(); pub(crate) fn LLVMRustSetLLVMOptions(Argc: c_int, Argv: *const *const c_char); } // use rustc_codegen_nvvm_macros::trace_ffi_calls; // #[trace_ffi_calls] extern "C" { pub(crate) fn LLVMAddGlobalInAddressSpace<'a>( M: &'a Module, Ty: &'a Type, Name: *const c_char, AddressSpace: c_uint, ) -> &'a Value; pub(crate) fn LLVMGetOperand(Val: &Value, Index: c_uint) -> &Value; pub(crate) fn LLVMIsABitCastInst(Val: &Value) -> Option<&Value>; pub(crate) fn LLVMIsASelectInst(Val: &Value) -> Option<&Value>; pub(crate) fn LLVMRustGetFunctionType(V: &Value) -> &Type; pub(crate) fn LLVMLinkModules2(Dest: &Module, Src: &Module) -> Bool; pub(crate) fn LLVMParseIRInContext<'ll, 'a, 'b>( ContextRef: &'ll Context, MemBuf: &'a MemoryBuffer, OutM: *mut &'b Module, OutMessage: *mut *mut c_char, ) -> Bool; pub(crate) fn LLVMCreateMemoryBufferWithMemoryRange<'a>( InputData: *const c_char, InputDataLength: usize, BufferName: *const c_char, RequiresNullTerminator: Bool, ) -> &'a mut MemoryBuffer; pub(crate) fn LLVMDisposeMemoryBuffer<'a>(MemBuf: &'a mut MemoryBuffer); pub(crate) fn LLVMSetCurrentDebugLocation<'a>(Builder: &Builder<'a>, L: &'a Value); pub(crate) fn LLVMGetModuleContext(M: &Module) -> &Context; pub(crate) fn LLVMGetMDKindIDInContext( C: &Context, Name: *const c_char, SLen: c_uint, ) -> c_uint; pub(crate) fn LLVMRustDebugMetadataVersion() -> u32; pub(crate) fn LLVMRustVersionMajor() -> u32; pub(crate) fn LLVMRustVersionMinor() -> u32; pub(crate) fn LLVMRustAddModuleFlag(M: &Module, name: *const c_char, value: u32); pub(crate) fn LLVMRustMetadataAsValue<'a>(C: &'a 
Context, MD: &'a Metadata) -> &'a Value; pub(crate) fn LLVMRustDIBuilderCreate<'a>(M: &'a Module) -> &'a mut DIBuilder<'a>; pub(crate) fn LLVMRustDIBuilderDispose<'a>(Builder: &'a mut DIBuilder<'a>); pub(crate) fn LLVMRustDIBuilderFinalize<'a>(Builder: &DIBuilder<'a>); pub(crate) fn LLVMRustDIBuilderCreateCompileUnit<'a>( Builder: &DIBuilder<'a>, Lang: c_uint, File: &'a DIFile, Producer: *const c_char, isOptimized: bool, Flags: *const c_char, RuntimeVer: c_uint, SplitName: *const c_char, ) -> &'a DIDescriptor; pub(crate) fn LLVMRustDIBuilderCreateFile<'a>( Builder: &DIBuilder<'a>, Filename: *const c_char, Directory: *const c_char, ) -> &'a DIFile; pub(crate) fn LLVMRustDIBuilderCreateSubroutineType<'a>( Builder: &DIBuilder<'a>, ParameterTypes: &'a DIArray, ) -> &'a DICompositeType; pub(crate) fn LLVMRustDIBuilderCreateFunction<'a>( Builder: &DIBuilder<'a>, Scope: &'a DIDescriptor, Name: *const c_char, LinkageName: *const c_char, File: &'a DIFile, LineNo: c_uint, Ty: &'a DIType, isLocalToUnit: bool, isDefinition: bool, ScopeLine: c_uint, Flags: DIFlags, isOptimized: bool, MaybeFn: Option<&'a Value>, TParam: &'a DIArray, Decl: Option<&'a DIDescriptor>, ) -> &'a DISubprogram; pub(crate) fn LLVMRustDIBuilderCreateBasicType<'a>( Builder: &DIBuilder<'a>, Name: *const c_char, SizeInBits: u64, Encoding: c_uint, ) -> &'a DIBasicType; pub(crate) fn LLVMRustDIBuilderCreatePointerType<'a>( Builder: &DIBuilder<'a>, PointeeTy: &'a DIType, SizeInBits: u64, AlignInBits: u32, Name: *const c_char, ) -> &'a DIDerivedType; pub(crate) fn LLVMRustDIBuilderCreateStructType<'a>( Builder: &DIBuilder<'a>, Scope: Option<&'a DIDescriptor>, Name: *const c_char, File: &'a DIFile, LineNumber: c_uint, SizeInBits: u64, AlignInBits: u32, Flags: DIFlags, DerivedFrom: Option<&'a DIType>, Elements: &'a DIArray, RunTimeLang: c_uint, VTableHolder: Option<&'a DIType>, UniqueId: *const c_char, ) -> &'a DICompositeType; pub(crate) fn LLVMRustDIBuilderCreateMemberType<'a>( Builder: &DIBuilder<'a>, Scope: &'a DIDescriptor, Name: *const c_char, File: &'a DIFile, LineNo: c_uint, SizeInBits: u64, AlignInBits: u32, OffsetInBits: u64, Flags: DIFlags, Ty: &'a DIType, ) -> &'a DIDerivedType; pub(crate) fn LLVMRustDIBuilderCreateVariantMemberType<'a>( Builder: &DIBuilder<'a>, Scope: &'a DIScope, Name: *const c_char, File: &'a DIFile, LineNumber: c_uint, SizeInBits: u64, AlignInBits: u32, OffsetInBits: u64, Discriminant: Option<&'a Value>, Flags: DIFlags, Ty: &'a DIType, ) -> &'a DIType; pub(crate) fn LLVMRustDIBuilderCreateLexicalBlock<'a>( Builder: &DIBuilder<'a>, Scope: &'a DIScope, File: &'a DIFile, Line: c_uint, Col: c_uint, ) -> &'a DILexicalBlock; pub(crate) fn LLVMRustDIBuilderCreateLexicalBlockFile<'a>( Builder: &DIBuilder<'a>, Scope: &'a DIScope, File: &'a DIFile, ) -> &'a DILexicalBlock; pub(crate) fn LLVMRustDIBuilderCreateStaticVariable<'a>( Builder: &DIBuilder<'a>, Context: Option<&'a DIScope>, Name: *const c_char, LinkageName: *const c_char, File: &'a DIFile, LineNo: c_uint, Ty: &'a DIType, isLocalToUnit: bool, Val: &'a Value, Decl: Option<&'a DIDescriptor>, AlignInBits: u32, ) -> &'a DIGlobalVariable; pub(crate) fn LLVMRustDIBuilderCreateVariable<'a>( Builder: &DIBuilder<'a>, Tag: c_uint, Scope: &'a DIDescriptor, Name: *const c_char, File: &'a DIFile, LineNo: c_uint, Ty: &'a DIType, AlwaysPreserve: bool, Flags: DIFlags, ArgNo: c_uint, AlignInBits: u32, ) -> &'a DIVariable; pub(crate) fn LLVMRustDIBuilderCreateArrayType<'a>( Builder: &DIBuilder<'a>, Size: u64, AlignInBits: u32, Ty: &'a DIType, Subscripts: &'a DIArray, ) -> &'a 
DIType; pub(crate) fn LLVMRustDIBuilderCreateVectorType<'a>( Builder: &DIBuilder<'a>, Size: u64, AlignInBits: u32, Ty: &'a DIType, Subscripts: &'a DIArray, ) -> &'a DIType; pub(crate) fn LLVMRustDIBuilderGetOrCreateSubrange<'a>( Builder: &DIBuilder<'a>, Lo: i64, Count: i64, ) -> &'a DISubrange; pub(crate) fn LLVMRustDIBuilderGetOrCreateArray<'a>( Builder: &DIBuilder<'a>, Ptr: *const Option<&'a DIDescriptor>, Count: c_uint, ) -> &'a DIArray; pub(crate) fn LLVMRustDIBuilderInsertDeclareAtEnd<'a>( Builder: &DIBuilder<'a>, Val: &'a Value, VarInfo: &'a DIVariable, AddrOps: *const i64, AddrOpsCount: c_uint, DL: &'a DILocation, InsertAtEnd: &'a BasicBlock, ) -> &'a Value; pub(crate) fn LLVMRustDIBuilderCreateEnumerator<'a>( Builder: &DIBuilder<'a>, Name: *const c_char, Val: u64, ) -> &'a DIEnumerator; pub(crate) fn LLVMRustDIBuilderCreateEnumerationType<'a>( Builder: &DIBuilder<'a>, Scope: &'a DIScope, Name: *const c_char, File: &'a DIFile, LineNumber: c_uint, SizeInBits: u64, AlignInBits: u32, Elements: &'a DIArray, ClassType: &'a DIType, ) -> &'a DIType; pub(crate) fn LLVMRustDIBuilderCreateUnionType<'a>( Builder: &DIBuilder<'a>, Scope: Option<&'a DIScope>, Name: *const c_char, File: &'a DIFile, LineNumber: c_uint, SizeInBits: u64, AlignInBits: u32, Flags: DIFlags, Elements: Option<&'a DIArray>, RunTimeLang: c_uint, UniqueId: *const c_char, ) -> &'a DIType; pub(crate) fn LLVMRustDIBuilderCreateVariantPart<'a>( Builder: &DIBuilder<'a>, Scope: &'a DIScope, Name: *const c_char, File: &'a DIFile, LineNo: c_uint, SizeInBits: u64, AlignInBits: u32, Flags: DIFlags, Discriminator: Option<&'a DIDerivedType>, Elements: &'a DIArray, UniqueId: *const c_char, ) -> &'a DIDerivedType; pub(crate) fn LLVMSetUnnamedAddr<'a>(GlobalVar: &'a Value, UnnamedAddr: Bool); pub(crate) fn LLVMRustDIBuilderCreateTemplateTypeParameter<'a>( Builder: &DIBuilder<'a>, Scope: Option<&'a DIScope>, Name: *const c_char, Ty: &'a DIType, ) -> &'a DITemplateTypeParameter; pub(crate) fn LLVMRustDIBuilderCreateNameSpace<'a>( Builder: &DIBuilder<'a>, Scope: Option<&'a DIScope>, Name: *const c_char, ) -> &'a DINameSpace; pub(crate) fn LLVMRustDICompositeTypeReplaceArrays<'a>( Builder: &DIBuilder<'a>, CompositeType: &'a DIType, Elements: Option<&'a DIArray>, Params: Option<&'a DIArray>, ); pub(crate) fn LLVMRustDICompositeTypeSetTypeArray<'a>( Builder: &DIBuilder<'a>, CompositeType: &'a DIType, TypeArray: &'a DIArray, ); pub(crate) fn LLVMRustDIBuilderCreateDebugLocation<'a>( Line: c_uint, Column: c_uint, Scope: &'a DIScope, InlinedAt: Option<&'a Metadata>, ) -> &'a DILocation; pub fn LLVMRustDIBuilderCreateOpDeref() -> i64; pub fn LLVMRustDIBuilderCreateOpPlusUconst() -> i64; pub(crate) fn LLVMRustRunFunctionPassManager(PM: &PassManager, M: &Module); pub(crate) fn LLVMRustAddAlwaysInlinePass(P: &PassManagerBuilder, AddLifetimes: bool); pub(crate) fn LLVMRustAddBuilderLibraryInfo( PMB: &PassManagerBuilder, M: &Module, DisableSimplifyLibCalls: bool, ); pub(crate) fn LLVMRustConfigurePassManagerBuilder( PMB: &PassManagerBuilder, OptLevel: CodeGenOptLevel, MergeFunctions: bool, SLPVectorize: bool, LoopVectorize: bool, PrepareForThinLTO: bool, PGOGenPath: *const c_char, PGOUsePath: *const c_char, ); pub(crate) fn LLVMRustCreateTargetMachine<'a>( Triple: *const c_char, CPU: *const c_char, Features: *const c_char, Model: CodeModel, Reloc: RelocMode, Level: CodeGenOptLevel, UseSoftFP: bool, PositionIndependentExecutable: bool, FunctionSections: bool, DataSections: bool, TrapUnreachable: bool, Singlethread: bool, ) -> Option<&'static mut 
TargetMachine>; pub(crate) fn LLVMRustAddAnalysisPasses<'a>( T: &'a TargetMachine, PM: &'a PassManager, M: &'a Module, ); pub(crate) fn LLVMRustPassKind(Pass: &Pass) -> PassKind; pub(crate) fn LLVMRustFindAndCreatePass(Pass: *const c_char) -> Option<&'static mut Pass>; pub(crate) fn LLVMRustAddPass<'a>(PM: &'a PassManager, Pass: &'static mut Pass); /// Writes a module to the specified path. Returns 0 on success. pub(crate) fn LLVMWriteBitcodeToFile(M: &Module, Path: *const c_char) -> c_int; /// Creates a pass manager. pub(crate) fn LLVMCreatePassManager<'a>() -> &'a mut PassManager<'a>; /// Creates a function-by-function pass manager pub(crate) fn LLVMCreateFunctionPassManagerForModule<'a>( M: &'a Module, ) -> &'a mut PassManager<'a>; /// Disposes a pass manager. pub(crate) fn LLVMDisposePassManager<'a>(PM: &'a mut PassManager<'a>); /// Runs a pass manager on a module. pub(crate) fn LLVMRunPassManager<'a>(PM: &PassManager<'a>, M: &'a Module) -> Bool; pub(crate) fn LLVMTimeTraceProfilerFinish(FileName: *const c_char); pub(crate) fn LLVMAddAnalysisPasses<'a>(T: &'a TargetMachine, PM: &PassManager<'a>); pub(crate) fn LLVMPassManagerBuilderCreate() -> &'static mut PassManagerBuilder; pub(crate) fn LLVMPassManagerBuilderDispose(PMB: &'static mut PassManagerBuilder); pub(crate) fn LLVMPassManagerBuilderSetSizeLevel(PMB: &PassManagerBuilder, Value: Bool); pub(crate) fn LLVMPassManagerBuilderSetDisableUnrollLoops( PMB: &PassManagerBuilder, Value: Bool, ); pub(crate) fn LLVMPassManagerBuilderUseInlinerWithThreshold( PMB: &PassManagerBuilder, threshold: c_uint, ); pub(crate) fn LLVMPassManagerBuilderPopulateModulePassManager( PMB: &PassManagerBuilder, PM: &PassManager<'_>, ); pub(crate) fn LLVMPassManagerBuilderPopulateFunctionPassManager( PMB: &PassManagerBuilder, PM: &PassManager<'_>, ); pub(crate) fn LLVMPassManagerBuilderPopulateLTOPassManager( PMB: &PassManagerBuilder, PM: &PassManager<'_>, Internalize: Bool, RunInliner: Bool, ); pub(crate) fn LLVMRustPassManagerBuilderPopulateThinLTOPassManager( PMB: &PassManagerBuilder, PM: &PassManager<'_>, ); // functions that cg_llvm doesnt use but we do. mostly for int_replace. pub(crate) fn LLVMGetReturnType(FunctionTy: &Type) -> &Type; pub(crate) fn LLVMGetParams(Fn: &Value, Params: *mut &Value); pub(crate) fn LLVMGetEntryBasicBlock(Fn: &Value) -> &BasicBlock; pub(crate) fn LLVMGetNamedFunction(M: &Module, Name: *const c_char) -> &Value; pub(crate) fn LLVMRustGetFunctionReturnType(V: &Value) -> &Type; pub(crate) fn LLVMSetTarget(M: &Module, Triple: *const c_char); // Create and destroy contexts. pub(crate) fn LLVMRustContextCreate(shouldDiscardNames: bool) -> &'static mut Context; pub(crate) fn LLVMContextDispose(C: &'static mut Context); // Create modules. pub(crate) fn LLVMModuleCreateWithNameInContext( ModuleID: *const c_char, C: &Context, ) -> &Module; pub(crate) fn LLVMSetDataLayout(M: &Module, Triple: *const c_char); pub(crate) fn LLVMRustAppendModuleInlineAsm(M: &Module, Asm: *const c_char, AsmLen: size_t); /// See llvm::LLVMTypeKind::getTypeID. 
pub(crate) fn LLVMRustGetTypeKind(Ty: &Type) -> TypeKind; pub(crate) fn LLVMPrintTypeToString(Val: &Type) -> *mut c_char; // Operations on integer types pub(crate) fn LLVMInt1TypeInContext(C: &Context) -> &Type; pub(crate) fn LLVMInt8TypeInContext(C: &Context) -> &Type; pub(crate) fn LLVMInt16TypeInContext(C: &Context) -> &Type; pub(crate) fn LLVMInt32TypeInContext(C: &Context) -> &Type; pub(crate) fn LLVMInt64TypeInContext(C: &Context) -> &Type; pub(crate) fn LLVMIntTypeInContext(C: &Context, NumBits: c_uint) -> &Type; pub(crate) fn LLVMGetIntTypeWidth(IntegerTy: &Type) -> c_uint; // Operations on real types pub(crate) fn LLVMFloatTypeInContext(C: &Context) -> &Type; pub(crate) fn LLVMDoubleTypeInContext(C: &Context) -> &Type; // Operations on function types pub(crate) fn LLVMFunctionType<'a>( ReturnType: &'a Type, ParamTypes: *const &'a Type, ParamCount: c_uint, IsVarArg: Bool, ) -> &'a Type; pub(crate) fn LLVMCountParamTypes(FunctionTy: &Type) -> c_uint; pub(crate) fn LLVMGetParamTypes<'a>(FunctionTy: &'a Type, Dest: *mut &'a Type); // Operations on struct types pub(crate) fn LLVMStructTypeInContext<'a>( C: &'a Context, ElementTypes: *const &'a Type, ElementCount: c_uint, Packed: Bool, ) -> &'a Type; pub(crate) fn LLVMGetStructElementTypes<'a>(StructTy: &'a Type, Dest: *mut &'a Type); pub(crate) fn LLVMCountStructElementTypes(StructTy: &Type) -> c_uint; pub(crate) fn LLVMIsPackedStruct(StructTy: &Type) -> Bool; // Operations on array, pointer, and vector types (sequence types) pub(crate) fn LLVMRustArrayType(ElementType: &Type, ElementCount: u64) -> &Type; pub(crate) fn LLVMPointerType(ElementType: &Type, AddressSpace: c_uint) -> &Type; pub(crate) fn LLVMVectorType(ElementType: &Type, ElementCount: c_uint) -> &Type; pub(crate) fn LLVMGetElementType(Ty: &Type) -> &Type; pub(crate) fn LLVMGetVectorSize(VectorTy: &Type) -> c_uint; pub(crate) fn LLVMRustGetValueType(V: &Value) -> &Type; // Operations on other types pub(crate) fn LLVMVoidTypeInContext(C: &Context) -> &Type; pub(crate) fn LLVMRustMetadataTypeInContext(C: &Context) -> &Type; // Operations on all values pub(crate) fn LLVMTypeOf(Val: &Value) -> &Type; pub(crate) fn LLVMGetValueName2(Val: &Value, Length: *mut size_t) -> *const c_char; pub(crate) fn LLVMSetValueName2(Val: &Value, Name: *const c_char, NameLen: size_t); pub(crate) fn LLVMReplaceAllUsesWith<'a>(OldVal: &'a Value, NewVal: &'a Value); pub(crate) fn LLVMSetMetadata<'a>(Val: &'a Value, KindID: c_uint, Node: &'a Value); pub(crate) fn LLVMPrintValueToString<'a>(Val: &'a Value) -> *mut c_char; // Operations on constants of any type pub(crate) fn LLVMConstNull(Ty: &Type) -> &Value; pub(crate) fn LLVMGetUndef(Ty: &Type) -> &Value; // Operations on metadata pub(crate) fn LLVMMDStringInContext(C: &Context, Str: *const c_char, SLen: c_uint) -> &Value; pub(crate) fn LLVMMDNodeInContext<'a>( C: &'a Context, Vals: *const &'a Value, Count: c_uint, ) -> &'a Value; pub(crate) fn LLVMAddNamedMetadataOperand<'a>( M: &'a Module, Name: *const c_char, Val: &'a Value, ); // Operations on scalar constants pub(crate) fn LLVMConstInt(IntTy: &Type, N: c_ulonglong, SignExtend: Bool) -> &Value; pub(crate) fn LLVMConstIntOfArbitraryPrecision( IntTy: &Type, Wn: c_uint, Ws: *const u64, ) -> &Value; pub(crate) fn LLVMConstReal(RealTy: &Type, N: f64) -> &Value; pub(crate) fn LLVMConstIntGetZExtValue(ConstantVal: &ConstantInt) -> c_ulonglong; pub(crate) fn LLVMRustConstInt128Get( ConstantVal: &ConstantInt, SExt: bool, high: &mut u64, low: &mut u64, ) -> bool; // Operations on composite constants 
pub(crate) fn LLVMConstStringInContext( C: &Context, Str: *const c_char, Length: c_uint, DontNullTerminate: Bool, ) -> &Value; pub(crate) fn LLVMConstStructInContext<'a>( C: &'a Context, ConstantVals: *const &'a Value, Count: c_uint, Packed: Bool, ) -> &'a Value; pub(crate) fn LLVMConstArray<'a>( ElementTy: &'a Type, ConstantVals: *const &'a Value, Length: c_uint, ) -> &'a Value; pub(crate) fn LLVMConstVector(ScalarConstantVals: *const &Value, Size: c_uint) -> &Value; // Constant expressions pub(crate) fn LLVMConstInBoundsGEP<'a>( ConstantVal: &'a Value, ConstantIndices: *const &'a Value, NumIndices: c_uint, ) -> &'a Value; pub(crate) fn LLVMConstZExt<'a>(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value; pub(crate) fn LLVMConstPtrToInt<'a>(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value; pub(crate) fn LLVMConstIntToPtr<'a>(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value; pub(crate) fn LLVMConstBitCast<'a>(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value; pub(crate) fn LLVMConstPointerCast<'a>(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value; pub(crate) fn LLVMConstExtractValue( AggConstant: &Value, IdxList: *const c_uint, NumIdx: c_uint, ) -> &Value; // Operations on global variables, functions, and aliases (globals) pub(crate) fn LLVMIsDeclaration(Global: &Value) -> Bool; pub(crate) fn LLVMRustGetLinkage(Global: &Value) -> Linkage; pub(crate) fn LLVMRustSetLinkage(Global: &Value, RustLinkage: Linkage); pub(crate) fn LLVMSetSection(Global: &Value, Section: *const c_char); pub(crate) fn LLVMRustGetVisibility(Global: &Value) -> Visibility; pub(crate) fn LLVMRustSetVisibility(Global: &Value, Viz: Visibility); pub(crate) fn LLVMRustSetDSOLocal(Global: &Value, is_dso_local: bool); pub(crate) fn LLVMGetAlignment(Global: &Value) -> c_uint; pub(crate) fn LLVMSetAlignment(Global: &Value, Bytes: c_uint); pub(crate) fn LLVMSetDLLStorageClass(V: &Value, C: DLLStorageClass); // Operations on global variables pub(crate) fn LLVMIsAGlobalVariable(GlobalVar: &Value) -> Option<&Value>; pub(crate) fn LLVMAddGlobal<'a>(M: &'a Module, Ty: &'a Type, Name: *const c_char) -> &'a Value; pub(crate) fn LLVMGetNamedGlobal(M: &Module, Name: *const c_char) -> Option<&Value>; pub(crate) fn LLVMRustInsertPrivateGlobal<'a>(M: &'a Module, T: &'a Type) -> &'a Value; pub(crate) fn LLVMGetFirstGlobal(M: &Module) -> Option<&Value>; pub(crate) fn LLVMGetNextGlobal(GlobalVar: &Value) -> Option<&Value>; pub(crate) fn LLVMDeleteGlobal(GlobalVar: &Value); pub(crate) fn LLVMGetInitializer(GlobalVar: &Value) -> Option<&Value>; pub(crate) fn LLVMSetInitializer<'a>(GlobalVar: &'a Value, ConstantVal: &'a Value); pub(crate) fn LLVMIsGlobalConstant(GlobalVar: &Value) -> Bool; pub(crate) fn LLVMSetGlobalConstant(GlobalVar: &Value, IsConstant: Bool); pub(crate) fn LLVMRustGetNamedValue( M: &Module, Name: *const c_char, NameLen: size_t, ) -> Option<&Value>; pub(crate) fn LLVMSetTailCall(CallInst: &Value, IsTailCall: Bool); pub(crate) fn LLVMSetUnnamedAddress(Global: &Value, UnnamedAddr: UnnamedAddr); // Operations on functions pub(crate) fn LLVMSetFunctionCallConv(Fn: &Value, CC: c_uint); pub(crate) fn LLVMRustAddAlignmentAttr(Fn: &Value, index: c_uint, bytes: u32); pub(crate) fn LLVMRustAddFunctionAttribute(Fn: &Value, index: c_uint, attr: Attribute); pub(crate) fn LLVMRustAddFunctionAttrStringValue( Fn: &Value, index: c_uint, Name: *const c_char, Value: *const c_char, ); pub(crate) fn LLVMRustRemoveFunctionAttributes(Fn: &Value, index: c_uint, attr: Attribute); // Operations on parameters pub(crate) fn 
LLVMIsAArgument(Val: &Value) -> Option<&Value>; pub(crate) fn LLVMCountParams(Fn: &Value) -> c_uint; pub(crate) fn LLVMGetParam(Fn: &Value, Index: c_uint) -> &Value; // Operations on basic blocks pub(crate) fn LLVMGetBasicBlockParent(BB: &BasicBlock) -> &Value; pub(crate) fn LLVMAppendBasicBlockInContext<'a>( C: &'a Context, Fn: &'a Value, Name: *const c_char, ) -> &'a BasicBlock; // Operations on instructions pub(crate) fn LLVMIsAInstruction(Val: &Value) -> Option<&Value>; pub(crate) fn LLVMGetFirstBasicBlock(Fn: &Value) -> &BasicBlock; // Operations on call sites pub(crate) fn LLVMRustAddCallSiteAttribute(Instr: &Value, index: c_uint, attr: Attribute); pub(crate) fn LLVMRustAddCallSiteAttrString(Instr: &Value, index: c_uint, Name: *const c_char); pub(crate) fn LLVMRustAddAlignmentCallSiteAttr(Instr: &Value, index: c_uint, bytes: u32); pub(crate) fn LLVMRustAddDereferenceableCallSiteAttr(Instr: &Value, index: c_uint, bytes: u64); pub(crate) fn LLVMRustAddDereferenceableOrNullCallSiteAttr( Instr: &Value, index: c_uint, bytes: u64, ); // Operations on load/store instructions (only) pub(crate) fn LLVMSetVolatile(MemoryAccessInst: &Value, volatile: Bool); // Operations on phi nodes pub(crate) fn LLVMAddIncoming<'a>( PhiNode: &'a Value, IncomingValues: *const &'a Value, IncomingBlocks: *const &'a BasicBlock, Count: c_uint, ); // Instruction builders pub(crate) fn LLVMCreateBuilderInContext<'a>(C: &'a Context) -> &'a mut Builder<'a>; pub(crate) fn LLVMPositionBuilderAtEnd<'a>(Builder: &Builder<'a>, Block: &'a BasicBlock); pub(crate) fn LLVMGetInsertBlock<'a>(Builder: &Builder<'a>) -> &'a BasicBlock; pub(crate) fn LLVMDisposeBuilder<'a>(Builder: &'a mut Builder<'a>); pub(crate) fn LLVMBuildUnreachable<'a>(B: &Builder<'a>) -> &'a Value; // Terminators pub(crate) fn LLVMBuildRetVoid<'a>(B: &Builder<'a>) -> &'a Value; pub(crate) fn LLVMBuildRet<'a>(B: &Builder<'a>, V: &'a Value) -> &'a Value; pub(crate) fn LLVMBuildBr<'a>(B: &Builder<'a>, Dest: &'a BasicBlock) -> &'a Value; pub(crate) fn LLVMBuildCondBr<'a>( B: &Builder<'a>, If: &'a Value, Then: &'a BasicBlock, Else: &'a BasicBlock, ) -> &'a Value; pub(crate) fn LLVMBuildSwitch<'a>( B: &Builder<'a>, V: &'a Value, Else: &'a BasicBlock, NumCases: c_uint, ) -> &'a Value; // Add a case to the switch instruction pub(crate) fn LLVMAddCase<'a>(Switch: &'a Value, OnVal: &'a Value, Dest: &'a BasicBlock); // Arithmetic pub(crate) fn LLVMBuildAdd<'a>( B: &Builder<'a>, LHS: &'a Value, RHS: &'a Value, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMBuildFAdd<'a>( B: &Builder<'a>, LHS: &'a Value, RHS: &'a Value, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMBuildSub<'a>( B: &Builder<'a>, LHS: &'a Value, RHS: &'a Value, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMBuildFSub<'a>( B: &Builder<'a>, LHS: &'a Value, RHS: &'a Value, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMBuildMul<'a>( B: &Builder<'a>, LHS: &'a Value, RHS: &'a Value, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMBuildFMul<'a>( B: &Builder<'a>, LHS: &'a Value, RHS: &'a Value, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMBuildUDiv<'a>( B: &Builder<'a>, LHS: &'a Value, RHS: &'a Value, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMBuildExactUDiv<'a>( B: &Builder<'a>, LHS: &'a Value, RHS: &'a Value, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMBuildSDiv<'a>( B: &Builder<'a>, LHS: &'a Value, RHS: &'a Value, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMBuildExactSDiv<'a>( B: &Builder<'a>, LHS: &'a Value, RHS: &'a Value, 
Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMBuildFDiv<'a>( B: &Builder<'a>, LHS: &'a Value, RHS: &'a Value, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMBuildURem<'a>( B: &Builder<'a>, LHS: &'a Value, RHS: &'a Value, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMBuildSRem<'a>( B: &Builder<'a>, LHS: &'a Value, RHS: &'a Value, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMBuildFRem<'a>( B: &Builder<'a>, LHS: &'a Value, RHS: &'a Value, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMBuildShl<'a>( B: &Builder<'a>, LHS: &'a Value, RHS: &'a Value, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMBuildLShr<'a>( B: &Builder<'a>, LHS: &'a Value, RHS: &'a Value, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMBuildAShr<'a>( B: &Builder<'a>, LHS: &'a Value, RHS: &'a Value, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMBuildNSWAdd<'a>( B: &Builder<'a>, LHS: &'a Value, RHS: &'a Value, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMBuildNUWAdd<'a>( B: &Builder<'a>, LHS: &'a Value, RHS: &'a Value, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMBuildNSWSub<'a>( B: &Builder<'a>, LHS: &'a Value, RHS: &'a Value, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMBuildNUWSub<'a>( B: &Builder<'a>, LHS: &'a Value, RHS: &'a Value, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMBuildNSWMul<'a>( B: &Builder<'a>, LHS: &'a Value, RHS: &'a Value, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMBuildNUWMul<'a>( B: &Builder<'a>, LHS: &'a Value, RHS: &'a Value, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMBuildAnd<'a>( B: &Builder<'a>, LHS: &'a Value, RHS: &'a Value, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMBuildOr<'a>( B: &Builder<'a>, LHS: &'a Value, RHS: &'a Value, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMBuildXor<'a>( B: &Builder<'a>, LHS: &'a Value, RHS: &'a Value, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMBuildNeg<'a>(B: &Builder<'a>, V: &'a Value, Name: *const c_char) -> &'a Value; pub(crate) fn LLVMBuildFNeg<'a>( B: &Builder<'a>, V: &'a Value, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMBuildNot<'a>(B: &Builder<'a>, V: &'a Value, Name: *const c_char) -> &'a Value; pub(crate) fn LLVMRustSetFastMath(Instr: &Value); // Memory pub(crate) fn LLVMBuildAlloca<'a>( B: &Builder<'a>, Ty: &'a Type, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMBuildArrayAlloca<'a>( B: &Builder<'a>, Ty: &'a Type, Val: &'a Value, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMBuildLoad<'a>( B: &Builder<'a>, PointerVal: &'a Value, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMBuildStore<'a>(B: &Builder<'a>, Val: &'a Value, Ptr: &'a Value) -> &'a Value; pub(crate) fn LLVMBuildGEP<'a>( B: &Builder<'a>, Pointer: &'a Value, Indices: *const &'a Value, NumIndices: c_uint, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMBuildInBoundsGEP<'a>( B: &Builder<'a>, Pointer: &'a Value, Indices: *const &'a Value, NumIndices: c_uint, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMBuildStructGEP<'a>( B: &Builder<'a>, Pointer: &'a Value, Idx: c_uint, Name: *const c_char, ) -> &'a Value; // Casts pub(crate) fn LLVMBuildTrunc<'a>( B: &Builder<'a>, Val: &'a Value, DestTy: &'a Type, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMBuildZExt<'a>( B: &Builder<'a>, Val: &'a Value, DestTy: &'a Type, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMBuildSExt<'a>( B: &Builder<'a>, Val: &'a Value, DestTy: &'a Type, Name: *const c_char, ) -> &'a Value; 
pub(crate) fn LLVMBuildFPToUI<'a>( B: &Builder<'a>, Val: &'a Value, DestTy: &'a Type, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMBuildFPToSI<'a>( B: &Builder<'a>, Val: &'a Value, DestTy: &'a Type, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMBuildUIToFP<'a>( B: &Builder<'a>, Val: &'a Value, DestTy: &'a Type, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMBuildSIToFP<'a>( B: &Builder<'a>, Val: &'a Value, DestTy: &'a Type, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMBuildFPTrunc<'a>( B: &Builder<'a>, Val: &'a Value, DestTy: &'a Type, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMBuildFPExt<'a>( B: &Builder<'a>, Val: &'a Value, DestTy: &'a Type, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMBuildPtrToInt<'a>( B: &Builder<'a>, Val: &'a Value, DestTy: &'a Type, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMBuildIntToPtr<'a>( B: &Builder<'a>, Val: &'a Value, DestTy: &'a Type, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMBuildBitCast<'a>( B: &Builder<'a>, Val: &'a Value, DestTy: &'a Type, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMBuildPointerCast<'a>( B: &Builder<'a>, Val: &'a Value, DestTy: &'a Type, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMRustBuildIntCast<'a>( B: &Builder<'a>, Val: &'a Value, DestTy: &'a Type, IsSized: bool, ) -> &'a Value; // Comparisons pub(crate) fn LLVMBuildICmp<'a>( B: &Builder<'a>, Op: c_uint, LHS: &'a Value, RHS: &'a Value, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMBuildFCmp<'a>( B: &Builder<'a>, Op: c_uint, LHS: &'a Value, RHS: &'a Value, Name: *const c_char, ) -> &'a Value; // Miscellaneous instructions pub(crate) fn LLVMBuildPhi<'a>(B: &Builder<'a>, Ty: &'a Type, Name: *const c_char) -> &'a Value; pub(crate) fn LLVMRustGetInstrProfIncrementIntrinsic<'a>(M: &Module) -> &'a Value; pub(crate) fn LLVMRustBuildMemCpy<'a>( B: &Builder<'a>, Dst: &'a Value, DstAlign: c_uint, Src: &'a Value, SrcAlign: c_uint, Size: &'a Value, IsVolatile: bool, ) -> &'a Value; pub(crate) fn LLVMRustBuildMemMove<'a>( B: &Builder<'a>, Dst: &'a Value, DstAlign: c_uint, Src: &'a Value, SrcAlign: c_uint, Size: &'a Value, IsVolatile: bool, ) -> &'a Value; pub(crate) fn LLVMRustBuildMemSet<'a>( B: &Builder<'a>, Dst: &'a Value, DstAlign: c_uint, Val: &'a Value, Size: &'a Value, IsVolatile: bool, ) -> &'a Value; pub(crate) fn LLVMBuildSelect<'a>( B: &Builder<'a>, If: &'a Value, Then: &'a Value, Else: &'a Value, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMBuildVAArg<'a>( B: &Builder<'a>, list: &'a Value, Ty: &'a Type, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMBuildExtractElement<'a>( B: &Builder<'a>, VecVal: &'a Value, Index: &'a Value, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMBuildInsertElement<'a>( B: &Builder<'a>, VecVal: &'a Value, EltVal: &'a Value, Index: &'a Value, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMBuildShuffleVector<'a>( B: &Builder<'a>, V1: &'a Value, V2: &'a Value, Mask: &'a Value, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMBuildExtractValue<'a>( B: &Builder<'a>, AggVal: &'a Value, Index: c_uint, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMBuildInsertValue<'a>( B: &Builder<'a>, AggVal: &'a Value, EltVal: &'a Value, Index: c_uint, Name: *const c_char, ) -> &'a Value; pub(crate) fn LLVMRustBuildVectorReduceFAdd<'a>( B: &Builder<'a>, Acc: &'a Value, Src: &'a Value, ) -> &'a Value; pub(crate) fn LLVMRustBuildVectorReduceFMul<'a>( B: &Builder<'a>, Acc: &'a Value, Src: &'a Value, ) -> &'a Value; 
pub(crate) fn LLVMRustBuildVectorReduceAdd<'a>(B: &Builder<'a>, Src: &'a Value) -> &'a Value; pub(crate) fn LLVMRustBuildVectorReduceMul<'a>(B: &Builder<'a>, Src: &'a Value) -> &'a Value; pub(crate) fn LLVMRustBuildVectorReduceAnd<'a>(B: &Builder<'a>, Src: &'a Value) -> &'a Value; pub(crate) fn LLVMRustBuildVectorReduceOr<'a>(B: &Builder<'a>, Src: &'a Value) -> &'a Value; pub(crate) fn LLVMRustBuildVectorReduceXor<'a>(B: &Builder<'a>, Src: &'a Value) -> &'a Value; pub(crate) fn LLVMRustBuildVectorReduceMin<'a>( B: &Builder<'a>, Src: &'a Value, IsSigned: bool, ) -> &'a Value; pub(crate) fn LLVMRustBuildVectorReduceMax<'a>( B: &Builder<'a>, Src: &'a Value, IsSigned: bool, ) -> &'a Value; pub(crate) fn LLVMRustBuildVectorReduceFMin<'a>( B: &Builder<'a>, Src: &'a Value, IsNaN: bool, ) -> &'a Value; pub(crate) fn LLVMRustBuildVectorReduceFMax<'a>( B: &Builder<'a>, Src: &'a Value, IsNaN: bool, ) -> &'a Value; pub(crate) fn LLVMRustBuildMinNum<'a>( B: &Builder<'a>, LHS: &'a Value, RHS: &'a Value, ) -> &'a Value; pub(crate) fn LLVMRustBuildMaxNum<'a>( B: &Builder<'a>, LHS: &'a Value, RHS: &'a Value, ) -> &'a Value; pub(crate) fn LLVMDisposeMessage(message: *mut c_char); /// Returns a string describing the last error caused by an LLVMRust* call. pub(crate) fn LLVMRustGetLastError() -> *const c_char; pub(crate) fn LLVMStructCreateNamed(C: &Context, Name: *const c_char) -> &Type; pub(crate) fn LLVMStructSetBody<'a>( StructTy: &'a Type, ElementTypes: *const &'a Type, ElementCount: c_uint, Packed: Bool, ); /// Prepares inline assembly. pub(crate) fn LLVMRustInlineAsm( Ty: &Type, AsmString: *const c_char, AsmStringLen: size_t, Constraints: *const c_char, ConstraintsLen: size_t, SideEffects: Bool, AlignStack: Bool, Dialect: AsmDialect, ) -> &Value; pub(crate) fn LLVMRustInlineAsmVerify( Ty: &Type, Constraints: *const c_char, ConstraintsLen: size_t, ) -> bool; pub(crate) fn LLVMIsAConstantInt(value_ref: &Value) -> Option<&ConstantInt>; pub(crate) fn LLVMRustPrintModule<'a>( M: &'a Module, Output: *const c_char, Demangle: extern "C" fn(*const c_char, size_t, *mut c_char, size_t) -> size_t, ) -> LLVMRustResult; pub(crate) fn LLVMRustModuleBufferCreate(M: &Module) -> &'static mut ModuleBuffer; pub(crate) fn LLVMRustModuleBufferPtr(p: &ModuleBuffer) -> *const u8; pub(crate) fn LLVMRustModuleBufferLen(p: &ModuleBuffer) -> usize; pub(crate) fn LLVMRustModuleBufferFree(p: &'static mut ModuleBuffer); pub(crate) fn LLVMRustModuleCost(M: &Module) -> u64; pub(crate) fn LLVMRustThinLTOBufferCreate(M: &Module) -> &'static mut ThinLTOBuffer; pub(crate) fn LLVMRustThinLTOBufferFree(M: &'static mut ThinLTOBuffer); pub(crate) fn LLVMRustThinLTOBufferPtr(M: &ThinLTOBuffer) -> *const c_char; pub(crate) fn LLVMRustThinLTOBufferLen(M: &ThinLTOBuffer) -> size_t; pub(crate) fn LLVMRustCreateThinLTOData( Modules: *const ThinLTOModule, NumModules: c_uint, PreservedSymbols: *const *const c_char, PreservedSymbolsLen: c_uint, ) -> Option<&'static mut ThinLTOData>; pub(crate) fn LLVMRustPrepareThinLTOResolveWeak(Data: &ThinLTOData, Module: &Module) -> bool; pub(crate) fn LLVMRustPrepareThinLTOInternalize(Data: &ThinLTOData, Module: &Module) -> bool; pub(crate) fn LLVMRustFreeThinLTOData(Data: &'static mut ThinLTOData); pub(crate) fn LLVMRustParseBitcodeForLTO( Context: &Context, Data: *const u8, len: usize, Identifier: *const c_char, ) -> Option<&Module>; pub(crate) fn LLVMRustGetBitcodeSliceFromObjectData( Data: *const u8, len: usize, out_len: &mut usize, ) -> *const u8; pub(crate) fn LLVMRustAddDereferenceableAttr(Fn: &Value, 
index: c_uint, bytes: u64); pub(crate) fn LLVMRustAddDereferenceableOrNullAttr(Fn: &Value, index: c_uint, bytes: u64); pub(crate) fn LLVMRustPositionBuilderAtStart<'a>(B: &Builder<'a>, BB: &'a BasicBlock); }
{ let str = std::str::from_utf8_unchecked(std::slice::from_raw_parts(Name.cast(), NameLen)); let cstring = CString::new(str).expect("str with nul"); __LLVMRustGetOrInsertFunction(M, cstring.as_ptr(), FunctionTy) }
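// A minimal usage sketch (not part of the original file): how the length-based
// wrapper above composes with the safe helpers like `get_param`. It assumes a
// `Module` and `Context` obtained elsewhere; the function name "demo" and the
// parameter name "x" are purely illustrative.
unsafe fn declare_demo_fn<'a>(m: &'a Module, cx: &'a Context) -> &'a Value {
    let i32t = LLVMInt32TypeInContext(cx);
    let params = [i32t];
    // fn(i32) -> i32, non-variadic
    let fn_ty = LLVMFunctionType(i32t, params.as_ptr(), 1, False);
    let name = b"demo";
    // the wrapper turns (ptr, len) into the NUL-terminated string llvm 7 expects
    let llfn = LLVMRustGetOrInsertFunction(m, name.as_ptr().cast(), name.len(), fn_ty);
    Attribute::AlwaysInline.apply_llfn(AttributePlace::Function, llfn);
    let p0 = get_param(llfn, 0); // bounds-checked, so no segfault on a bad index
    set_value_name(p0, b"x");
    llfn
}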
preprocess_data_verify.py
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Program to preprocess training and test data using word2vec, dep2vec and fact2vec embeddings and prepare corresponding weight vectors to be used in CNN Copyright (C) 2016 Ubiquitous Knowledge Processing (UKP) Lab Technische Universität Darmstadt Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import numpy as np import cPickle from collections import defaultdict import sys, re import pandas as pd def preprocess_data(corpus): """ Process the training and test data usable for training """ claims = [] #trainset exp_file = corpus[0] nonexp_file = corpus[1] unv_file = corpus[2] #test set exp_file1 = corpus[3] nonexp_file1 = corpus[4] unv_file1 = corpus[5] vocab = defaultdict(float) with open(exp_file, "rb") as f: for line in f: claim = [] claim.append(line.strip()) orig_claim = clean_str(" ".join(claim)) #print rev words = set(orig_claim.split()) for word in words: vocab[word] += 1 datum = {"y":1, "text": orig_claim, "num_words": len(orig_claim.split()), "split": "train" } claims.append(datum) with open(nonexp_file, "rb") as f: for line in f: claim = [] claim.append(line.strip()) orig_claim = clean_str(" ".join(claim)) words = set(orig_claim.split()) for word in words: vocab[word] += 1 datum = {"y":0, "text": orig_claim, "num_words": len(orig_claim.split()), "split": "train" } claims.append(datum) with open(unv_file, "rb") as f: for line in f: claim = [] claim.append(line.strip()) orig_claim = clean_str(" ".join(claim)) words = set(orig_claim.split()) for word in words: vocab[word] += 1 datum = {"y":2, "text": orig_claim, "num_words": len(orig_claim.split()), "split": "train" } claims.append(datum) with open(exp_file1, "rb") as f: for line in f: claim = [] claim.append(line.strip()) orig_claim = clean_str(" ".join(claim)) words = set(orig_claim.split()) for word in words: vocab[word] += 1 datum = {"y":1, "text": orig_claim, "num_words": len(orig_claim.split()), "split": "test" } claims.append(datum) with open(nonexp_file1, "rb") as f: for line in f: claim = [] claim.append(line.strip()) orig_claim = clean_str(" ".join(claim)) words = set(orig_claim.split()) for word in words: vocab[word] += 1 datum = {"y":0, "text": orig_claim, "num_words": len(orig_claim.split()), "split": "test" } claims.append(datum) with open(unv_file1, "rb") as f: for line in f: claim = [] claim.append(line.strip()) orig_claim = clean_str(" ".join(claim)) words = set(orig_claim.split()) for word in words: vocab[word] += 1 datum = {"y":2, "text": orig_claim, "num_words": len(orig_claim.split()), "split": "test" } claims.append(datum) return claims, vocab def g
word_vecs, k=300): """ Get word matrix. W[i] is the vector for word indexed by i """ vocab_size = len(word_vecs) word_idx_map = dict() W = np.zeros(shape=(vocab_size+1, k)) W[0] = np.zeros(k) i = 1 for word in word_vecs: W[i] = word_vecs[word] word_idx_map[word] = i i += 1 return W, word_idx_map def load_bin_vec(fname, vocab): """ Loads 300x1 word vecs from Google (Mikolov) word2vec """ word_vecs = {} with open(fname, "rb") as f: header = f.readline() vocab_size, layer1_size = map(int, header.split()) binary_len = np.dtype('float32').itemsize * layer1_size for line in xrange(vocab_size): word = [] while True: ch = f.read(1) if ch == ' ': word = ''.join(word) break if ch != '\n': word.append(ch) if word in vocab: wvector = np.fromstring(f.read(binary_len), dtype='float32') word_vecs[word] = wvector[0:300] #print "wordvec", word_vecs[word] else: f.read(binary_len) return word_vecs def load_levy_vec(fname, vocab, k): #deps.words is a dependency word embeddings from Omer levy work word_vecs = {} with open(fname, "rb") as f: for line in f: wordvector = line.strip().split(' ') word = wordvector[0] #print ('word vector',word,wordvector) del wordvector[0] if word in vocab: vec = np.array(wordvector, dtype='float32') word_vecs[word]=vec[0:k] return word_vecs def add_unknown_words(word_vecs, vocab, min_df=1, k=300): """ For words that occur in at least min_df documents, create a separate word vector. 0.25 is chosen so the unknown vectors have (approximately) same variance as pre-trained ones """ for word in vocab: if word not in word_vecs and vocab[word] >= min_df: word_vecs[word] = np.random.uniform(-0.25,0.25,k) def clean_str(string): """ Tokenization/string cleaning for dataset """ string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string) string = re.sub(r"\'s", " \'s", string) string = re.sub(r"\'ve", " \'ve", string) string = re.sub(r"n\'t", " n\'t", string) string = re.sub(r"\'re", " \'re", string) string = re.sub(r"\'d", " \'d", string) string = re.sub(r"\'ll", " \'ll", string) string = re.sub(r",", " , ", string) string = re.sub(r"!", " ! ", string) string = re.sub(r"\(", " \( ", string) string = re.sub(r"\)", " \) ", string) string = re.sub(r"\?", " \? ", string) string = re.sub(r"\s{2,}", " ", string) return string.strip().lower() if __name__=="__main__": w2v_file = sys.argv[1] d2v_file = sys.argv[2] f2v_file = sys.argv[3] data_folder = ["../data/data_verifiable_unverifiable/train_data/verify_exp/verify_exp.txt","../data/data_verifiable_unverifiable/train_data/verify_nonexp/verify_nonexp.txt","../data/data_verifiable_unverifiable/train_data/verify_unv/unverify.txt","../data/data_verifiable_unverifiable/test_data/verify_exp/verify_exp.txt","../data/data_verifiable_unverifiable/test_data/verify_nonexp/verify_nonexp.txt","../data/data_verifiable_unverifiable/test_data/verify_unv/unverify.txt"] #train and dev splits #data_folder = ["../data/data_verifiable_unverifiable/train_dev_data/train_data/verify_exp/verify_exp.txt","../data/data_verifiable_unverifiable/train_dev_data/train_data/verify_nonexp/verify_nonexp.txt","../data/data_verifiable_unverifiable/train_dev_data/train_data/verify_unv/unverify.txt","../data/data_verifiable_unverifiable/train_dev_data/dev_data/verify_exp/verify_exp.txt","../data/data_verifiable_unverifiable/train_dev_data/dev_data/verify_nonexp/verify_nonexp.txt","../data/data_verifiable_unverifiable/train_dev_data/dev_data/verify_unv/unverify.txt"] print "loading data..." 
    claims, vocab = preprocess_data(data_folder)
    max_l = np.max(pd.DataFrame(claims)["num_words"])
    print "data loaded!"
    print "number of sentences: " + str(len(claims))
    print "vocab size: " + str(len(vocab))
    print "max sentence length: " + str(max_l)

    #loading word2vec embeddings
    print "loading word2vec vectors...",
    w2v = load_bin_vec(w2v_file, vocab)
    print "word2vec loaded!"
    print "num words already in word2vec: " + str(len(w2v))

    #load omerlevy word embeddings
    print "loading dependency word2vec vectors...",
    d2v = load_levy_vec(d2v_file, vocab, 300)
    print "dep2vec loaded!"
    print "num words already in dep2vec: " + str(len(d2v))

    #loading factual embeddings
    print "loading fact2vec vectors...",
    f2v = load_bin_vec(f2v_file, vocab)
    print "fact2vec loaded!"
    print "num words already in fact2vec: " + str(len(f2v))

    #Add distributions for unseen words in w2v
    add_unknown_words(w2v, vocab)
    # get weight vector using w2v
    W_w2v, word_idx_map_w2v = get_W(w2v)

    #Add distributions for unseen words in d2v
    add_unknown_words(d2v, vocab)
    # get weight vector using d2v
    W_d2v, word_idx_map_d2v = get_W(d2v)

    #Add distributions for unseen words in f2v
    add_unknown_words(f2v, vocab)
    # get weight vector using f2v
    W_f2v, word_idx_map_f2v = get_W(f2v)

    #random vectors and weights
    rand_vecs = {}
    add_unknown_words(rand_vecs, vocab)
    W2, _ = get_W(rand_vecs)

    # consider 100 dimensions from each embedding to form a 300-dimension stacked embedding vector
    k = 100
    #concatenated embeddings - word2vec, dep2vec, fact2vec embeddings
    cvecs = {}
    #concatenate all vectors into the final vector
    for voc in w2v:
        if voc in d2v and voc in f2v:
            cvecs[voc] = np.concatenate([w2v[voc][0:k], d2v[voc][0:k], f2v[voc][0:k]])
        elif voc not in d2v and voc in f2v:
            cvecs[voc] = np.concatenate([w2v[voc][0:2*k], f2v[voc][0:k]])
        elif voc in d2v and voc not in f2v:
            cvecs[voc] = np.concatenate([w2v[voc][0:2*k], d2v[voc][0:k]])
        else:
            cvecs[voc] = w2v[voc]

    #Add distributions for unseen words in cvecs
    add_unknown_words(cvecs, vocab)
    # get weight vector using cvecs
    W_concat, word_idx_map_concat = get_W(cvecs)

    cPickle.dump([claims, W_w2v, W2, word_idx_map_w2v, vocab, max_l], open("claims_verify_word2vec.p", "wb"))
    cPickle.dump([claims, W_d2v, W2, word_idx_map_d2v, vocab, max_l], open("claims_verify_dep2vec.p", "wb"))
    cPickle.dump([claims, W_f2v, W2, word_idx_map_f2v, vocab, max_l], open("claims_verify_fact2vec.p", "wb"))
    cPickle.dump([claims, W_concat, W2, word_idx_map_concat, vocab, max_l], open("claims_verify_concat.p", "wb"))
    print ("word2vec, dep2vec, fact2vec, stacked pickle datasets are created!")
et_W(
api.rs
use reqwest::{Client, Response}; use std::marker::PhantomData; use regex::Regex; pub(crate) trait ApiClient { fn get_client(&self) -> &Client; fn
<T>(&self, route: &str) -> T where for<'de> T: serde::Deserialize<'de> { let result = self.get_client().get(route).send(); match result { Ok(mut response) => { if !response.status().is_success() { panic!(format!("Unable to access resource: {}", response.status())); } let deserialized = response.json::<T>(); match deserialized { Ok(resource) => resource, Err(error) => { panic!(format!("Unable to deserialize response body: {}", error)); } } }, Err(error) => { panic!(format!("Unable to complete HTTP request: {}", error)); } } } fn get_many<T>(&self, route: &str, since: Option<usize>, limit: Option<usize>) -> Vec<T> where for<'de> T: serde::Deserialize<'de> { let mut paginator = Paginator::<T>::new(self.get_client(), route.to_string(), since, limit); let mut all_items = Vec::<T>::new(); while let Some(mut new_items) = paginator.next() { all_items.append(&mut new_items); } all_items } } struct Paginator<'a, T> where for<'de> T: serde::Deserialize<'de> { count: usize, route: String, client: &'a Client, since: usize, limit: usize, next_link: Option<String>, phantom: PhantomData<T> } impl<'a, T> Paginator<'a, T> where for<'de> T: serde::Deserialize<'de> { fn new(client: &Client, route: String, since: Option<usize>, limit: Option<usize>) -> Paginator<T> { Paginator { count: 0, route, client, since: since.unwrap_or(1), limit: limit.unwrap_or(100), next_link: None, phantom: PhantomData } } fn get_next_link_from(&self, r: Response) -> Option<String> { match r.headers().get("Link") { None => None, Some(link_header) => { let pagination_link: Result<&str, _> = link_header.to_str(); match pagination_link { Ok(header) => { let links: Vec<&str> = header.split(";").collect(); if links.len() > 0 { let rgx = Regex::new(r"[<>]").unwrap(); Some(rgx.replace_all(links[0], "").to_string()) } else { None } }, Err(e) => panic!(e) } } } } fn deserialize_new_items_from<R>(&self, response: &mut Response) -> Vec<R> where for<'de> R: serde::Deserialize<'de> { let deserialized = response.json::<Vec<R>>(); let new_items = match deserialized { Ok(result) => { result }, Err(error) => { panic!(format!("Unable to deserialize response body: {}", error)); } }; new_items } } impl<'a, T> Iterator for Paginator<'a, T> where for<'de> T: serde::Deserialize<'de> { type Item = Vec<T>; fn next(&mut self) -> Option<Self::Item> { let mut response = match &self.next_link { None => { self.client .get(format!("{}?since={}", self.route, self.since).as_str()) .send() .unwrap() }, Some(next_link) => { self.client .get(next_link.as_str()) .send() .unwrap() } }; let mut new_items = self.deserialize_new_items_from(&mut response); let len = new_items.len(); if len == 0 || self.count >= self.limit { None } else if len + self.count > self.limit { let remainder = self.limit - self.count; self.count += remainder; new_items.truncate(remainder); Some(new_items) } else { self.count += new_items.len(); self.next_link = self.get_next_link_from(response); Some(new_items) } } }
get
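The Paginator in the api.rs record above follows the HTTP Link header page by page until it has collected `limit` items. For readers outside Rust, here is a hedged sketch of the same loop using Python's requests library; `route`, `since`, and `limit` mirror the trait method, while the rel="next" lookup and the JSON-array payload shape are assumptions for illustration, not part of the crate above:

import requests

def get_many(route, since=1, limit=100):
    # Follow the Link header until `limit` items are collected
    # or a page comes back empty, as the Rust Paginator does.
    items = []
    url = "%s?since=%d" % (route, since)
    while url and len(items) < limit:
        resp = requests.get(url)
        resp.raise_for_status()
        batch = resp.json()
        if not batch:
            break
        items.extend(batch)
        # requests parses the Link header into resp.links
        url = resp.links.get("next", {}).get("url")
    return items[:limit]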
create_terminals_bi.py
#!/usr/bin/env python import sys from hunmisc.corpustools.tsv_tools import sentence_iterator from common import sanitize_word TEMPLATE = ('{0} -> {1}_{0}\n[graph] "({1}<root> / {1})"\n' + '[fourlang] "({1}<root> / {1})"\n') def main(): seen = set() with open(sys.argv[1]) as stream: for sentence in sentence_iterator(stream, comment_tag='#'): for tok in sentence: word = sanitize_word(tok[1]) pos = tok[3] if (word, pos) not in seen:
if __name__ == "__main__": main()
print(TEMPLATE.format(pos, word)) seen.add((word, pos))
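For concreteness, the TEMPLATE in the record above expands one (pos, word) pair into a three-line terminal rule; a quick worked example (the 'NN'/'dog' values are made up for illustration):

TEMPLATE = ('{0} -> {1}_{0}\n[graph] "({1}<root> / {1})"\n' +
            '[fourlang] "({1}<root> / {1})"\n')
print(TEMPLATE.format('NN', 'dog'))
# NN -> dog_NN
# [graph] "(dog<root> / dog)"
# [fourlang] "(dog<root> / dog)"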
generated.rs
// ================================================================= // // * WARNING * // // This file is generated! // // Changes made to this file will be overwritten. If changes are // required to the generated code, the service_crategen project // must be updated to generate the changes. // // ================================================================= use std::error::Error; use std::fmt; use std::io; #[allow(warnings)] use futures::future; use futures::Future; use rusoto_core::region; use rusoto_core::request::DispatchSignedRequest; use rusoto_core::{Client, RusotoFuture}; use rusoto_core::credential::{CredentialsError, ProvideAwsCredentials}; use rusoto_core::request::HttpDispatchError; use rusoto_core::param::{Params, ServiceParams}; use rusoto_core::signature::SignedRequest; use rusoto_core::xmlerror::*; use rusoto_core::xmlutil::{ characters, end_element, find_start_element, peek_at_name, skip_tree, start_element, }; use rusoto_core::xmlutil::{Next, Peek, XmlParseError, XmlResponse}; use serde_urlencoded; use std::str::FromStr; use xml::reader::ParserConfig; use xml::reader::XmlEvent; use xml::EventReader; enum DeserializerNext { Close, Skip, Element(String), } #[derive(Default, Debug, Clone, PartialEq)] pub struct AddRoleToDBClusterMessage { /// <p>The name of the DB cluster to associate the IAM role with.</p> pub db_cluster_identifier: String, /// <p>The Amazon Resource Name (ARN) of the IAM role to associate with the Neptune DB cluster, for example <code>arn:aws:iam::123456789012:role/NeptuneAccessRole</code>.</p> pub role_arn: String, } /// Serialize `AddRoleToDBClusterMessage` contents to a `SignedRequest`. struct AddRoleToDBClusterMessageSerializer; impl AddRoleToDBClusterMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &AddRoleToDBClusterMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "DBClusterIdentifier"), &obj.db_cluster_identifier, ); params.put(&format!("{}{}", prefix, "RoleArn"), &obj.role_arn); } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct AddSourceIdentifierToSubscriptionMessage { /// <p><p>The identifier of the event source to be added.</p> <p>Constraints:</p> <ul> <li> <p>If the source type is a DB instance, then a <code>DBInstanceIdentifier</code> must be supplied.</p> </li> <li> <p>If the source type is a DB security group, a <code>DBSecurityGroupName</code> must be supplied.</p> </li> <li> <p>If the source type is a DB parameter group, a <code>DBParameterGroupName</code> must be supplied.</p> </li> <li> <p>If the source type is a DB snapshot, a <code>DBSnapshotIdentifier</code> must be supplied.</p> </li> </ul></p> pub source_identifier: String, /// <p>The name of the event notification subscription you want to add a source identifier to.</p> pub subscription_name: String, } /// Serialize `AddSourceIdentifierToSubscriptionMessage` contents to a `SignedRequest`. 
struct AddSourceIdentifierToSubscriptionMessageSerializer; impl AddSourceIdentifierToSubscriptionMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &AddSourceIdentifierToSubscriptionMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "SourceIdentifier"), &obj.source_identifier, ); params.put( &format!("{}{}", prefix, "SubscriptionName"), &obj.subscription_name, ); } } #[derive(Default, Debug, Clone, PartialEq)] pub struct AddSourceIdentifierToSubscriptionResult { pub event_subscription: Option<EventSubscription>, } struct AddSourceIdentifierToSubscriptionResultDeserializer; impl AddSourceIdentifierToSubscriptionResultDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<AddSourceIdentifierToSubscriptionResult, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = AddSourceIdentifierToSubscriptionResult::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "EventSubscription" => { obj.event_subscription = Some(try!( EventSubscriptionDeserializer::deserialize("EventSubscription", stack) )); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct AddTagsToResourceMessage { /// <p>The Amazon Neptune resource that the tags are added to. This value is an Amazon Resource Name (ARN). For information about creating an ARN, see <a href="http://docs.aws.amazon.com/neptune/latest/UserGuide/tagging.ARN.html#tagging.ARN.Constructing"> Constructing an Amazon Resource Name (ARN)</a>.</p> pub resource_name: String, /// <p>The tags to be assigned to the Amazon Neptune resource.</p> pub tags: Vec<Tag>, } /// Serialize `AddTagsToResourceMessage` contents to a `SignedRequest`. struct AddTagsToResourceMessageSerializer; impl AddTagsToResourceMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &AddTagsToResourceMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put(&format!("{}{}", prefix, "ResourceName"), &obj.resource_name); TagListSerializer::serialize(params, &format!("{}{}", prefix, "Tag"), &obj.tags); } } struct ApplyMethodDeserializer; impl ApplyMethodDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct ApplyPendingMaintenanceActionMessage { /// <p>The pending maintenance action to apply to this resource.</p> <p>Valid values: <code>system-update</code>, <code>db-upgrade</code> </p> pub apply_action: String, /// <p><p>A value that specifies the type of opt-in request, or undoes an opt-in request. 
An opt-in request of type <code>immediate</code> can&#39;t be undone.</p> <p>Valid values:</p> <ul> <li> <p> <code>immediate</code> - Apply the maintenance action immediately.</p> </li> <li> <p> <code>next-maintenance</code> - Apply the maintenance action during the next maintenance window for the resource.</p> </li> <li> <p> <code>undo-opt-in</code> - Cancel any existing <code>next-maintenance</code> opt-in requests.</p> </li> </ul></p> pub opt_in_type: String, /// <p>The Amazon Resource Name (ARN) of the resource that the pending maintenance action applies to. For information about creating an ARN, see <a href="http://docs.aws.amazon.com/neptune/latest/UserGuide/tagging.ARN.html#tagging.ARN.Constructing"> Constructing an Amazon Resource Name (ARN)</a>.</p> pub resource_identifier: String, } /// Serialize `ApplyPendingMaintenanceActionMessage` contents to a `SignedRequest`. struct ApplyPendingMaintenanceActionMessageSerializer; impl ApplyPendingMaintenanceActionMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &ApplyPendingMaintenanceActionMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put(&format!("{}{}", prefix, "ApplyAction"), &obj.apply_action); params.put(&format!("{}{}", prefix, "OptInType"), &obj.opt_in_type); params.put( &format!("{}{}", prefix, "ResourceIdentifier"), &obj.resource_identifier, ); } } #[derive(Default, Debug, Clone, PartialEq)] pub struct ApplyPendingMaintenanceActionResult { pub resource_pending_maintenance_actions: Option<ResourcePendingMaintenanceActions>, } struct ApplyPendingMaintenanceActionResultDeserializer; impl ApplyPendingMaintenanceActionResultDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<ApplyPendingMaintenanceActionResult, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = ApplyPendingMaintenanceActionResult::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "ResourcePendingMaintenanceActions" => { obj.resource_pending_maintenance_actions = Some(try!( ResourcePendingMaintenanceActionsDeserializer::deserialize( "ResourcePendingMaintenanceActions", stack ) )); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } struct AttributeValueListDeserializer; impl AttributeValueListDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<String>, XmlParseError> { let mut obj = vec![]; try!(start_element(tag_name, stack)); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. 
})) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => { if name == "AttributeValue" { obj.push(try!(StringDeserializer::deserialize( "AttributeValue", stack ))); } else { skip_tree(stack); } } DeserializerNext::Close => { try!(end_element(tag_name, stack)); break; } DeserializerNext::Skip => { stack.next(); } } } Ok(obj) } } /// Serialize `AttributeValueList` contents to a `SignedRequest`. struct AttributeValueListSerializer; impl AttributeValueListSerializer { fn serialize(params: &mut Params, name: &str, obj: &Vec<String>) { for (index, obj) in obj.iter().enumerate() { let key = format!("{}.member.{}", name, index + 1); params.put(&key, &obj); } } } /// <p><p>Contains Availability Zone information.</p> <p> This data type is used as an element in the following data type:</p> <ul> <li> <p> <a>OrderableDBInstanceOption</a> </p> </li> </ul></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct AvailabilityZone { /// <p>The name of the availability zone.</p> pub name: Option<String>, } struct AvailabilityZoneDeserializer; impl AvailabilityZoneDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<AvailabilityZone, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = AvailabilityZone::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "Name" => { obj.name = Some(try!(StringDeserializer::deserialize("Name", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } struct AvailabilityZoneListDeserializer; impl AvailabilityZoneListDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<AvailabilityZone>, XmlParseError> { let mut obj = vec![]; try!(start_element(tag_name, stack)); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => { if name == "AvailabilityZone" { obj.push(try!(AvailabilityZoneDeserializer::deserialize( "AvailabilityZone", stack ))); } else { skip_tree(stack); } } DeserializerNext::Close => { try!(end_element(tag_name, stack)); break; } DeserializerNext::Skip => { stack.next(); } } } Ok(obj) } } struct AvailabilityZonesDeserializer; impl AvailabilityZonesDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<String>, XmlParseError> { let mut obj = vec![]; try!(start_element(tag_name, stack)); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. 
})) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => { if name == "AvailabilityZone" { obj.push(try!(StringDeserializer::deserialize( "AvailabilityZone", stack ))); } else { skip_tree(stack); } } DeserializerNext::Close => { try!(end_element(tag_name, stack)); break; } DeserializerNext::Skip => { stack.next(); } } } Ok(obj) } } /// Serialize `AvailabilityZones` contents to a `SignedRequest`. struct AvailabilityZonesSerializer; impl AvailabilityZonesSerializer { fn serialize(params: &mut Params, name: &str, obj: &Vec<String>) { for (index, obj) in obj.iter().enumerate() { let key = format!("{}.member.{}", name, index + 1); params.put(&key, &obj); } } } struct BooleanDeserializer; impl BooleanDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<bool, XmlParseError> { try!(start_element(tag_name, stack)); let obj = bool::from_str(try!(characters(stack)).as_ref()).unwrap(); try!(end_element(tag_name, stack)); Ok(obj) } } struct BooleanOptionalDeserializer; impl BooleanOptionalDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<bool, XmlParseError> { try!(start_element(tag_name, stack)); let obj = bool::from_str(try!(characters(stack)).as_ref()).unwrap(); try!(end_element(tag_name, stack)); Ok(obj) } } /// <p> This data type is used as a response element in the action <a>DescribeDBEngineVersions</a>. </p> #[derive(Default, Debug, Clone, PartialEq)] pub struct CharacterSet { /// <p>The description of the character set.</p> pub character_set_description: Option<String>, /// <p>The name of the character set.</p> pub character_set_name: Option<String>, } struct CharacterSetDeserializer; impl CharacterSetDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<CharacterSet, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = CharacterSet::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "CharacterSetDescription" => { obj.character_set_description = Some(try!( StringDeserializer::deserialize("CharacterSetDescription", stack) )); } "CharacterSetName" => { obj.character_set_name = Some(try!(StringDeserializer::deserialize( "CharacterSetName", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p>The configuration setting for the log types to be enabled for export to CloudWatch Logs for a specific DB instance or DB cluster.</p> #[derive(Default, Debug, Clone, PartialEq)] pub struct CloudwatchLogsExportConfiguration { /// <p>The list of log types to disable.</p> pub disable_log_types: Option<Vec<String>>, /// <p>The list of log types to enable.</p> pub enable_log_types: Option<Vec<String>>, } /// Serialize `CloudwatchLogsExportConfiguration` contents to a `SignedRequest`. 
struct CloudwatchLogsExportConfigurationSerializer; impl CloudwatchLogsExportConfigurationSerializer { fn serialize(params: &mut Params, name: &str, obj: &CloudwatchLogsExportConfiguration) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.disable_log_types { LogTypeListSerializer::serialize( params, &format!("{}{}", prefix, "DisableLogTypes"), field_value, ); } if let Some(ref field_value) = obj.enable_log_types { LogTypeListSerializer::serialize( params, &format!("{}{}", prefix, "EnableLogTypes"), field_value, ); } } } #[derive(Default, Debug, Clone, PartialEq)] pub struct CopyDBClusterParameterGroupMessage { /// <p><p>The identifier or Amazon Resource Name (ARN) for the source DB cluster parameter group. For information about creating an ARN, see <a href="http://docs.aws.amazon.com/neptune/latest/UserGuide/tagging.ARN.html#tagging.ARN.Constructing"> Constructing an Amazon Resource Name (ARN)</a>. </p> <p>Constraints:</p> <ul> <li> <p>Must specify a valid DB cluster parameter group.</p> </li> <li> <p>If the source DB cluster parameter group is in the same AWS Region as the copy, specify a valid DB parameter group identifier, for example <code>my-db-cluster-param-group</code>, or a valid ARN.</p> </li> <li> <p>If the source DB parameter group is in a different AWS Region than the copy, specify a valid DB cluster parameter group ARN, for example <code>arn:aws:rds:us-east-1:123456789012:cluster-pg:custom-cluster-group1</code>.</p> </li> </ul></p> pub source_db_cluster_parameter_group_identifier: String, pub tags: Option<Vec<Tag>>, /// <p>A description for the copied DB cluster parameter group.</p> pub target_db_cluster_parameter_group_description: String, /// <p>The identifier for the copied DB cluster parameter group.</p> <p>Constraints:</p> <ul> <li> <p>Cannot be null, empty, or blank</p> </li> <li> <p>Must contain from 1 to 255 letters, numbers, or hyphens</p> </li> <li> <p>First character must be a letter</p> </li> <li> <p>Cannot end with a hyphen or contain two consecutive hyphens</p> </li> </ul> <p>Example: <code>my-cluster-param-group1</code> </p> pub target_db_cluster_parameter_group_identifier: String, } /// Serialize `CopyDBClusterParameterGroupMessage` contents to a `SignedRequest`. 
struct CopyDBClusterParameterGroupMessageSerializer; impl CopyDBClusterParameterGroupMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &CopyDBClusterParameterGroupMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "SourceDBClusterParameterGroupIdentifier"), &obj.source_db_cluster_parameter_group_identifier, ); if let Some(ref field_value) = obj.tags { TagListSerializer::serialize(params, &format!("{}{}", prefix, "Tag"), field_value); } params.put( &format!("{}{}", prefix, "TargetDBClusterParameterGroupDescription"), &obj.target_db_cluster_parameter_group_description, ); params.put( &format!("{}{}", prefix, "TargetDBClusterParameterGroupIdentifier"), &obj.target_db_cluster_parameter_group_identifier, ); } } #[derive(Default, Debug, Clone, PartialEq)] pub struct CopyDBClusterParameterGroupResult { pub db_cluster_parameter_group: Option<DBClusterParameterGroup>, } struct CopyDBClusterParameterGroupResultDeserializer; impl CopyDBClusterParameterGroupResultDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<CopyDBClusterParameterGroupResult, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = CopyDBClusterParameterGroupResult::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "DBClusterParameterGroup" => { obj.db_cluster_parameter_group = Some(try!(DBClusterParameterGroupDeserializer::deserialize( "DBClusterParameterGroup", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct CopyDBClusterSnapshotMessage { /// <p>True to copy all tags from the source DB cluster snapshot to the target DB cluster snapshot, and otherwise false. The default is false.</p> pub copy_tags: Option<bool>, /// <p>The AWS AWS KMS key ID for an encrypted DB cluster snapshot. The KMS key ID is the Amazon Resource Name (ARN), KMS key identifier, or the KMS key alias for the KMS encryption key. </p> <p>If you copy an unencrypted DB cluster snapshot and specify a value for the <code>KmsKeyId</code> parameter, Amazon Neptune encrypts the target DB cluster snapshot using the specified KMS encryption key. </p> <p>If you copy an encrypted DB cluster snapshot from your AWS account, you can specify a value for <code>KmsKeyId</code> to encrypt the copy with a new KMS encryption key. If you don't specify a value for <code>KmsKeyId</code>, then the copy of the DB cluster snapshot is encrypted with the same KMS key as the source DB cluster snapshot. </p> <p>If you copy an encrypted DB cluster snapshot that is shared from another AWS account, then you must specify a value for <code>KmsKeyId</code>. </p> <p>To copy an encrypted DB cluster snapshot to another AWS Region, you must set <code>KmsKeyId</code> to the KMS key ID you want to use to encrypt the copy of the DB cluster snapshot in the destination AWS Region. 
KMS encryption keys are specific to the AWS Region that they are created in, and you can't use encryption keys from one AWS Region in another AWS Region.</p> pub kms_key_id: Option<String>, /// <p>The URL that contains a Signature Version 4 signed request for the <code>CopyDBClusterSnapshot</code> API action in the AWS Region that contains the source DB cluster snapshot to copy. The <code>PreSignedUrl</code> parameter must be used when copying an encrypted DB cluster snapshot from another AWS Region.</p> <p>The pre-signed URL must be a valid request for the <code>CopyDBSClusterSnapshot</code> API action that can be executed in the source AWS Region that contains the encrypted DB cluster snapshot to be copied. The pre-signed URL request must contain the following parameter values:</p> <ul> <li> <p> <code>KmsKeyId</code> - The AWS KMS key identifier for the key to use to encrypt the copy of the DB cluster snapshot in the destination AWS Region. This is the same identifier for both the <code>CopyDBClusterSnapshot</code> action that is called in the destination AWS Region, and the action contained in the pre-signed URL.</p> </li> <li> <p> <code>DestinationRegion</code> - The name of the AWS Region that the DB cluster snapshot will be created in.</p> </li> <li> <p> <code>SourceDBClusterSnapshotIdentifier</code> - The DB cluster snapshot identifier for the encrypted DB cluster snapshot to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source AWS Region. For example, if you are copying an encrypted DB cluster snapshot from the us-west-2 AWS Region, then your <code>SourceDBClusterSnapshotIdentifier</code> looks like the following example: <code>arn:aws:rds:us-west-2:123456789012:cluster-snapshot:neptune-cluster1-snapshot-20161115</code>.</p> </li> </ul> <p>To learn how to generate a Signature Version 4 signed request, see <a href="http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html"> Authenticating Requests: Using Query Parameters (AWS Signature Version 4)</a> and <a href="http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html"> Signature Version 4 Signing Process</a>.</p> pub pre_signed_url: Option<String>, /// <p>The identifier of the DB cluster snapshot to copy. This parameter is not case-sensitive.</p> <p>You can't copy an encrypted, shared DB cluster snapshot from one AWS Region to another.</p> <p>Constraints:</p> <ul> <li> <p>Must specify a valid system snapshot in the "available" state.</p> </li> <li> <p>If the source snapshot is in the same AWS Region as the copy, specify a valid DB snapshot identifier.</p> </li> <li> <p>If the source snapshot is in a different AWS Region than the copy, specify a valid DB cluster snapshot ARN. </p> </li> </ul> <p>Example: <code>my-cluster-snapshot1</code> </p> pub source_db_cluster_snapshot_identifier: String, pub tags: Option<Vec<Tag>>, /// <p>The identifier of the new DB cluster snapshot to create from the source DB cluster snapshot. This parameter is not case-sensitive.</p> <p>Constraints:</p> <ul> <li> <p>Must contain from 1 to 63 letters, numbers, or hyphens.</p> </li> <li> <p>First character must be a letter.</p> </li> <li> <p>Cannot end with a hyphen or contain two consecutive hyphens.</p> </li> </ul> <p>Example: <code>my-cluster-snapshot2</code> </p> pub target_db_cluster_snapshot_identifier: String, } /// Serialize `CopyDBClusterSnapshotMessage` contents to a `SignedRequest`. 
struct CopyDBClusterSnapshotMessageSerializer; impl CopyDBClusterSnapshotMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &CopyDBClusterSnapshotMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.copy_tags { params.put( &format!("{}{}", prefix, "CopyTags"), &field_value.to_string(), ); } if let Some(ref field_value) = obj.kms_key_id { params.put(&format!("{}{}", prefix, "KmsKeyId"), &field_value); } if let Some(ref field_value) = obj.pre_signed_url { params.put(&format!("{}{}", prefix, "PreSignedUrl"), &field_value); } params.put( &format!("{}{}", prefix, "SourceDBClusterSnapshotIdentifier"), &obj.source_db_cluster_snapshot_identifier, ); if let Some(ref field_value) = obj.tags { TagListSerializer::serialize(params, &format!("{}{}", prefix, "Tag"), field_value); } params.put( &format!("{}{}", prefix, "TargetDBClusterSnapshotIdentifier"), &obj.target_db_cluster_snapshot_identifier, ); } } #[derive(Default, Debug, Clone, PartialEq)] pub struct CopyDBClusterSnapshotResult { pub db_cluster_snapshot: Option<DBClusterSnapshot>, } struct CopyDBClusterSnapshotResultDeserializer; impl CopyDBClusterSnapshotResultDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<CopyDBClusterSnapshotResult, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = CopyDBClusterSnapshotResult::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "DBClusterSnapshot" => { obj.db_cluster_snapshot = Some(try!( DBClusterSnapshotDeserializer::deserialize("DBClusterSnapshot", stack) )); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct CopyDBParameterGroupMessage { /// <p><p> The identifier or ARN for the source DB parameter group. For information about creating an ARN, see <a href="http://docs.aws.amazon.com/neptune/latest/UserGuide/tagging.ARN.html#tagging.ARN.Constructing"> Constructing an Amazon Resource Name (ARN)</a>. </p> <p>Constraints:</p> <ul> <li> <p>Must specify a valid DB parameter group.</p> </li> <li> <p>Must specify a valid DB parameter group identifier, for example <code>my-db-param-group</code>, or a valid ARN.</p> </li> </ul></p> pub source_db_parameter_group_identifier: String, pub tags: Option<Vec<Tag>>, /// <p>A description for the copied DB parameter group.</p> pub target_db_parameter_group_description: String, /// <p>The identifier for the copied DB parameter group.</p> <p>Constraints:</p> <ul> <li> <p>Cannot be null, empty, or blank</p> </li> <li> <p>Must contain from 1 to 255 letters, numbers, or hyphens</p> </li> <li> <p>First character must be a letter</p> </li> <li> <p>Cannot end with a hyphen or contain two consecutive hyphens</p> </li> </ul> <p>Example: <code>my-db-parameter-group</code> </p> pub target_db_parameter_group_identifier: String, } /// Serialize `CopyDBParameterGroupMessage` contents to a `SignedRequest`. 
struct CopyDBParameterGroupMessageSerializer; impl CopyDBParameterGroupMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &CopyDBParameterGroupMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "SourceDBParameterGroupIdentifier"), &obj.source_db_parameter_group_identifier, ); if let Some(ref field_value) = obj.tags { TagListSerializer::serialize(params, &format!("{}{}", prefix, "Tag"), field_value); } params.put( &format!("{}{}", prefix, "TargetDBParameterGroupDescription"), &obj.target_db_parameter_group_description, ); params.put( &format!("{}{}", prefix, "TargetDBParameterGroupIdentifier"), &obj.target_db_parameter_group_identifier, ); } } #[derive(Default, Debug, Clone, PartialEq)] pub struct CopyDBParameterGroupResult { pub db_parameter_group: Option<DBParameterGroup>, } struct CopyDBParameterGroupResultDeserializer; impl CopyDBParameterGroupResultDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<CopyDBParameterGroupResult, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = CopyDBParameterGroupResult::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "DBParameterGroup" => { obj.db_parameter_group = Some(try!( DBParameterGroupDeserializer::deserialize("DBParameterGroup", stack) )); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct CreateDBClusterMessage { /// <p>A list of EC2 Availability Zones that instances in the DB cluster can be created in. </p> pub availability_zones: Option<Vec<String>>, /// <p><p>The number of days for which automated backups are retained. You must specify a minimum value of 1.</p> <p>Default: 1</p> <p>Constraints:</p> <ul> <li> <p>Must be a value from 1 to 35</p> </li> </ul></p> pub backup_retention_period: Option<i64>, /// <p>A value that indicates that the DB cluster should be associated with the specified CharacterSet.</p> pub character_set_name: Option<String>, /// <p>The DB cluster identifier. This parameter is stored as a lowercase string.</p> <p>Constraints:</p> <ul> <li> <p>Must contain from 1 to 63 letters, numbers, or hyphens.</p> </li> <li> <p>First character must be a letter.</p> </li> <li> <p>Cannot end with a hyphen or contain two consecutive hyphens.</p> </li> </ul> <p>Example: <code>my-cluster1</code> </p> pub db_cluster_identifier: String, /// <p><p> The name of the DB cluster parameter group to associate with this DB cluster. If this argument is omitted, the default is used. </p> <p>Constraints:</p> <ul> <li> <p>If supplied, must match the name of an existing DBClusterParameterGroup.</p> </li> </ul></p> pub db_cluster_parameter_group_name: Option<String>, /// <p>A DB subnet group to associate with this DB cluster.</p> <p>Constraints: Must match the name of an existing DBSubnetGroup. Must not be default.</p> <p>Example: <code>mySubnetgroup</code> </p> pub db_subnet_group_name: Option<String>, /// <p>The name for your database of up to 64 alpha-numeric characters. 
If you do not provide a name, Amazon Neptune will not create a database in the DB cluster you are creating.</p> pub database_name: Option<String>, /// <p>True to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts, and otherwise false.</p> <p>Default: <code>false</code> </p> pub enable_iam_database_authentication: Option<bool>, /// <p>The name of the database engine to be used for this DB cluster.</p> <p>Valid Values: <code>neptune</code> </p> pub engine: String, /// <p>The version number of the database engine to use.</p> <p>Example: <code>1.0.1</code> </p> pub engine_version: Option<String>, /// <p>The AWS KMS key identifier for an encrypted DB cluster.</p> <p>The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption key. If you are creating a DB cluster with the same AWS account that owns the KMS encryption key used to encrypt the new DB cluster, then you can use the KMS key alias instead of the ARN for the KMS encryption key.</p> <p>If an encryption key is not specified in <code>KmsKeyId</code>:</p> <ul> <li> <p>If <code>ReplicationSourceIdentifier</code> identifies an encrypted source, then Amazon Neptune will use the encryption key used to encrypt the source. Otherwise, Amazon Neptune will use your default encryption key. </p> </li> <li> <p>If the <code>StorageEncrypted</code> parameter is true and <code>ReplicationSourceIdentifier</code> is not specified, then Amazon Neptune will use your default encryption key.</p> </li> </ul> <p>AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS Region.</p> <p>If you create a Read Replica of an encrypted DB cluster in another AWS Region, you must set <code>KmsKeyId</code> to a KMS key ID that is valid in the destination AWS Region. This key is used to encrypt the Read Replica in that AWS Region.</p> pub kms_key_id: Option<String>, /// <p>The password for the master database user. This password can contain any printable ASCII character except "/", """, or "@".</p> <p>Constraints: Must contain from 8 to 41 characters.</p> pub master_user_password: Option<String>, /// <p><p>The name of the master user for the DB cluster.</p> <p>Constraints:</p> <ul> <li> <p>Must be 1 to 16 letters or numbers.</p> </li> <li> <p>First character must be a letter.</p> </li> <li> <p>Cannot be a reserved word for the chosen database engine.</p> </li> </ul></p> pub master_username: Option<String>, /// <p>A value that indicates that the DB cluster should be associated with the specified option group.</p> <p>Permanent options can't be removed from an option group. The option group can't be removed from a DB cluster once it is associated with a DB cluster.</p> pub option_group_name: Option<String>, /// <p>The port number on which the instances in the DB cluster accept connections.</p> <p> Default: <code>8182</code> </p> pub port: Option<i64>, /// <p>A URL that contains a Signature Version 4 signed request for the <code>CreateDBCluster</code> action to be called in the source AWS Region where the DB cluster is replicated from. 
You only need to specify <code>PreSignedUrl</code> when you are performing cross-region replication from an encrypted DB cluster.</p> <p>The pre-signed URL must be a valid request for the <code>CreateDBCluster</code> API action that can be executed in the source AWS Region that contains the encrypted DB cluster to be copied.</p> <p>The pre-signed URL request must contain the following parameter values:</p> <ul> <li> <p> <code>KmsKeyId</code> - The AWS KMS key identifier for the key to use to encrypt the copy of the DB cluster in the destination AWS Region. This should refer to the same KMS key for both the <code>CreateDBCluster</code> action that is called in the destination AWS Region, and the action contained in the pre-signed URL.</p> </li> <li> <p> <code>DestinationRegion</code> - The name of the AWS Region that Read Replica will be created in.</p> </li> <li> <p> <code>ReplicationSourceIdentifier</code> - The DB cluster identifier for the encrypted DB cluster to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source AWS Region. For example, if you are copying an encrypted DB cluster from the us-west-2 AWS Region, then your <code>ReplicationSourceIdentifier</code> would look like Example: <code>arn:aws:rds:us-west-2:123456789012:cluster:neptune-cluster1</code>.</p> </li> </ul> <p>To learn how to generate a Signature Version 4 signed request, see <a href="http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html"> Authenticating Requests: Using Query Parameters (AWS Signature Version 4)</a> and <a href="http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html"> Signature Version 4 Signing Process</a>.</p> pub pre_signed_url: Option<String>, /// <p><p>The daily time range during which automated backups are created if automated backups are enabled using the <code>BackupRetentionPeriod</code> parameter. </p> <p>The default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region. To see the time blocks available, see <a href="http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/AdjustingTheMaintenanceWindow.html"> Adjusting the Preferred Maintenance Window</a> in the <i>Amazon Neptune User Guide.</i> </p> <p>Constraints:</p> <ul> <li> <p>Must be in the format <code>hh24:mi-hh24:mi</code>.</p> </li> <li> <p>Must be in Universal Coordinated Time (UTC).</p> </li> <li> <p>Must not conflict with the preferred maintenance window.</p> </li> <li> <p>Must be at least 30 minutes.</p> </li> </ul></p> pub preferred_backup_window: Option<String>, /// <p>The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).</p> <p>Format: <code>ddd:hh24:mi-ddd:hh24:mi</code> </p> <p>The default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region, occurring on a random day of the week. 
To see the time blocks available, see <a href="http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/AdjustingTheMaintenanceWindow.html"> Adjusting the Preferred Maintenance Window</a> in the <i>Amazon Neptune User Guide.</i> </p> <p>Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun.</p> <p>Constraints: Minimum 30-minute window.</p> pub preferred_maintenance_window: Option<String>, /// <p>The Amazon Resource Name (ARN) of the source DB instance or DB cluster if this DB cluster is created as a Read Replica.</p> pub replication_source_identifier: Option<String>, /// <p>Specifies whether the DB cluster is encrypted.</p> pub storage_encrypted: Option<bool>, pub tags: Option<Vec<Tag>>, /// <p>A list of EC2 VPC security groups to associate with this DB cluster.</p> pub vpc_security_group_ids: Option<Vec<String>>, } /// Serialize `CreateDBClusterMessage` contents to a `SignedRequest`. struct CreateDBClusterMessageSerializer; impl CreateDBClusterMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &CreateDBClusterMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.availability_zones { AvailabilityZonesSerializer::serialize( params, &format!("{}{}", prefix, "AvailabilityZone"), field_value, ); } if let Some(ref field_value) = obj.backup_retention_period { params.put( &format!("{}{}", prefix, "BackupRetentionPeriod"), &field_value.to_string(), ); } if let Some(ref field_value) = obj.character_set_name { params.put(&format!("{}{}", prefix, "CharacterSetName"), &field_value); } params.put( &format!("{}{}", prefix, "DBClusterIdentifier"), &obj.db_cluster_identifier, ); if let Some(ref field_value) = obj.db_cluster_parameter_group_name { params.put( &format!("{}{}", prefix, "DBClusterParameterGroupName"), &field_value, ); } if let Some(ref field_value) = obj.db_subnet_group_name { params.put(&format!("{}{}", prefix, "DBSubnetGroupName"), &field_value); } if let Some(ref field_value) = obj.database_name { params.put(&format!("{}{}", prefix, "DatabaseName"), &field_value); } if let Some(ref field_value) = obj.enable_iam_database_authentication { params.put( &format!("{}{}", prefix, "EnableIAMDatabaseAuthentication"), &field_value.to_string(), ); } params.put(&format!("{}{}", prefix, "Engine"), &obj.engine); if let Some(ref field_value) = obj.engine_version { params.put(&format!("{}{}", prefix, "EngineVersion"), &field_value); } if let Some(ref field_value) = obj.kms_key_id { params.put(&format!("{}{}", prefix, "KmsKeyId"), &field_value); } if let Some(ref field_value) = obj.master_user_password { params.put(&format!("{}{}", prefix, "MasterUserPassword"), &field_value); } if let Some(ref field_value) = obj.master_username { params.put(&format!("{}{}", prefix, "MasterUsername"), &field_value); } if let Some(ref field_value) = obj.option_group_name { params.put(&format!("{}{}", prefix, "OptionGroupName"), &field_value); } if let Some(ref field_value) = obj.port { params.put(&format!("{}{}", prefix, "Port"), &field_value.to_string()); } if let Some(ref field_value) = obj.pre_signed_url { params.put(&format!("{}{}", prefix, "PreSignedUrl"), &field_value); } if let Some(ref field_value) = obj.preferred_backup_window { params.put( &format!("{}{}", prefix, "PreferredBackupWindow"), &field_value, ); } if let Some(ref field_value) = obj.preferred_maintenance_window { params.put( &format!("{}{}", prefix, "PreferredMaintenanceWindow"), &field_value, ); } if let Some(ref field_value) = obj.replication_source_identifier { params.put( 
&format!("{}{}", prefix, "ReplicationSourceIdentifier"), &field_value, ); } if let Some(ref field_value) = obj.storage_encrypted { params.put( &format!("{}{}", prefix, "StorageEncrypted"), &field_value.to_string(), ); } if let Some(ref field_value) = obj.tags { TagListSerializer::serialize(params, &format!("{}{}", prefix, "Tag"), field_value); } if let Some(ref field_value) = obj.vpc_security_group_ids { VpcSecurityGroupIdListSerializer::serialize( params, &format!("{}{}", prefix, "VpcSecurityGroupId"), field_value, ); } } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct CreateDBClusterParameterGroupMessage { /// <p><p>The name of the DB cluster parameter group.</p> <p>Constraints:</p> <ul> <li> <p>Must match the name of an existing DBClusterParameterGroup.</p> </li> </ul> <note> <p>This value is stored as a lowercase string.</p> </note></p> pub db_cluster_parameter_group_name: String, /// <p>The DB cluster parameter group family name. A DB cluster parameter group can be associated with one and only one DB cluster parameter group family, and can be applied only to a DB cluster running a database engine and engine version compatible with that DB cluster parameter group family.</p> pub db_parameter_group_family: String, /// <p>The description for the DB cluster parameter group.</p> pub description: String, pub tags: Option<Vec<Tag>>, } /// Serialize `CreateDBClusterParameterGroupMessage` contents to a `SignedRequest`. struct CreateDBClusterParameterGroupMessageSerializer; impl CreateDBClusterParameterGroupMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &CreateDBClusterParameterGroupMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "DBClusterParameterGroupName"), &obj.db_cluster_parameter_group_name, ); params.put( &format!("{}{}", prefix, "DBParameterGroupFamily"), &obj.db_parameter_group_family, ); params.put(&format!("{}{}", prefix, "Description"), &obj.description); if let Some(ref field_value) = obj.tags { TagListSerializer::serialize(params, &format!("{}{}", prefix, "Tag"), field_value); } } } #[derive(Default, Debug, Clone, PartialEq)] pub struct CreateDBClusterParameterGroupResult { pub db_cluster_parameter_group: Option<DBClusterParameterGroup>, } struct CreateDBClusterParameterGroupResultDeserializer; impl CreateDBClusterParameterGroupResultDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<CreateDBClusterParameterGroupResult, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = CreateDBClusterParameterGroupResult::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "DBClusterParameterGroup" => { obj.db_cluster_parameter_group = Some(try!(DBClusterParameterGroupDeserializer::deserialize( "DBClusterParameterGroup", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone, PartialEq)] pub struct CreateDBClusterResult { pub db_cluster: Option<DBCluster>, } struct CreateDBClusterResultDeserializer; impl CreateDBClusterResultDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<CreateDBClusterResult, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = CreateDBClusterResult::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "DBCluster" => { obj.db_cluster = Some(try!(DBClusterDeserializer::deserialize("DBCluster", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct CreateDBClusterSnapshotMessage { /// <p>The identifier of the DB cluster to create a snapshot for. This parameter is not case-sensitive.</p> <p>Constraints:</p> <ul> <li> <p>Must match the identifier of an existing DBCluster.</p> </li> </ul> <p>Example: <code>my-cluster1</code> </p> pub db_cluster_identifier: String, /// <p>The identifier of the DB cluster snapshot. This parameter is stored as a lowercase string.</p> <p>Constraints:</p> <ul> <li> <p>Must contain from 1 to 63 letters, numbers, or hyphens.</p> </li> <li> <p>First character must be a letter.</p> </li> <li> <p>Cannot end with a hyphen or contain two consecutive hyphens.</p> </li> </ul> <p>Example: <code>my-cluster1-snapshot1</code> </p> pub db_cluster_snapshot_identifier: String, /// <p>The tags to be assigned to the DB cluster snapshot.</p> pub tags: Option<Vec<Tag>>, } /// Serialize `CreateDBClusterSnapshotMessage` contents to a `SignedRequest`. struct CreateDBClusterSnapshotMessageSerializer; impl CreateDBClusterSnapshotMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &CreateDBClusterSnapshotMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "DBClusterIdentifier"), &obj.db_cluster_identifier, ); params.put( &format!("{}{}", prefix, "DBClusterSnapshotIdentifier"), &obj.db_cluster_snapshot_identifier, ); if let Some(ref field_value) = obj.tags { TagListSerializer::serialize(params, &format!("{}{}", prefix, "Tag"), field_value); } } } #[derive(Default, Debug, Clone, PartialEq)] pub struct CreateDBClusterSnapshotResult { pub db_cluster_snapshot: Option<DBClusterSnapshot>, } struct CreateDBClusterSnapshotResultDeserializer; impl CreateDBClusterSnapshotResultDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<CreateDBClusterSnapshotResult, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = CreateDBClusterSnapshotResult::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. 
})) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "DBClusterSnapshot" => { obj.db_cluster_snapshot = Some(try!( DBClusterSnapshotDeserializer::deserialize("DBClusterSnapshot", stack) )); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct CreateDBInstanceMessage { /// <p>The amount of storage (in gibibytes) to allocate for the DB instance.</p> <p>Type: Integer</p> <p>Not applicable. Neptune cluster volumes automatically grow as the amount of data in your database increases, though you are only charged for the space that you use in a Neptune cluster volume.</p> pub allocated_storage: Option<i64>, /// <p>Indicates that minor engine upgrades are applied automatically to the DB instance during the maintenance window.</p> <p>Default: <code>true</code> </p> pub auto_minor_version_upgrade: Option<bool>, /// <p> The EC2 Availability Zone that the DB instance is created in. </p> <p>Default: A random, system-chosen Availability Zone in the endpoint's AWS Region.</p> <p> Example: <code>us-east-1d</code> </p> <p> Constraint: The AvailabilityZone parameter can't be specified if the MultiAZ parameter is set to <code>true</code>. The specified Availability Zone must be in the same AWS Region as the current endpoint. </p> pub availability_zone: Option<String>, /// <p><p>The number of days for which automated backups are retained.</p> <p>Not applicable. The retention period for automated backups is managed by the DB cluster. For more information, see <a>CreateDBCluster</a>.</p> <p>Default: 1</p> <p>Constraints:</p> <ul> <li> <p>Must be a value from 0 to 35</p> </li> <li> <p>Cannot be set to 0 if the DB instance is a source to Read Replicas</p> </li> </ul></p> pub backup_retention_period: Option<i64>, /// <p>Indicates that the DB instance should be associated with the specified CharacterSet.</p> <p>Not applicable. The character set is managed by the DB cluster. For more information, see <a>CreateDBCluster</a>.</p> pub character_set_name: Option<String>, /// <p>True to copy all tags from the DB instance to snapshots of the DB instance, and otherwise false. The default is false.</p> pub copy_tags_to_snapshot: Option<bool>, /// <p>The identifier of the DB cluster that the instance will belong to.</p> <p>For information on creating a DB cluster, see <a>CreateDBCluster</a>.</p> <p>Type: String</p> pub db_cluster_identifier: Option<String>, /// <p>The compute and memory capacity of the DB instance, for example, <code>db.m4.large</code>. Not all DB instance classes are available in all AWS Regions. </p> pub db_instance_class: String, /// <p>The DB instance identifier. This parameter is stored as a lowercase string.</p> <p>Constraints:</p> <ul> <li> <p>Must contain from 1 to 63 letters, numbers, or hyphens.</p> </li> <li> <p>First character must be a letter.</p> </li> <li> <p>Cannot end with a hyphen or contain two consecutive hyphens.</p> </li> </ul> <p>Example: <code>mydbinstance</code> </p> pub db_instance_identifier: String, /// <p>The database name. </p> <p>Type: String</p> pub db_name: Option<String>, /// <p><p>The name of the DB parameter group to associate with this DB instance. 
If this argument is omitted, the default DBParameterGroup for the specified engine is used.</p> <p>Constraints:</p> <ul> <li> <p>Must be 1 to 255 letters, numbers, or hyphens.</p> </li> <li> <p>First character must be a letter</p> </li> <li> <p>Cannot end with a hyphen or contain two consecutive hyphens</p> </li> </ul></p> pub db_parameter_group_name: Option<String>, /// <p>A list of DB security groups to associate with this DB instance.</p> <p>Default: The default DB security group for the database engine.</p> pub db_security_groups: Option<Vec<String>>, /// <p>A DB subnet group to associate with this DB instance.</p> <p>If there is no DB subnet group, then it is a non-VPC DB instance.</p> pub db_subnet_group_name: Option<String>, /// <p>Specify the Active Directory Domain to create the instance in.</p> pub domain: Option<String>, /// <p>Specify the name of the IAM role to be used when making API calls to the Directory Service.</p> pub domain_iam_role_name: Option<String>, /// <p>The list of log types that need to be enabled for exporting to CloudWatch Logs.</p> pub enable_cloudwatch_logs_exports: Option<Vec<String>>, /// <p>True to enable AWS Identity and Access Management (IAM) authentication for Neptune.</p> <p>Default: <code>false</code> </p> pub enable_iam_database_authentication: Option<bool>, /// <p>True to enable Performance Insights for the DB instance, and otherwise false. </p> pub enable_performance_insights: Option<bool>, /// <p>The name of the database engine to be used for this instance. </p> <p>Valid Values: <code>neptune</code> </p> pub engine: String, /// <p>The version number of the database engine to use.</p> pub engine_version: Option<String>, /// <p>The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for the DB instance. </p> pub iops: Option<i64>, /// <p>The AWS KMS key identifier for an encrypted DB instance.</p> <p>The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption key. If you are creating a DB instance with the same AWS account that owns the KMS encryption key used to encrypt the new DB instance, then you can use the KMS key alias instead of the ARN for the KM encryption key.</p> <p>Not applicable. The KMS key identifier is managed by the DB cluster. For more information, see <a>CreateDBCluster</a>.</p> <p>If the <code>StorageEncrypted</code> parameter is true, and you do not specify a value for the <code>KmsKeyId</code> parameter, then Amazon Neptune will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS Region.</p> pub kms_key_id: Option<String>, /// <p>License model information for this DB instance.</p> <p> Valid values: <code>license-included</code> | <code>bring-your-own-license</code> | <code>general-public-license</code> </p> pub license_model: Option<String>, /// <p>The password for the master user. The password can include any printable ASCII character except "/", """, or "@".</p> <p> Not used. </p> pub master_user_password: Option<String>, /// <p>The name for the master user. Not used.</p> pub master_username: Option<String>, /// <p>The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collecting Enhanced Monitoring metrics, specify 0. 
The default is 0.</p> <p>If <code>MonitoringRoleArn</code> is specified, then you must also set <code>MonitoringInterval</code> to a value other than 0.</p> <p>Valid Values: <code>0, 1, 5, 10, 15, 30, 60</code> </p> pub monitoring_interval: Option<i64>, /// <p>The ARN for the IAM role that permits Neptune to send enhanced monitoring metrics to Amazon CloudWatch Logs. For example, <code>arn:aws:iam:123456789012:role/emaccess</code>.</p> <p>If <code>MonitoringInterval</code> is set to a value other than 0, then you must supply a <code>MonitoringRoleArn</code> value.</p> pub monitoring_role_arn: Option<String>, /// <p>Specifies if the DB instance is a Multi-AZ deployment. You can't set the AvailabilityZone parameter if the MultiAZ parameter is set to true.</p> pub multi_az: Option<bool>, /// <p>Indicates that the DB instance should be associated with the specified option group.</p> <p>Permanent options, such as the TDE option for Oracle Advanced Security TDE, can't be removed from an option group, and that option group can't be removed from a DB instance once it is associated with a DB instance</p> pub option_group_name: Option<String>, /// <p>The AWS KMS key identifier for encryption of Performance Insights data. The KMS key ID is the Amazon Resource Name (ARN), KMS key identifier, or the KMS key alias for the KMS encryption key.</p> pub performance_insights_kms_key_id: Option<String>, /// <p>The port number on which the database accepts connections.</p> <p>Not applicable. The port is managed by the DB cluster. For more information, see <a>CreateDBCluster</a>.</p> <p> Default: <code>8182</code> </p> <p>Type: Integer</p> pub port: Option<i64>, /// <p> The daily time range during which automated backups are created. </p> <p>Not applicable. The daily time range for creating automated backups is managed by the DB cluster. For more information, see <a>CreateDBCluster</a>.</p> pub preferred_backup_window: Option<String>, /// <p>The time range each week during which system maintenance can occur, in Universal Coordinated Time (UTC). </p> <p> Format: <code>ddd:hh24:mi-ddd:hh24:mi</code> </p> <p>The default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region, occurring on a random day of the week. </p> <p>Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun.</p> <p>Constraints: Minimum 30-minute window.</p> pub preferred_maintenance_window: Option<String>, /// <p>A value that specifies the order in which an Read Replica is promoted to the primary instance after a failure of the existing primary instance. </p> <p>Default: 1</p> <p>Valid Values: 0 - 15</p> pub promotion_tier: Option<i64>, /// <p>Specifies whether the DB instance is encrypted.</p> <p>Not applicable. The encryption for DB instances is managed by the DB cluster. For more information, see <a>CreateDBCluster</a>.</p> <p>Default: false</p> pub storage_encrypted: Option<bool>, /// <p>Specifies the storage type to be associated with the DB instance.</p> <p>Not applicable. Storage is managed by the DB Cluster.</p> pub storage_type: Option<String>, pub tags: Option<Vec<Tag>>, /// <p>The ARN from the key store with which to associate the instance for TDE encryption.</p> pub tde_credential_arn: Option<String>, /// <p>The password for the given ARN from the key store in order to access the device.</p> pub tde_credential_password: Option<String>, /// <p>The time zone of the DB instance. </p> pub timezone: Option<String>, /// <p>A list of EC2 VPC security groups to associate with this DB instance.</p> <p>Not applicable. 
The associated list of EC2 VPC security groups is managed by the DB cluster. For more information, see <a>CreateDBCluster</a>.</p> <p>Default: The default EC2 VPC security group for the DB subnet group's VPC.</p> pub vpc_security_group_ids: Option<Vec<String>>, } /// Serialize `CreateDBInstanceMessage` contents to a `SignedRequest`. struct CreateDBInstanceMessageSerializer; impl CreateDBInstanceMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &CreateDBInstanceMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.allocated_storage { params.put( &format!("{}{}", prefix, "AllocatedStorage"), &field_value.to_string(), ); } if let Some(ref field_value) = obj.auto_minor_version_upgrade { params.put( &format!("{}{}", prefix, "AutoMinorVersionUpgrade"), &field_value.to_string(), ); } if let Some(ref field_value) = obj.availability_zone { params.put(&format!("{}{}", prefix, "AvailabilityZone"), &field_value); } if let Some(ref field_value) = obj.backup_retention_period { params.put( &format!("{}{}", prefix, "BackupRetentionPeriod"), &field_value.to_string(), ); } if let Some(ref field_value) = obj.character_set_name { params.put(&format!("{}{}", prefix, "CharacterSetName"), &field_value); } if let Some(ref field_value) = obj.copy_tags_to_snapshot { params.put( &format!("{}{}", prefix, "CopyTagsToSnapshot"), &field_value.to_string(), ); } if let Some(ref field_value) = obj.db_cluster_identifier { params.put( &format!("{}{}", prefix, "DBClusterIdentifier"), &field_value, ); } params.put( &format!("{}{}", prefix, "DBInstanceClass"), &obj.db_instance_class, ); params.put( &format!("{}{}", prefix, "DBInstanceIdentifier"), &obj.db_instance_identifier, ); if let Some(ref field_value) = obj.db_name { params.put(&format!("{}{}", prefix, "DBName"), &field_value); } if let Some(ref field_value) = obj.db_parameter_group_name { params.put( &format!("{}{}", prefix, "DBParameterGroupName"), &field_value, ); } if let Some(ref field_value) = obj.db_security_groups { DBSecurityGroupNameListSerializer::serialize( params, &format!("{}{}", prefix, "DBSecurityGroupName"), field_value, ); } if let Some(ref field_value) = obj.db_subnet_group_name { params.put(&format!("{}{}", prefix, "DBSubnetGroupName"), &field_value); } if let Some(ref field_value) = obj.domain { params.put(&format!("{}{}", prefix, "Domain"), &field_value); } if let Some(ref field_value) = obj.domain_iam_role_name { params.put(&format!("{}{}", prefix, "DomainIAMRoleName"), &field_value); } if let Some(ref field_value) = obj.enable_cloudwatch_logs_exports { LogTypeListSerializer::serialize( params, &format!("{}{}", prefix, "EnableCloudwatchLogsExports"), field_value, ); } if let Some(ref field_value) = obj.enable_iam_database_authentication { params.put( &format!("{}{}", prefix, "EnableIAMDatabaseAuthentication"), &field_value.to_string(), ); } if let Some(ref field_value) = obj.enable_performance_insights { params.put( &format!("{}{}", prefix, "EnablePerformanceInsights"), &field_value.to_string(), ); } params.put(&format!("{}{}", prefix, "Engine"), &obj.engine); if let Some(ref field_value) = obj.engine_version { params.put(&format!("{}{}", prefix, "EngineVersion"), &field_value); } if let Some(ref field_value) = obj.iops { params.put(&format!("{}{}", prefix, "Iops"), &field_value.to_string()); } if let Some(ref field_value) = obj.kms_key_id { params.put(&format!("{}{}", prefix, "KmsKeyId"), &field_value); } if let Some(ref field_value) = obj.license_model { 
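// Generated serializer pattern: each optional field is written only when
// present, flattened into a query parameter keyed as `<prefix>FieldName`
// (the prefix is empty when this message is serialized at the top level).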
params.put(&format!("{}{}", prefix, "LicenseModel"), &field_value); } if let Some(ref field_value) = obj.master_user_password { params.put(&format!("{}{}", prefix, "MasterUserPassword"), &field_value); } if let Some(ref field_value) = obj.master_username { params.put(&format!("{}{}", prefix, "MasterUsername"), &field_value); } if let Some(ref field_value) = obj.monitoring_interval { params.put( &format!("{}{}", prefix, "MonitoringInterval"), &field_value.to_string(), ); } if let Some(ref field_value) = obj.monitoring_role_arn { params.put(&format!("{}{}", prefix, "MonitoringRoleArn"), &field_value); } if let Some(ref field_value) = obj.multi_az { params.put( &format!("{}{}", prefix, "MultiAZ"), &field_value.to_string(), ); } if let Some(ref field_value) = obj.option_group_name { params.put(&format!("{}{}", prefix, "OptionGroupName"), &field_value); } if let Some(ref field_value) = obj.performance_insights_kms_key_id { params.put( &format!("{}{}", prefix, "PerformanceInsightsKMSKeyId"), &field_value, ); } if let Some(ref field_value) = obj.port { params.put(&format!("{}{}", prefix, "Port"), &field_value.to_string()); } if let Some(ref field_value) = obj.preferred_backup_window { params.put( &format!("{}{}", prefix, "PreferredBackupWindow"), &field_value, ); } if let Some(ref field_value) = obj.preferred_maintenance_window { params.put( &format!("{}{}", prefix, "PreferredMaintenanceWindow"), &field_value, ); } if let Some(ref field_value) = obj.promotion_tier { params.put( &format!("{}{}", prefix, "PromotionTier"), &field_value.to_string(), ); } if let Some(ref field_value) = obj.storage_encrypted { params.put( &format!("{}{}", prefix, "StorageEncrypted"), &field_value.to_string(), ); } if let Some(ref field_value) = obj.storage_type { params.put(&format!("{}{}", prefix, "StorageType"), &field_value); } if let Some(ref field_value) = obj.tags { TagListSerializer::serialize(params, &format!("{}{}", prefix, "Tag"), field_value); } if let Some(ref field_value) = obj.tde_credential_arn { params.put(&format!("{}{}", prefix, "TdeCredentialArn"), &field_value); } if let Some(ref field_value) = obj.tde_credential_password { params.put( &format!("{}{}", prefix, "TdeCredentialPassword"), &field_value, ); } if let Some(ref field_value) = obj.timezone { params.put(&format!("{}{}", prefix, "Timezone"), &field_value); } if let Some(ref field_value) = obj.vpc_security_group_ids { VpcSecurityGroupIdListSerializer::serialize( params, &format!("{}{}", prefix, "VpcSecurityGroupId"), field_value, ); } } } #[derive(Default, Debug, Clone, PartialEq)] pub struct CreateDBInstanceResult { pub db_instance: Option<DBInstance>, } struct CreateDBInstanceResultDeserializer; impl CreateDBInstanceResultDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<CreateDBInstanceResult, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = CreateDBInstanceResult::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
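// Dispatch on the XML element's local name: a CreateDBInstanceResult
// carries a single <DBInstance> payload element; anything else is skipped.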
{ "DBInstance" => { obj.db_instance = Some(try!(DBInstanceDeserializer::deserialize( "DBInstance", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct CreateDBParameterGroupMessage { /// <p>The DB parameter group family name. A DB parameter group can be associated with one and only one DB parameter group family, and can be applied only to a DB instance running a database engine and engine version compatible with that DB parameter group family.</p> pub db_parameter_group_family: String, /// <p><p>The name of the DB parameter group.</p> <p>Constraints:</p> <ul> <li> <p>Must be 1 to 255 letters, numbers, or hyphens.</p> </li> <li> <p>First character must be a letter</p> </li> <li> <p>Cannot end with a hyphen or contain two consecutive hyphens</p> </li> </ul> <note> <p>This value is stored as a lowercase string.</p> </note></p> pub db_parameter_group_name: String, /// <p>The description for the DB parameter group.</p> pub description: String, pub tags: Option<Vec<Tag>>, } /// Serialize `CreateDBParameterGroupMessage` contents to a `SignedRequest`. struct CreateDBParameterGroupMessageSerializer; impl CreateDBParameterGroupMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &CreateDBParameterGroupMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "DBParameterGroupFamily"), &obj.db_parameter_group_family, ); params.put( &format!("{}{}", prefix, "DBParameterGroupName"), &obj.db_parameter_group_name, ); params.put(&format!("{}{}", prefix, "Description"), &obj.description); if let Some(ref field_value) = obj.tags { TagListSerializer::serialize(params, &format!("{}{}", prefix, "Tag"), field_value); } } } #[derive(Default, Debug, Clone, PartialEq)] pub struct CreateDBParameterGroupResult { pub db_parameter_group: Option<DBParameterGroup>, } struct CreateDBParameterGroupResultDeserializer; impl CreateDBParameterGroupResultDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<CreateDBParameterGroupResult, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = CreateDBParameterGroupResult::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "DBParameterGroup" => { obj.db_parameter_group = Some(try!( DBParameterGroupDeserializer::deserialize("DBParameterGroup", stack) )); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct CreateDBSubnetGroupMessage { /// <p>The description for the DB subnet group.</p> pub db_subnet_group_description: String, /// <p>The name for the DB subnet group. This value is stored as a lowercase string.</p> <p>Constraints: Must contain no more than 255 letters, numbers, periods, underscores, spaces, or hyphens. 
Must not be default.</p> <p>Example: <code>mySubnetgroup</code> </p> pub db_subnet_group_name: String, /// <p>The EC2 Subnet IDs for the DB subnet group.</p> pub subnet_ids: Vec<String>, pub tags: Option<Vec<Tag>>, } /// Serialize `CreateDBSubnetGroupMessage` contents to a `SignedRequest`. struct CreateDBSubnetGroupMessageSerializer; impl CreateDBSubnetGroupMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &CreateDBSubnetGroupMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "DBSubnetGroupDescription"), &obj.db_subnet_group_description, ); params.put( &format!("{}{}", prefix, "DBSubnetGroupName"), &obj.db_subnet_group_name, ); SubnetIdentifierListSerializer::serialize( params, &format!("{}{}", prefix, "SubnetIdentifier"), &obj.subnet_ids, ); if let Some(ref field_value) = obj.tags { TagListSerializer::serialize(params, &format!("{}{}", prefix, "Tag"), field_value); } } } #[derive(Default, Debug, Clone, PartialEq)] pub struct CreateDBSubnetGroupResult { pub db_subnet_group: Option<DBSubnetGroup>, } struct CreateDBSubnetGroupResultDeserializer; impl CreateDBSubnetGroupResultDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<CreateDBSubnetGroupResult, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = CreateDBSubnetGroupResult::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "DBSubnetGroup" => { obj.db_subnet_group = Some(try!(DBSubnetGroupDeserializer::deserialize( "DBSubnetGroup", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct CreateEventSubscriptionMessage { /// <p> A Boolean value; set to <b>true</b> to activate the subscription, set to <b>false</b> to create the subscription but not activate it. </p> pub enabled: Option<bool>, /// <p> A list of event categories for a SourceType that you want to subscribe to. You can see a list of the categories for a given SourceType by using the <b>DescribeEventCategories</b> action. </p> pub event_categories: Option<Vec<String>>, /// <p>The Amazon Resource Name (ARN) of the SNS topic created for event notification. The ARN is created by Amazon SNS when you create a topic and subscribe to it.</p> pub sns_topic_arn: String, /// <p><p>The list of identifiers of the event sources for which events are returned. If not specified, then all sources are included in the response. 
An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens; it can't end with a hyphen or contain two consecutive hyphens.</p> <p>Constraints:</p> <ul> <li> <p>If SourceIds are supplied, SourceType must also be provided.</p> </li> <li> <p>If the source type is a DB instance, then a <code>DBInstanceIdentifier</code> must be supplied.</p> </li> <li> <p>If the source type is a DB security group, a <code>DBSecurityGroupName</code> must be supplied.</p> </li> <li> <p>If the source type is a DB parameter group, a <code>DBParameterGroupName</code> must be supplied.</p> </li> <li> <p>If the source type is a DB snapshot, a <code>DBSnapshotIdentifier</code> must be supplied.</p> </li> </ul></p> pub source_ids: Option<Vec<String>>, /// <p>The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, you would set this parameter to db-instance. If this value is not specified, all events are returned.</p> <p>Valid values: <code>db-instance</code> | <code>db-cluster</code> | <code>db-parameter-group</code> | <code>db-security-group</code> | <code>db-snapshot</code> | <code>db-cluster-snapshot</code> </p> pub source_type: Option<String>, /// <p>The name of the subscription.</p> <p>Constraints: The name must be less than 255 characters.</p> pub subscription_name: String, pub tags: Option<Vec<Tag>>, } /// Serialize `CreateEventSubscriptionMessage` contents to a `SignedRequest`. struct CreateEventSubscriptionMessageSerializer; impl CreateEventSubscriptionMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &CreateEventSubscriptionMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.enabled { params.put( &format!("{}{}", prefix, "Enabled"), &field_value.to_string(), ); } if let Some(ref field_value) = obj.event_categories { EventCategoriesListSerializer::serialize( params, &format!("{}{}", prefix, "EventCategory"), field_value, ); } params.put(&format!("{}{}", prefix, "SnsTopicArn"), &obj.sns_topic_arn); if let Some(ref field_value) = obj.source_ids { SourceIdsListSerializer::serialize( params, &format!("{}{}", prefix, "SourceId"), field_value, ); } if let Some(ref field_value) = obj.source_type { params.put(&format!("{}{}", prefix, "SourceType"), &field_value); } params.put( &format!("{}{}", prefix, "SubscriptionName"), &obj.subscription_name, ); if let Some(ref field_value) = obj.tags { TagListSerializer::serialize(params, &format!("{}{}", prefix, "Tag"), field_value); } } } #[derive(Default, Debug, Clone, PartialEq)] pub struct CreateEventSubscriptionResult { pub event_subscription: Option<EventSubscription>, } struct CreateEventSubscriptionResultDeserializer; impl CreateEventSubscriptionResultDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<CreateEventSubscriptionResult, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = CreateEventSubscriptionResult::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
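// The response wraps one <EventSubscription> element, handed off to
// EventSubscriptionDeserializer; unknown siblings are skipped.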
{ "EventSubscription" => { obj.event_subscription = Some(try!( EventSubscriptionDeserializer::deserialize("EventSubscription", stack) )); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p>Contains the details of an Amazon Neptune DB cluster. </p> <p>This data type is used as a response element in the <a>DescribeDBClusters</a> action. </p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DBCluster { /// <p> <code>AllocatedStorage</code> always returns 1, because Neptune DB cluster storage size is not fixed, but instead automatically adjusts as needed.</p> pub allocated_storage: Option<i64>, /// <p>Provides a list of the AWS Identity and Access Management (IAM) roles that are associated with the DB cluster. IAM roles that are associated with a DB cluster grant permission for the DB cluster to access other AWS services on your behalf.</p> pub associated_roles: Option<Vec<DBClusterRole>>, /// <p>Provides the list of EC2 Availability Zones that instances in the DB cluster can be created in.</p> pub availability_zones: Option<Vec<String>>, /// <p>Specifies the number of days for which automatic DB snapshots are retained.</p> pub backup_retention_period: Option<i64>, /// <p>If present, specifies the name of the character set that this cluster is associated with.</p> pub character_set_name: Option<String>, /// <p>Identifies the clone group to which the DB cluster is associated.</p> pub clone_group_id: Option<String>, /// <p>Specifies the time when the DB cluster was created, in Universal Coordinated Time (UTC).</p> pub cluster_create_time: Option<String>, /// <p>The Amazon Resource Name (ARN) for the DB cluster.</p> pub db_cluster_arn: Option<String>, /// <p>Contains a user-supplied DB cluster identifier. This identifier is the unique key that identifies a DB cluster.</p> pub db_cluster_identifier: Option<String>, /// <p>Provides the list of instances that make up the DB cluster.</p> pub db_cluster_members: Option<Vec<DBClusterMember>>, /// <p>Provides the list of option group memberships for this DB cluster.</p> pub db_cluster_option_group_memberships: Option<Vec<DBClusterOptionGroupStatus>>, /// <p>Specifies the name of the DB cluster parameter group for the DB cluster.</p> pub db_cluster_parameter_group: Option<String>, /// <p>Specifies information on the subnet group associated with the DB cluster, including the name, description, and subnets in the subnet group.</p> pub db_subnet_group: Option<String>, /// <p>Contains the name of the initial database of this DB cluster that was provided at create time, if one was specified when the DB cluster was created. This same name is returned for the life of the DB cluster.</p> pub database_name: Option<String>, /// <p>The AWS Region-unique, immutable identifier for the DB cluster. 
This identifier is found in AWS CloudTrail log entries whenever the AWS KMS key for the DB cluster is accessed.</p> pub db_cluster_resource_id: Option<String>, /// <p>Specifies the earliest time to which a database can be restored with point-in-time restore.</p> pub earliest_restorable_time: Option<String>, /// <p>Specifies the connection endpoint for the primary instance of the DB cluster.</p> pub endpoint: Option<String>, /// <p>Provides the name of the database engine to be used for this DB cluster.</p> pub engine: Option<String>, /// <p>Indicates the database engine version.</p> pub engine_version: Option<String>, /// <p>Specifies the ID that Amazon Route 53 assigns when you create a hosted zone.</p> pub hosted_zone_id: Option<String>, /// <p>True if mapping of AWS Identity and Access Management (IAM) accounts to database accounts is enabled, and otherwise false.</p> pub iam_database_authentication_enabled: Option<bool>, /// <p>If <code>StorageEncrypted</code> is true, the AWS KMS key identifier for the encrypted DB cluster.</p> pub kms_key_id: Option<String>, /// <p>Specifies the latest time to which a database can be restored with point-in-time restore.</p> pub latest_restorable_time: Option<String>, /// <p>Contains the master username for the DB cluster.</p> pub master_username: Option<String>, /// <p>Specifies whether the DB cluster has instances in multiple Availability Zones.</p> pub multi_az: Option<bool>, /// <p>Specifies the progress of the operation as a percentage.</p> pub percent_progress: Option<String>, /// <p>Specifies the port that the database engine is listening on.</p> pub port: Option<i64>, /// <p>Specifies the daily time range during which automated backups are created if automated backups are enabled, as determined by the <code>BackupRetentionPeriod</code>. </p> pub preferred_backup_window: Option<String>, /// <p>Specifies the weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).</p> pub preferred_maintenance_window: Option<String>, /// <p>Contains one or more identifiers of the Read Replicas associated with this DB cluster.</p> pub read_replica_identifiers: Option<Vec<String>>, /// <p>The reader endpoint for the DB cluster. The reader endpoint for a DB cluster load-balances connections across the Read Replicas that are available in a DB cluster. As clients request new connections to the reader endpoint, Neptune distributes the connection requests among the Read Replicas in the DB cluster. This functionality can help balance your read workload across multiple Read Replicas in your DB cluster. </p> <p>If a failover occurs, and the Read Replica that you are connected to is promoted to be the primary instance, your connection is dropped. 
To continue sending your read workload to other Read Replicas in the cluster, you can then reconnect to the reader endpoint.</p> pub reader_endpoint: Option<String>, /// <p>Contains the identifier of the source DB cluster if this DB cluster is a Read Replica.</p> pub replication_source_identifier: Option<String>, /// <p>Specifies the current state of this DB cluster.</p> pub status: Option<String>, /// <p>Specifies whether the DB cluster is encrypted.</p> pub storage_encrypted: Option<bool>, /// <p>Provides a list of VPC security groups that the DB cluster belongs to.</p> pub vpc_security_groups: Option<Vec<VpcSecurityGroupMembership>>, } struct DBClusterDeserializer; impl DBClusterDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<DBCluster, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = DBCluster::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "AllocatedStorage" => { obj.allocated_storage = Some(try!( IntegerOptionalDeserializer::deserialize("AllocatedStorage", stack) )); } "AssociatedRoles" => { obj.associated_roles = Some(try!(DBClusterRolesDeserializer::deserialize( "AssociatedRoles", stack ))); } "AvailabilityZones" => { obj.availability_zones = Some(try!( AvailabilityZonesDeserializer::deserialize("AvailabilityZones", stack) )); } "BackupRetentionPeriod" => { obj.backup_retention_period = Some(try!(IntegerOptionalDeserializer::deserialize( "BackupRetentionPeriod", stack ))); } "CharacterSetName" => { obj.character_set_name = Some(try!(StringDeserializer::deserialize( "CharacterSetName", stack ))); } "CloneGroupId" => { obj.clone_group_id = Some(try!(StringDeserializer::deserialize("CloneGroupId", stack))); } "ClusterCreateTime" => { obj.cluster_create_time = Some(try!(TStampDeserializer::deserialize( "ClusterCreateTime", stack ))); } "DBClusterArn" => { obj.db_cluster_arn = Some(try!(StringDeserializer::deserialize("DBClusterArn", stack))); } "DBClusterIdentifier" => { obj.db_cluster_identifier = Some(try!(StringDeserializer::deserialize( "DBClusterIdentifier", stack ))); } "DBClusterMembers" => { obj.db_cluster_members = Some(try!( DBClusterMemberListDeserializer::deserialize("DBClusterMembers", stack) )); } "DBClusterOptionGroupMemberships" => { obj.db_cluster_option_group_memberships = Some(try!( DBClusterOptionGroupMembershipsDeserializer::deserialize( "DBClusterOptionGroupMemberships", stack ) )); } "DBClusterParameterGroup" => { obj.db_cluster_parameter_group = Some(try!( StringDeserializer::deserialize("DBClusterParameterGroup", stack) )); } "DBSubnetGroup" => { obj.db_subnet_group = Some(try!(StringDeserializer::deserialize( "DBSubnetGroup", stack ))); } "DatabaseName" => { obj.database_name = Some(try!(StringDeserializer::deserialize("DatabaseName", stack))); } "DbClusterResourceId" => { obj.db_cluster_resource_id = Some(try!(StringDeserializer::deserialize( "DbClusterResourceId", stack ))); } "EarliestRestorableTime" => { obj.earliest_restorable_time = Some(try!(TStampDeserializer::deserialize( "EarliestRestorableTime", stack ))); } "Endpoint" => { obj.endpoint = Some(try!(StringDeserializer::deserialize("Endpoint", stack))); } "Engine" => { obj.engine = 
Some(try!(StringDeserializer::deserialize("Engine", stack))); } "EngineVersion" => { obj.engine_version = Some(try!(StringDeserializer::deserialize( "EngineVersion", stack ))); } "HostedZoneId" => { obj.hosted_zone_id = Some(try!(StringDeserializer::deserialize("HostedZoneId", stack))); } "IAMDatabaseAuthenticationEnabled" => { obj.iam_database_authentication_enabled = Some(try!(BooleanDeserializer::deserialize( "IAMDatabaseAuthenticationEnabled", stack ))); } "KmsKeyId" => { obj.kms_key_id = Some(try!(StringDeserializer::deserialize("KmsKeyId", stack))); } "LatestRestorableTime" => { obj.latest_restorable_time = Some(try!(TStampDeserializer::deserialize( "LatestRestorableTime", stack ))); } "MasterUsername" => { obj.master_username = Some(try!(StringDeserializer::deserialize( "MasterUsername", stack ))); } "MultiAZ" => { obj.multi_az = Some(try!(BooleanDeserializer::deserialize("MultiAZ", stack))); } "PercentProgress" => { obj.percent_progress = Some(try!(StringDeserializer::deserialize( "PercentProgress", stack ))); } "Port" => { obj.port = Some(try!(IntegerOptionalDeserializer::deserialize( "Port", stack ))); } "PreferredBackupWindow" => { obj.preferred_backup_window = Some(try!(StringDeserializer::deserialize( "PreferredBackupWindow", stack ))); } "PreferredMaintenanceWindow" => { obj.preferred_maintenance_window = Some(try!( StringDeserializer::deserialize("PreferredMaintenanceWindow", stack) )); } "ReadReplicaIdentifiers" => { obj.read_replica_identifiers = Some(try!(ReadReplicaIdentifierListDeserializer::deserialize( "ReadReplicaIdentifiers", stack ))); } "ReaderEndpoint" => { obj.reader_endpoint = Some(try!(StringDeserializer::deserialize( "ReaderEndpoint", stack ))); } "ReplicationSourceIdentifier" => { obj.replication_source_identifier = Some(try!( StringDeserializer::deserialize("ReplicationSourceIdentifier", stack) )); } "Status" => { obj.status = Some(try!(StringDeserializer::deserialize("Status", stack))); } "StorageEncrypted" => { obj.storage_encrypted = Some(try!(BooleanDeserializer::deserialize( "StorageEncrypted", stack ))); } "VpcSecurityGroups" => { obj.vpc_security_groups = Some(try!( VpcSecurityGroupMembershipListDeserializer::deserialize( "VpcSecurityGroups", stack ) )); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } struct DBClusterListDeserializer; impl DBClusterListDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<DBCluster>, XmlParseError> { let mut obj = vec![]; try!(start_element(tag_name, stack)); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. 
})) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => { if name == "DBCluster" { obj.push(try!(DBClusterDeserializer::deserialize("DBCluster", stack))); } else { skip_tree(stack); } } DeserializerNext::Close => { try!(end_element(tag_name, stack)); break; } DeserializerNext::Skip => { stack.next(); } } } Ok(obj) } } /// <p>Contains information about an instance that is part of a DB cluster.</p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DBClusterMember { /// <p>Specifies the status of the DB cluster parameter group for this member of the DB cluster.</p> pub db_cluster_parameter_group_status: Option<String>, /// <p>Specifies the instance identifier for this member of the DB cluster.</p> pub db_instance_identifier: Option<String>, /// <p>Value that is <code>true</code> if the cluster member is the primary instance for the DB cluster and <code>false</code> otherwise.</p> pub is_cluster_writer: Option<bool>, /// <p>A value that specifies the order in which a Read Replica is promoted to the primary instance after a failure of the existing primary instance. </p> pub promotion_tier: Option<i64>, } struct DBClusterMemberDeserializer; impl DBClusterMemberDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<DBClusterMember, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = DBClusterMember::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "DBClusterParameterGroupStatus" => { obj.db_cluster_parameter_group_status = Some(try!( StringDeserializer::deserialize("DBClusterParameterGroupStatus", stack) )); } "DBInstanceIdentifier" => { obj.db_instance_identifier = Some(try!(StringDeserializer::deserialize( "DBInstanceIdentifier", stack ))); } "IsClusterWriter" => { obj.is_cluster_writer = Some(try!(BooleanDeserializer::deserialize( "IsClusterWriter", stack ))); } "PromotionTier" => { obj.promotion_tier = Some(try!(IntegerOptionalDeserializer::deserialize( "PromotionTier", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } struct DBClusterMemberListDeserializer; impl DBClusterMemberListDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<DBClusterMember>, XmlParseError> { let mut obj = vec![]; try!(start_element(tag_name, stack)); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. 
})) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => { if name == "DBClusterMember" { obj.push(try!(DBClusterMemberDeserializer::deserialize( "DBClusterMember", stack ))); } else { skip_tree(stack); } } DeserializerNext::Close => { try!(end_element(tag_name, stack)); break; } DeserializerNext::Skip => { stack.next(); } } } Ok(obj) } } /// <p>Contains the result of a successful invocation of the <a>DescribeDBClusters</a> action.</p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DBClusterMessage { /// <p>Contains a list of DB clusters for the user.</p> pub db_clusters: Option<Vec<DBCluster>>, /// <p>A pagination token that can be used in a subsequent DescribeDBClusters request.</p> pub marker: Option<String>, } struct DBClusterMessageDeserializer; impl DBClusterMessageDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<DBClusterMessage, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = DBClusterMessage::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "DBClusters" => { obj.db_clusters = Some(try!(DBClusterListDeserializer::deserialize( "DBClusters", stack ))); } "Marker" => { obj.marker = Some(try!(StringDeserializer::deserialize("Marker", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } struct DBClusterOptionGroupMembershipsDeserializer; impl DBClusterOptionGroupMembershipsDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<DBClusterOptionGroupStatus>, XmlParseError> { let mut obj = vec![]; try!(start_element(tag_name, stack)); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => { if name == "DBClusterOptionGroup" { obj.push(try!(DBClusterOptionGroupStatusDeserializer::deserialize( "DBClusterOptionGroup", stack ))); } else { skip_tree(stack); } } DeserializerNext::Close => { try!(end_element(tag_name, stack)); break; } DeserializerNext::Skip => { stack.next(); } } } Ok(obj) } } /// <p>Contains status information for a DB cluster option group.</p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DBClusterOptionGroupStatus { /// <p>Specifies the name of the DB cluster option group.</p> pub db_cluster_option_group_name: Option<String>, /// <p>Specifies the status of the DB cluster option group.</p> pub status: Option<String>, } struct DBClusterOptionGroupStatusDeserializer; impl DBClusterOptionGroupStatusDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<DBClusterOptionGroupStatus, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = DBClusterOptionGroupStatus::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. 
})) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "DBClusterOptionGroupName" => { obj.db_cluster_option_group_name = Some(try!( StringDeserializer::deserialize("DBClusterOptionGroupName", stack) )); } "Status" => { obj.status = Some(try!(StringDeserializer::deserialize("Status", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p>Contains the details of an Amazon Neptune DB cluster parameter group. </p> <p>This data type is used as a response element in the <a>DescribeDBClusterParameterGroups</a> action. </p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DBClusterParameterGroup { /// <p>The Amazon Resource Name (ARN) for the DB cluster parameter group.</p> pub db_cluster_parameter_group_arn: Option<String>, /// <p>Provides the name of the DB cluster parameter group.</p> pub db_cluster_parameter_group_name: Option<String>, /// <p>Provides the name of the DB parameter group family that this DB cluster parameter group is compatible with.</p> pub db_parameter_group_family: Option<String>, /// <p>Provides the customer-specified description for this DB cluster parameter group.</p> pub description: Option<String>, } struct DBClusterParameterGroupDeserializer; impl DBClusterParameterGroupDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<DBClusterParameterGroup, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = DBClusterParameterGroup::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "DBClusterParameterGroupArn" => { obj.db_cluster_parameter_group_arn = Some(try!( StringDeserializer::deserialize("DBClusterParameterGroupArn", stack) )); } "DBClusterParameterGroupName" => { obj.db_cluster_parameter_group_name = Some(try!( StringDeserializer::deserialize("DBClusterParameterGroupName", stack) )); } "DBParameterGroupFamily" => { obj.db_parameter_group_family = Some(try!( StringDeserializer::deserialize("DBParameterGroupFamily", stack) )); } "Description" => { obj.description = Some(try!(StringDeserializer::deserialize("Description", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p>Provides details about a DB cluster parameter group including the parameters in the DB cluster parameter group.</p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DBClusterParameterGroupDetails { /// <p> An optional pagination token provided by a previous DescribeDBClusterParameters request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by <code>MaxRecords</code> . 
</p> pub marker: Option<String>, /// <p>Provides a list of parameters for the DB cluster parameter group.</p> pub parameters: Option<Vec<Parameter>>, } struct DBClusterParameterGroupDetailsDeserializer; impl DBClusterParameterGroupDetailsDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<DBClusterParameterGroupDetails, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = DBClusterParameterGroupDetails::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "Marker" => { obj.marker = Some(try!(StringDeserializer::deserialize("Marker", stack))); } "Parameters" => { obj.parameters = Some(try!(ParametersListDeserializer::deserialize( "Parameters", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } struct DBClusterParameterGroupListDeserializer; impl DBClusterParameterGroupListDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<DBClusterParameterGroup>, XmlParseError> { let mut obj = vec![]; try!(start_element(tag_name, stack)); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => { if name == "DBClusterParameterGroup" { obj.push(try!(DBClusterParameterGroupDeserializer::deserialize( "DBClusterParameterGroup", stack ))); } else { skip_tree(stack); } } DeserializerNext::Close => { try!(end_element(tag_name, stack)); break; } DeserializerNext::Skip => { stack.next(); } } } Ok(obj) } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DBClusterParameterGroupNameMessage { /// <p><p>The name of the DB cluster parameter group.</p> <p>Constraints:</p> <ul> <li> <p>Must be 1 to 255 letters or numbers.</p> </li> <li> <p>First character must be a letter</p> </li> <li> <p>Cannot end with a hyphen or contain two consecutive hyphens</p> </li> </ul> <note> <p>This value is stored as a lowercase string.</p> </note></p> pub db_cluster_parameter_group_name: Option<String>, } struct DBClusterParameterGroupNameMessageDeserializer; impl DBClusterParameterGroupNameMessageDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<DBClusterParameterGroupNameMessage, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = DBClusterParameterGroupNameMessage::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
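// The only payload here is the parameter group name, which the service
// stores and echoes back as a lowercase string.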
{ "DBClusterParameterGroupName" => { obj.db_cluster_parameter_group_name = Some(try!( StringDeserializer::deserialize("DBClusterParameterGroupName", stack) )); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DBClusterParameterGroupsMessage { /// <p>A list of DB cluster parameter groups.</p> pub db_cluster_parameter_groups: Option<Vec<DBClusterParameterGroup>>, /// <p> An optional pagination token provided by a previous <code>DescribeDBClusterParameterGroups</code> request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by <code>MaxRecords</code>. </p> pub marker: Option<String>, } struct DBClusterParameterGroupsMessageDeserializer; impl DBClusterParameterGroupsMessageDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<DBClusterParameterGroupsMessage, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = DBClusterParameterGroupsMessage::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "DBClusterParameterGroups" => { obj.db_cluster_parameter_groups = Some(try!(DBClusterParameterGroupListDeserializer::deserialize( "DBClusterParameterGroups", stack ))); } "Marker" => { obj.marker = Some(try!(StringDeserializer::deserialize("Marker", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p>Describes an AWS Identity and Access Management (IAM) role that is associated with a DB cluster.</p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DBClusterRole { /// <p>The Amazon Resource Name (ARN) of the IAM role that is associated with the DB cluster.</p> pub role_arn: Option<String>, /// <p><p>Describes the state of association between the IAM role and the DB cluster. The Status property returns one of the following values:</p> <ul> <li> <p> <code>ACTIVE</code> - the IAM role ARN is associated with the DB cluster and can be used to access other AWS services on your behalf.</p> </li> <li> <p> <code>PENDING</code> - the IAM role ARN is being associated with the DB cluster.</p> </li> <li> <p> <code>INVALID</code> - the IAM role ARN is associated with the DB cluster, but the DB cluster is unable to assume the IAM role in order to access other AWS services on your behalf.</p> </li> </ul></p> pub status: Option<String>, } struct DBClusterRoleDeserializer; impl DBClusterRoleDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<DBClusterRole, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = DBClusterRole::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "RoleArn" => { obj.role_arn = Some(try!(StringDeserializer::deserialize("RoleArn", stack))); } "Status" => { obj.status = Some(try!(StringDeserializer::deserialize("Status", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } struct DBClusterRolesDeserializer; impl DBClusterRolesDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<DBClusterRole>, XmlParseError> { let mut obj = vec![]; try!(start_element(tag_name, stack)); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => { if name == "DBClusterRole" { obj.push(try!(DBClusterRoleDeserializer::deserialize( "DBClusterRole", stack ))); } else { skip_tree(stack); } } DeserializerNext::Close => { try!(end_element(tag_name, stack)); break; } DeserializerNext::Skip => { stack.next(); } } } Ok(obj) } } /// <p>Contains the details for an Amazon Neptune DB cluster snapshot </p> <p>This data type is used as a response element in the <a>DescribeDBClusterSnapshots</a> action. </p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DBClusterSnapshot { /// <p>Specifies the allocated storage size in gibibytes (GiB).</p> pub allocated_storage: Option<i64>, /// <p>Provides the list of EC2 Availability Zones that instances in the DB cluster snapshot can be restored in.</p> pub availability_zones: Option<Vec<String>>, /// <p>Specifies the time when the DB cluster was created, in Universal Coordinated Time (UTC).</p> pub cluster_create_time: Option<String>, /// <p>Specifies the DB cluster identifier of the DB cluster that this DB cluster snapshot was created from.</p> pub db_cluster_identifier: Option<String>, /// <p>The Amazon Resource Name (ARN) for the DB cluster snapshot.</p> pub db_cluster_snapshot_arn: Option<String>, /// <p>Specifies the identifier for the DB cluster snapshot.</p> pub db_cluster_snapshot_identifier: Option<String>, /// <p>Specifies the name of the database engine.</p> pub engine: Option<String>, /// <p>Provides the version of the database engine for this DB cluster snapshot.</p> pub engine_version: Option<String>, /// <p>True if mapping of AWS Identity and Access Management (IAM) accounts to database accounts is enabled, and otherwise false.</p> pub iam_database_authentication_enabled: Option<bool>, /// <p>If <code>StorageEncrypted</code> is true, the AWS KMS key identifier for the encrypted DB cluster snapshot.</p> pub kms_key_id: Option<String>, /// <p>Provides the license model information for this DB cluster snapshot.</p> pub license_model: Option<String>, /// <p>Provides the master username for the DB cluster snapshot.</p> pub master_username: Option<String>, /// <p>Specifies the percentage of the estimated data that has been transferred.</p> pub percent_progress: Option<i64>, /// <p>Specifies the port that the DB cluster was listening on at the time of the snapshot.</p> pub port: Option<i64>, /// <p>Provides the time when the snapshot was taken, in Universal Coordinated Time (UTC).</p> pub snapshot_create_time: Option<String>, /// <p>Provides the type of the DB cluster snapshot.</p> pub snapshot_type: Option<String>, /// <p>If the DB cluster snapshot was copied from a source DB cluster 
snapshot, the Amazon Resource Name (ARN) for the source DB cluster snapshot, otherwise, a null value.</p> pub source_db_cluster_snapshot_arn: Option<String>, /// <p>Specifies the status of this DB cluster snapshot.</p> pub status: Option<String>, /// <p>Specifies whether the DB cluster snapshot is encrypted.</p> pub storage_encrypted: Option<bool>, /// <p>Provides the VPC ID associated with the DB cluster snapshot.</p> pub vpc_id: Option<String>, } struct DBClusterSnapshotDeserializer; impl DBClusterSnapshotDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<DBClusterSnapshot, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = DBClusterSnapshot::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "AllocatedStorage" => { obj.allocated_storage = Some(try!(IntegerDeserializer::deserialize( "AllocatedStorage", stack ))); } "AvailabilityZones" => { obj.availability_zones = Some(try!( AvailabilityZonesDeserializer::deserialize("AvailabilityZones", stack) )); } "ClusterCreateTime" => { obj.cluster_create_time = Some(try!(TStampDeserializer::deserialize( "ClusterCreateTime", stack ))); } "DBClusterIdentifier" => { obj.db_cluster_identifier = Some(try!(StringDeserializer::deserialize( "DBClusterIdentifier", stack ))); } "DBClusterSnapshotArn" => { obj.db_cluster_snapshot_arn = Some(try!(StringDeserializer::deserialize( "DBClusterSnapshotArn", stack ))); } "DBClusterSnapshotIdentifier" => { obj.db_cluster_snapshot_identifier = Some(try!( StringDeserializer::deserialize("DBClusterSnapshotIdentifier", stack) )); } "Engine" => { obj.engine = Some(try!(StringDeserializer::deserialize("Engine", stack))); } "EngineVersion" => { obj.engine_version = Some(try!(StringDeserializer::deserialize( "EngineVersion", stack ))); } "IAMDatabaseAuthenticationEnabled" => { obj.iam_database_authentication_enabled = Some(try!(BooleanDeserializer::deserialize( "IAMDatabaseAuthenticationEnabled", stack ))); } "KmsKeyId" => { obj.kms_key_id = Some(try!(StringDeserializer::deserialize("KmsKeyId", stack))); } "LicenseModel" => { obj.license_model = Some(try!(StringDeserializer::deserialize("LicenseModel", stack))); } "MasterUsername" => { obj.master_username = Some(try!(StringDeserializer::deserialize( "MasterUsername", stack ))); } "PercentProgress" => { obj.percent_progress = Some(try!(IntegerDeserializer::deserialize( "PercentProgress", stack ))); } "Port" => { obj.port = Some(try!(IntegerDeserializer::deserialize("Port", stack))); } "SnapshotCreateTime" => { obj.snapshot_create_time = Some(try!(TStampDeserializer::deserialize( "SnapshotCreateTime", stack ))); } "SnapshotType" => { obj.snapshot_type = Some(try!(StringDeserializer::deserialize("SnapshotType", stack))); } "SourceDBClusterSnapshotArn" => { obj.source_db_cluster_snapshot_arn = Some(try!( StringDeserializer::deserialize("SourceDBClusterSnapshotArn", stack) )); } "Status" => { obj.status = Some(try!(StringDeserializer::deserialize("Status", stack))); } "StorageEncrypted" => { obj.storage_encrypted = Some(try!(BooleanDeserializer::deserialize( "StorageEncrypted", stack ))); } "VpcId" => { obj.vpc_id = Some(try!(StringDeserializer::deserialize("VpcId", stack))); } _ => skip_tree(stack), 
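// `skip_tree` consumes the whole unrecognized subtree so the event stream
// stays aligned at the next sibling element.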
}, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p>Contains the name and values of a manual DB cluster snapshot attribute.</p> <p>Manual DB cluster snapshot attributes are used to authorize other AWS accounts to restore a manual DB cluster snapshot. For more information, see the <a>ModifyDBClusterSnapshotAttribute</a> API action.</p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DBClusterSnapshotAttribute { /// <p>The name of the manual DB cluster snapshot attribute.</p> <p>The attribute named <code>restore</code> refers to the list of AWS accounts that have permission to copy or restore the manual DB cluster snapshot. For more information, see the <a>ModifyDBClusterSnapshotAttribute</a> API action.</p> pub attribute_name: Option<String>, /// <p>The value(s) for the manual DB cluster snapshot attribute.</p> <p>If the <code>AttributeName</code> field is set to <code>restore</code>, then this element returns a list of IDs of the AWS accounts that are authorized to copy or restore the manual DB cluster snapshot. If a value of <code>all</code> is in the list, then the manual DB cluster snapshot is public and available for any AWS account to copy or restore.</p> pub attribute_values: Option<Vec<String>>, } struct DBClusterSnapshotAttributeDeserializer; impl DBClusterSnapshotAttributeDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<DBClusterSnapshotAttribute, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = DBClusterSnapshotAttribute::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "AttributeName" => { obj.attribute_name = Some(try!(StringDeserializer::deserialize( "AttributeName", stack ))); } "AttributeValues" => { obj.attribute_values = Some(try!( AttributeValueListDeserializer::deserialize("AttributeValues", stack) )); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } struct DBClusterSnapshotAttributeListDeserializer; impl DBClusterSnapshotAttributeListDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<DBClusterSnapshotAttribute>, XmlParseError> { let mut obj = vec![]; try!(start_element(tag_name, stack)); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. 
})) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => { if name == "DBClusterSnapshotAttribute" { obj.push(try!(DBClusterSnapshotAttributeDeserializer::deserialize( "DBClusterSnapshotAttribute", stack ))); } else { skip_tree(stack); } } DeserializerNext::Close => { try!(end_element(tag_name, stack)); break; } DeserializerNext::Skip => { stack.next(); } } } Ok(obj) } } /// <p>Contains the results of a successful call to the <a>DescribeDBClusterSnapshotAttributes</a> API action.</p> <p>Manual DB cluster snapshot attributes are used to authorize other AWS accounts to copy or restore a manual DB cluster snapshot. For more information, see the <a>ModifyDBClusterSnapshotAttribute</a> API action.</p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DBClusterSnapshotAttributesResult { /// <p>The list of attributes and values for the manual DB cluster snapshot.</p> pub db_cluster_snapshot_attributes: Option<Vec<DBClusterSnapshotAttribute>>, /// <p>The identifier of the manual DB cluster snapshot that the attributes apply to.</p> pub db_cluster_snapshot_identifier: Option<String>, } struct DBClusterSnapshotAttributesResultDeserializer; impl DBClusterSnapshotAttributesResultDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<DBClusterSnapshotAttributesResult, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = DBClusterSnapshotAttributesResult::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "DBClusterSnapshotAttributes" => { obj.db_cluster_snapshot_attributes = Some(try!( DBClusterSnapshotAttributeListDeserializer::deserialize( "DBClusterSnapshotAttributes", stack ) )); } "DBClusterSnapshotIdentifier" => { obj.db_cluster_snapshot_identifier = Some(try!( StringDeserializer::deserialize("DBClusterSnapshotIdentifier", stack) )); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } struct DBClusterSnapshotListDeserializer; impl DBClusterSnapshotListDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<DBClusterSnapshot>, XmlParseError> { let mut obj = vec![]; try!(start_element(tag_name, stack)); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => { if name == "DBClusterSnapshot" { obj.push(try!(DBClusterSnapshotDeserializer::deserialize( "DBClusterSnapshot", stack ))); } else { skip_tree(stack); } } DeserializerNext::Close => { try!(end_element(tag_name, stack)); break; } DeserializerNext::Skip => { stack.next(); } } } Ok(obj) } } /// <p> Provides a list of DB cluster snapshots for the user as the result of a call to the <a>DescribeDBClusterSnapshots</a> action. 
</p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DBClusterSnapshotMessage { /// <p>Provides a list of DB cluster snapshots for the user.</p> pub db_cluster_snapshots: Option<Vec<DBClusterSnapshot>>, /// <p> An optional pagination token provided by a previous <a>DescribeDBClusterSnapshots</a> request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by <code>MaxRecords</code>. </p> pub marker: Option<String>, } struct DBClusterSnapshotMessageDeserializer; impl DBClusterSnapshotMessageDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<DBClusterSnapshotMessage, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = DBClusterSnapshotMessage::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "DBClusterSnapshots" => { obj.db_cluster_snapshots = Some(try!(DBClusterSnapshotListDeserializer::deserialize( "DBClusterSnapshots", stack ))); } "Marker" => { obj.marker = Some(try!(StringDeserializer::deserialize("Marker", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p> This data type is used as a response element in the action <a>DescribeDBEngineVersions</a>. </p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DBEngineVersion { /// <p>The description of the database engine.</p> pub db_engine_description: Option<String>, /// <p>The description of the database engine version.</p> pub db_engine_version_description: Option<String>, /// <p>The name of the DB parameter group family for the database engine.</p> pub db_parameter_group_family: Option<String>, /// <p> The default character set for new instances of this engine version, if the <code>CharacterSetName</code> parameter of the CreateDBInstance API is not specified. </p> pub default_character_set: Option<CharacterSet>, /// <p>The name of the database engine.</p> pub engine: Option<String>, /// <p>The version number of the database engine.</p> pub engine_version: Option<String>, /// <p>The types of logs that the database engine has available for export to CloudWatch Logs.</p> pub exportable_log_types: Option<Vec<String>>, /// <p> A list of the character sets supported by this engine for the <code>CharacterSetName</code> parameter of the <code>CreateDBInstance</code> action. </p> pub supported_character_sets: Option<Vec<CharacterSet>>, /// <p>A list of the time zones supported by this engine for the <code>Timezone</code> parameter of the <code>CreateDBInstance</code> action. 
</p> pub supported_timezones: Option<Vec<Timezone>>, /// <p>A value that indicates whether the engine version supports exporting the log types specified by ExportableLogTypes to CloudWatch Logs.</p> pub supports_log_exports_to_cloudwatch_logs: Option<bool>, /// <p>Indicates whether the database engine version supports read replicas.</p> pub supports_read_replica: Option<bool>, /// <p>A list of engine versions that this database engine version can be upgraded to.</p> pub valid_upgrade_target: Option<Vec<UpgradeTarget>>, } struct DBEngineVersionDeserializer; impl DBEngineVersionDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<DBEngineVersion, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = DBEngineVersion::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "DBEngineDescription" => { obj.db_engine_description = Some(try!(StringDeserializer::deserialize( "DBEngineDescription", stack ))); } "DBEngineVersionDescription" => { obj.db_engine_version_description = Some(try!( StringDeserializer::deserialize("DBEngineVersionDescription", stack) )); } "DBParameterGroupFamily" => { obj.db_parameter_group_family = Some(try!( StringDeserializer::deserialize("DBParameterGroupFamily", stack) )); } "DefaultCharacterSet" => { obj.default_character_set = Some(try!( CharacterSetDeserializer::deserialize("DefaultCharacterSet", stack) )); } "Engine" => { obj.engine = Some(try!(StringDeserializer::deserialize("Engine", stack))); } "EngineVersion" => { obj.engine_version = Some(try!(StringDeserializer::deserialize( "EngineVersion", stack ))); } "ExportableLogTypes" => { obj.exportable_log_types = Some(try!( LogTypeListDeserializer::deserialize("ExportableLogTypes", stack) )); } "SupportedCharacterSets" => { obj.supported_character_sets = Some(try!(SupportedCharacterSetsListDeserializer::deserialize( "SupportedCharacterSets", stack ))); } "SupportedTimezones" => { obj.supported_timezones = Some(try!(SupportedTimezonesListDeserializer::deserialize( "SupportedTimezones", stack ))); } "SupportsLogExportsToCloudwatchLogs" => { obj.supports_log_exports_to_cloudwatch_logs = Some(try!(BooleanDeserializer::deserialize( "SupportsLogExportsToCloudwatchLogs", stack ))); } "SupportsReadReplica" => { obj.supports_read_replica = Some(try!(BooleanDeserializer::deserialize( "SupportsReadReplica", stack ))); } "ValidUpgradeTarget" => { obj.valid_upgrade_target = Some(try!(ValidUpgradeTargetListDeserializer::deserialize( "ValidUpgradeTarget", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } struct DBEngineVersionListDeserializer; impl DBEngineVersionListDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<DBEngineVersion>, XmlParseError> { let mut obj = vec![]; try!(start_element(tag_name, stack)); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. 
})) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => { if name == "DBEngineVersion" { obj.push(try!(DBEngineVersionDeserializer::deserialize( "DBEngineVersion", stack ))); } else { skip_tree(stack); } } DeserializerNext::Close => { try!(end_element(tag_name, stack)); break; } DeserializerNext::Skip => { stack.next(); } } } Ok(obj) } } /// <p> Contains the result of a successful invocation of the <a>DescribeDBEngineVersions</a> action. </p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DBEngineVersionMessage { /// <p> A list of <code>DBEngineVersion</code> elements. </p> pub db_engine_versions: Option<Vec<DBEngineVersion>>, /// <p> An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by <code>MaxRecords</code>. </p> pub marker: Option<String>, } struct DBEngineVersionMessageDeserializer; impl DBEngineVersionMessageDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<DBEngineVersionMessage, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = DBEngineVersionMessage::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "DBEngineVersions" => { obj.db_engine_versions = Some(try!( DBEngineVersionListDeserializer::deserialize("DBEngineVersions", stack) )); } "Marker" => { obj.marker = Some(try!(StringDeserializer::deserialize("Marker", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p>Contains the details of an Amazon Neptune DB instance. </p> <p>This data type is used as a response element in the <a>DescribeDBInstances</a> action. 
</p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DBInstance { /// <p>Specifies the allocated storage size, in gibibytes.</p> pub allocated_storage: Option<i64>, /// <p>Indicates that minor version patches are applied automatically.</p> pub auto_minor_version_upgrade: Option<bool>, /// <p>Specifies the name of the Availability Zone the DB instance is located in.</p> pub availability_zone: Option<String>, /// <p>Specifies the number of days for which automatic DB snapshots are retained.</p> pub backup_retention_period: Option<i64>, /// <p>The identifier of the CA certificate for this DB instance.</p> pub ca_certificate_identifier: Option<String>, /// <p>If present, specifies the name of the character set that this instance is associated with.</p> pub character_set_name: Option<String>, /// <p>Specifies whether tags are copied from the DB instance to snapshots of the DB instance.</p> pub copy_tags_to_snapshot: Option<bool>, /// <p>If the DB instance is a member of a DB cluster, contains the name of the DB cluster that the DB instance is a member of.</p> pub db_cluster_identifier: Option<String>, /// <p>The Amazon Resource Name (ARN) for the DB instance.</p> pub db_instance_arn: Option<String>, /// <p>Contains the name of the compute and memory capacity class of the DB instance.</p> pub db_instance_class: Option<String>, /// <p>Contains a user-supplied database identifier. This identifier is the unique key that identifies a DB instance.</p> pub db_instance_identifier: Option<String>, /// <p>Specifies the current state of this database.</p> pub db_instance_status: Option<String>, /// <p>The database name.</p> pub db_name: Option<String>, /// <p>Provides the list of DB parameter groups applied to this DB instance.</p> pub db_parameter_groups: Option<Vec<DBParameterGroupStatus>>, /// <p> Provides a list of DB security group elements containing only <code>DBSecurityGroup.Name</code> and <code>DBSecurityGroup.Status</code> subelements. </p> pub db_security_groups: Option<Vec<DBSecurityGroupMembership>>, /// <p>Specifies information on the subnet group associated with the DB instance, including the name, description, and subnets in the subnet group.</p> pub db_subnet_group: Option<DBSubnetGroup>, /// <p>Specifies the port that the DB instance listens on. If the DB instance is part of a DB cluster, this can be a different port than the DB cluster port.</p> pub db_instance_port: Option<i64>, /// <p>The AWS Region-unique, immutable identifier for the DB instance.
This identifier is found in AWS CloudTrail log entries whenever the AWS KMS key for the DB instance is accessed.</p> pub dbi_resource_id: Option<String>, /// <p>Not supported</p> pub domain_memberships: Option<Vec<DomainMembership>>, /// <p>A list of log types that this DB instance is configured to export to CloudWatch Logs.</p> pub enabled_cloudwatch_logs_exports: Option<Vec<String>>, /// <p>Specifies the connection endpoint.</p> pub endpoint: Option<Endpoint>, /// <p>Provides the name of the database engine to be used for this DB instance.</p> pub engine: Option<String>, /// <p>Indicates the database engine version.</p> pub engine_version: Option<String>, /// <p>The Amazon Resource Name (ARN) of the Amazon CloudWatch Logs log stream that receives the Enhanced Monitoring metrics data for the DB instance.</p> pub enhanced_monitoring_resource_arn: Option<String>, /// <p>True if AWS Identity and Access Management (IAM) authentication is enabled, and otherwise false.</p> pub iam_database_authentication_enabled: Option<bool>, /// <p>Provides the date and time the DB instance was created.</p> pub instance_create_time: Option<String>, /// <p>Specifies the Provisioned IOPS (I/O operations per second) value.</p> pub iops: Option<i64>, /// <p> If <code>StorageEncrypted</code> is true, the AWS KMS key identifier for the encrypted DB instance. </p> pub kms_key_id: Option<String>, /// <p>Specifies the latest time to which a database can be restored with point-in-time restore.</p> pub latest_restorable_time: Option<String>, /// <p>License model information for this DB instance.</p> pub license_model: Option<String>, /// <p>Contains the master username for the DB instance.</p> pub master_username: Option<String>, /// <p>The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance.</p> pub monitoring_interval: Option<i64>, /// <p>The ARN for the IAM role that permits Neptune to send Enhanced Monitoring metrics to Amazon CloudWatch Logs.</p> pub monitoring_role_arn: Option<String>, /// <p>Specifies if the DB instance is a Multi-AZ deployment.</p> pub multi_az: Option<bool>, /// <p>Provides the list of option group memberships for this DB instance.</p> pub option_group_memberships: Option<Vec<OptionGroupMembership>>, /// <p>Specifies that changes to the DB instance are pending. This element is only included when changes are pending. Specific changes are identified by subelements.</p> pub pending_modified_values: Option<PendingModifiedValues>, /// <p>True if Performance Insights is enabled for the DB instance, and otherwise false.</p> pub performance_insights_enabled: Option<bool>, /// <p>The AWS KMS key identifier for encryption of Performance Insights data. The KMS key ID is the Amazon Resource Name (ARN), KMS key identifier, or the KMS key alias for the KMS encryption key.</p> pub performance_insights_kms_key_id: Option<String>, /// <p> Specifies the daily time range during which automated backups are created if automated backups are enabled, as determined by the <code>BackupRetentionPeriod</code>. </p> pub preferred_backup_window: Option<String>, /// <p>Specifies the weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).</p> pub preferred_maintenance_window: Option<String>, /// <p>A value that specifies the order in which a Read Replica is promoted to the primary instance after a failure of the existing primary instance. 
</p> pub promotion_tier: Option<i64>, /// <p>Contains one or more identifiers of DB clusters that are Read Replicas of this DB instance.</p> pub read_replica_db_cluster_identifiers: Option<Vec<String>>, /// <p>Contains one or more identifiers of the Read Replicas associated with this DB instance.</p> pub read_replica_db_instance_identifiers: Option<Vec<String>>, /// <p>Contains the identifier of the source DB instance if this DB instance is a Read Replica.</p> pub read_replica_source_db_instance_identifier: Option<String>, /// <p>If present, specifies the name of the secondary Availability Zone for a DB instance with multi-AZ support.</p> pub secondary_availability_zone: Option<String>, /// <p>The status of a Read Replica. If the instance is not a Read Replica, this is blank.</p> pub status_infos: Option<Vec<DBInstanceStatusInfo>>, /// <p>Specifies whether the DB instance is encrypted.</p> pub storage_encrypted: Option<bool>, /// <p>Specifies the storage type associated with DB instance.</p> pub storage_type: Option<String>, /// <p>The ARN from the key store with which the instance is associated for TDE encryption.</p> pub tde_credential_arn: Option<String>, /// <p>Not supported. </p> pub timezone: Option<String>, /// <p>Provides a list of VPC security group elements that the DB instance belongs to.</p> pub vpc_security_groups: Option<Vec<VpcSecurityGroupMembership>>, } struct DBInstanceDeserializer; impl DBInstanceDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<DBInstance, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = DBInstance::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "AllocatedStorage" => { obj.allocated_storage = Some(try!(IntegerDeserializer::deserialize( "AllocatedStorage", stack ))); } "AutoMinorVersionUpgrade" => { obj.auto_minor_version_upgrade = Some(try!( BooleanDeserializer::deserialize("AutoMinorVersionUpgrade", stack) )); } "AvailabilityZone" => { obj.availability_zone = Some(try!(StringDeserializer::deserialize( "AvailabilityZone", stack ))); } "BackupRetentionPeriod" => { obj.backup_retention_period = Some(try!(IntegerDeserializer::deserialize( "BackupRetentionPeriod", stack ))); } "CACertificateIdentifier" => { obj.ca_certificate_identifier = Some(try!( StringDeserializer::deserialize("CACertificateIdentifier", stack) )); } "CharacterSetName" => { obj.character_set_name = Some(try!(StringDeserializer::deserialize( "CharacterSetName", stack ))); } "CopyTagsToSnapshot" => { obj.copy_tags_to_snapshot = Some(try!(BooleanDeserializer::deserialize( "CopyTagsToSnapshot", stack ))); } "DBClusterIdentifier" => { obj.db_cluster_identifier = Some(try!(StringDeserializer::deserialize( "DBClusterIdentifier", stack ))); } "DBInstanceArn" => { obj.db_instance_arn = Some(try!(StringDeserializer::deserialize( "DBInstanceArn", stack ))); } "DBInstanceClass" => { obj.db_instance_class = Some(try!(StringDeserializer::deserialize( "DBInstanceClass", stack ))); } "DBInstanceIdentifier" => { obj.db_instance_identifier = Some(try!(StringDeserializer::deserialize( "DBInstanceIdentifier", stack ))); } "DBInstanceStatus" => { obj.db_instance_status = Some(try!(StringDeserializer::deserialize( "DBInstanceStatus", stack ))); } "DBName" => { obj.db_name = Some(try!(StringDeserializer::deserialize("DBName", stack))); } "DBParameterGroups" => { obj.db_parameter_groups = Some(try!(DBParameterGroupStatusListDeserializer::deserialize( "DBParameterGroups", stack ))); } "DBSecurityGroups" => { obj.db_security_groups = Some(try!( DBSecurityGroupMembershipListDeserializer::deserialize( "DBSecurityGroups", stack ) )); } "DBSubnetGroup" => { obj.db_subnet_group = Some(try!(DBSubnetGroupDeserializer::deserialize( "DBSubnetGroup", stack ))); } "DbInstancePort" => { obj.db_instance_port = Some(try!(IntegerDeserializer::deserialize( "DbInstancePort", stack ))); } "DbiResourceId" => { obj.dbi_resource_id = Some(try!(StringDeserializer::deserialize( "DbiResourceId", stack ))); } "DomainMemberships" => { obj.domain_memberships = Some(try!(DomainMembershipListDeserializer::deserialize( "DomainMemberships", stack ))); } "EnabledCloudwatchLogsExports" => { obj.enabled_cloudwatch_logs_exports = Some(try!(LogTypeListDeserializer::deserialize( "EnabledCloudwatchLogsExports", stack ))); } "Endpoint" => { obj.endpoint = Some(try!(EndpointDeserializer::deserialize("Endpoint", stack))); } "Engine" => { obj.engine = Some(try!(StringDeserializer::deserialize("Engine", stack))); } "EngineVersion" => { obj.engine_version = Some(try!(StringDeserializer::deserialize( "EngineVersion", stack ))); } "EnhancedMonitoringResourceArn" => { obj.enhanced_monitoring_resource_arn = Some(try!( StringDeserializer::deserialize("EnhancedMonitoringResourceArn", stack) )); } "IAMDatabaseAuthenticationEnabled" => { obj.iam_database_authentication_enabled = Some(try!(BooleanDeserializer::deserialize( "IAMDatabaseAuthenticationEnabled", stack ))); } "InstanceCreateTime" => { obj.instance_create_time = Some(try!(TStampDeserializer::deserialize( "InstanceCreateTime", stack ))); } "Iops" => { obj.iops = Some(try!(IntegerOptionalDeserializer::deserialize( "Iops", stack ))); } "KmsKeyId" => { obj.kms_key_id = 
Some(try!(StringDeserializer::deserialize("KmsKeyId", stack))); } "LatestRestorableTime" => { obj.latest_restorable_time = Some(try!(TStampDeserializer::deserialize( "LatestRestorableTime", stack ))); } "LicenseModel" => { obj.license_model = Some(try!(StringDeserializer::deserialize("LicenseModel", stack))); } "MasterUsername" => { obj.master_username = Some(try!(StringDeserializer::deserialize( "MasterUsername", stack ))); } "MonitoringInterval" => { obj.monitoring_interval = Some(try!( IntegerOptionalDeserializer::deserialize("MonitoringInterval", stack) )); } "MonitoringRoleArn" => { obj.monitoring_role_arn = Some(try!(StringDeserializer::deserialize( "MonitoringRoleArn", stack ))); } "MultiAZ" => { obj.multi_az = Some(try!(BooleanDeserializer::deserialize("MultiAZ", stack))); } "OptionGroupMemberships" => { obj.option_group_memberships = Some(try!(OptionGroupMembershipListDeserializer::deserialize( "OptionGroupMemberships", stack ))); } "PendingModifiedValues" => { obj.pending_modified_values = Some(try!(PendingModifiedValuesDeserializer::deserialize( "PendingModifiedValues", stack ))); } "PerformanceInsightsEnabled" => { obj.performance_insights_enabled = Some(try!(BooleanOptionalDeserializer::deserialize( "PerformanceInsightsEnabled", stack ))); } "PerformanceInsightsKMSKeyId" => { obj.performance_insights_kms_key_id = Some(try!( StringDeserializer::deserialize("PerformanceInsightsKMSKeyId", stack) )); } "PreferredBackupWindow" => { obj.preferred_backup_window = Some(try!(StringDeserializer::deserialize( "PreferredBackupWindow", stack ))); } "PreferredMaintenanceWindow" => { obj.preferred_maintenance_window = Some(try!( StringDeserializer::deserialize("PreferredMaintenanceWindow", stack) )); } "PromotionTier" => { obj.promotion_tier = Some(try!(IntegerOptionalDeserializer::deserialize( "PromotionTier", stack ))); } "ReadReplicaDBClusterIdentifiers" => { obj.read_replica_db_cluster_identifiers = Some(try!( ReadReplicaDBClusterIdentifierListDeserializer::deserialize( "ReadReplicaDBClusterIdentifiers", stack ) )); } "ReadReplicaDBInstanceIdentifiers" => { obj.read_replica_db_instance_identifiers = Some(try!( ReadReplicaDBInstanceIdentifierListDeserializer::deserialize( "ReadReplicaDBInstanceIdentifiers", stack ) )); } "ReadReplicaSourceDBInstanceIdentifier" => { obj.read_replica_source_db_instance_identifier = Some(try!(StringDeserializer::deserialize( "ReadReplicaSourceDBInstanceIdentifier", stack ))); } "SecondaryAvailabilityZone" => { obj.secondary_availability_zone = Some(try!( StringDeserializer::deserialize("SecondaryAvailabilityZone", stack) )); } "StatusInfos" => { obj.status_infos = Some(try!( DBInstanceStatusInfoListDeserializer::deserialize("StatusInfos", stack) )); } "StorageEncrypted" => { obj.storage_encrypted = Some(try!(BooleanDeserializer::deserialize( "StorageEncrypted", stack ))); } "StorageType" => { obj.storage_type = Some(try!(StringDeserializer::deserialize("StorageType", stack))); } "TdeCredentialArn" => { obj.tde_credential_arn = Some(try!(StringDeserializer::deserialize( "TdeCredentialArn", stack ))); } "Timezone" => { obj.timezone = Some(try!(StringDeserializer::deserialize("Timezone", stack))); } "VpcSecurityGroups" => { obj.vpc_security_groups = Some(try!( VpcSecurityGroupMembershipListDeserializer::deserialize( "VpcSecurityGroups", stack ) )); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } struct DBInstanceListDeserializer; impl 
DBInstanceListDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<DBInstance>, XmlParseError> { let mut obj = vec![]; try!(start_element(tag_name, stack)); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => { if name == "DBInstance" { obj.push(try!(DBInstanceDeserializer::deserialize( "DBInstance", stack ))); } else { skip_tree(stack); } } DeserializerNext::Close => { try!(end_element(tag_name, stack)); break; } DeserializerNext::Skip => { stack.next(); } } } Ok(obj) } } /// <p> Contains the result of a successful invocation of the <a>DescribeDBInstances</a> action. </p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DBInstanceMessage { /// <p> A list of <a>DBInstance</a> instances. </p> pub db_instances: Option<Vec<DBInstance>>, /// <p> An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by <code>MaxRecords</code> . </p> pub marker: Option<String>, } struct DBInstanceMessageDeserializer; impl DBInstanceMessageDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<DBInstanceMessage, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = DBInstanceMessage::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "DBInstances" => { obj.db_instances = Some(try!(DBInstanceListDeserializer::deserialize( "DBInstances", stack ))); } "Marker" => { obj.marker = Some(try!(StringDeserializer::deserialize("Marker", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p>Provides a list of status information for a DB instance.</p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DBInstanceStatusInfo { /// <p>Details of the error if there is an error for the instance. If the instance is not in an error state, this value is blank.</p> pub message: Option<String>, /// <p>Boolean value that is true if the instance is operating normally, or false if the instance is in an error state.</p> pub normal: Option<bool>, /// <p>Status of the DB instance. For a StatusType of read replica, the values can be replicating, error, stopped, or terminated.</p> pub status: Option<String>, /// <p>This value is currently "read replication."</p> pub status_type: Option<String>, } struct DBInstanceStatusInfoDeserializer; impl DBInstanceStatusInfoDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<DBInstanceStatusInfo, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = DBInstanceStatusInfo::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. 
})) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "Message" => { obj.message = Some(try!(StringDeserializer::deserialize("Message", stack))); } "Normal" => { obj.normal = Some(try!(BooleanDeserializer::deserialize("Normal", stack))); } "Status" => { obj.status = Some(try!(StringDeserializer::deserialize("Status", stack))); } "StatusType" => { obj.status_type = Some(try!(StringDeserializer::deserialize("StatusType", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } struct DBInstanceStatusInfoListDeserializer; impl DBInstanceStatusInfoListDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<DBInstanceStatusInfo>, XmlParseError> { let mut obj = vec![]; try!(start_element(tag_name, stack)); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => { if name == "DBInstanceStatusInfo" { obj.push(try!(DBInstanceStatusInfoDeserializer::deserialize( "DBInstanceStatusInfo", stack ))); } else { skip_tree(stack); } } DeserializerNext::Close => { try!(end_element(tag_name, stack)); break; } DeserializerNext::Skip => { stack.next(); } } } Ok(obj) } } /// <p>Contains the details of an Amazon Neptune DB parameter group. </p> <p>This data type is used as a response element in the <a>DescribeDBParameterGroups</a> action. </p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DBParameterGroup { /// <p>The Amazon Resource Name (ARN) for the DB parameter group.</p> pub db_parameter_group_arn: Option<String>, /// <p>Provides the name of the DB parameter group family that this DB parameter group is compatible with.</p> pub db_parameter_group_family: Option<String>, /// <p>Provides the name of the DB parameter group.</p> pub db_parameter_group_name: Option<String>, /// <p>Provides the customer-specified description for this DB parameter group.</p> pub description: Option<String>, } struct DBParameterGroupDeserializer; impl DBParameterGroupDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<DBParameterGroup, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = DBParameterGroup::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => { match &name[..] 
{ "DBParameterGroupArn" => { obj.db_parameter_group_arn = Some(try!( StringDeserializer::deserialize("DBParameterGroupArn", stack) )); } "DBParameterGroupFamily" => { obj.db_parameter_group_family = Some(try!( StringDeserializer::deserialize("DBParameterGroupFamily", stack) )); } "DBParameterGroupName" => { obj.db_parameter_group_name = Some(try!( StringDeserializer::deserialize("DBParameterGroupName", stack) )); } "Description" => { obj.description = Some(try!(StringDeserializer::deserialize("Description", stack))); } _ => skip_tree(stack), } } DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p> Contains the result of a successful invocation of the <a>DescribeDBParameters</a> action. </p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DBParameterGroupDetails { /// <p> An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by <code>MaxRecords</code>. </p> pub marker: Option<String>, /// <p> A list of <a>Parameter</a> values. </p> pub parameters: Option<Vec<Parameter>>, } struct DBParameterGroupDetailsDeserializer; impl DBParameterGroupDetailsDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<DBParameterGroupDetails, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = DBParameterGroupDetails::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "Marker" => { obj.marker = Some(try!(StringDeserializer::deserialize("Marker", stack))); } "Parameters" => { obj.parameters = Some(try!(ParametersListDeserializer::deserialize( "Parameters", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } struct DBParameterGroupListDeserializer; impl DBParameterGroupListDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<DBParameterGroup>, XmlParseError> { let mut obj = vec![]; try!(start_element(tag_name, stack)); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => { if name == "DBParameterGroup" { obj.push(try!(DBParameterGroupDeserializer::deserialize( "DBParameterGroup", stack ))); } else { skip_tree(stack); } } DeserializerNext::Close => { try!(end_element(tag_name, stack)); break; } DeserializerNext::Skip => { stack.next(); } } } Ok(obj) } } /// <p> Contains the result of a successful invocation of the <a>ModifyDBParameterGroup</a> or <a>ResetDBParameterGroup</a> action. 
</p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DBParameterGroupNameMessage { /// <p>Provides the name of the DB parameter group.</p> pub db_parameter_group_name: Option<String>, } struct DBParameterGroupNameMessageDeserializer; impl DBParameterGroupNameMessageDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<DBParameterGroupNameMessage, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = DBParameterGroupNameMessage::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "DBParameterGroupName" => { obj.db_parameter_group_name = Some(try!(StringDeserializer::deserialize( "DBParameterGroupName", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p><p>The status of the DB parameter group.</p> <p>This data type is used as a response element in the following actions:</p> <ul> <li> <p> <a>CreateDBInstance</a> </p> </li> <li> <p> <a>DeleteDBInstance</a> </p> </li> <li> <p> <a>ModifyDBInstance</a> </p> </li> <li> <p> <a>RebootDBInstance</a> </p> </li> </ul></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DBParameterGroupStatus { /// <p>The name of the DB parameter group.</p> pub db_parameter_group_name: Option<String>, /// <p>The status of parameter updates.</p> pub parameter_apply_status: Option<String>, } struct DBParameterGroupStatusDeserializer; impl DBParameterGroupStatusDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<DBParameterGroupStatus, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = DBParameterGroupStatus::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => { match &name[..] { "DBParameterGroupName" => { obj.db_parameter_group_name = Some(try!( StringDeserializer::deserialize("DBParameterGroupName", stack) )); } "ParameterApplyStatus" => { obj.parameter_apply_status = Some(try!( StringDeserializer::deserialize("ParameterApplyStatus", stack) )); } _ => skip_tree(stack), } } DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } struct DBParameterGroupStatusListDeserializer; impl DBParameterGroupStatusListDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<DBParameterGroupStatus>, XmlParseError> { let mut obj = vec![]; try!(start_element(tag_name, stack)); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, ..
})) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => { if name == "DBParameterGroup" { obj.push(try!(DBParameterGroupStatusDeserializer::deserialize( "DBParameterGroup", stack ))); } else { skip_tree(stack); } } DeserializerNext::Close => { try!(end_element(tag_name, stack)); break; } DeserializerNext::Skip => { stack.next(); } } } Ok(obj) } } /// <p> Contains the result of a successful invocation of the <a>DescribeDBParameterGroups</a> action. </p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DBParameterGroupsMessage { /// <p> A list of <a>DBParameterGroup</a> instances. </p> pub db_parameter_groups: Option<Vec<DBParameterGroup>>, /// <p> An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by <code>MaxRecords</code>. </p> pub marker: Option<String>, } struct DBParameterGroupsMessageDeserializer; impl DBParameterGroupsMessageDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<DBParameterGroupsMessage, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = DBParameterGroupsMessage::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "DBParameterGroups" => { obj.db_parameter_groups = Some(try!(DBParameterGroupListDeserializer::deserialize( "DBParameterGroups", stack ))); } "Marker" => { obj.marker = Some(try!(StringDeserializer::deserialize("Marker", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p><p>This data type is used as a response element in the following actions:</p> <ul> <li> <p> <a>ModifyDBInstance</a> </p> </li> <li> <p> <a>RebootDBInstance</a> </p> </li> </ul></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DBSecurityGroupMembership { /// <p>The name of the DB security group.</p> pub db_security_group_name: Option<String>, /// <p>The status of the DB security group.</p> pub status: Option<String>, } struct DBSecurityGroupMembershipDeserializer; impl DBSecurityGroupMembershipDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<DBSecurityGroupMembership, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = DBSecurityGroupMembership::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "DBSecurityGroupName" => { obj.db_security_group_name = Some(try!(StringDeserializer::deserialize( "DBSecurityGroupName", stack ))); } "Status" => { obj.status = Some(try!(StringDeserializer::deserialize("Status", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } struct DBSecurityGroupMembershipListDeserializer; impl DBSecurityGroupMembershipListDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<DBSecurityGroupMembership>, XmlParseError> { let mut obj = vec![]; try!(start_element(tag_name, stack)); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => { if name == "DBSecurityGroup" { obj.push(try!(DBSecurityGroupMembershipDeserializer::deserialize( "DBSecurityGroup", stack ))); } else { skip_tree(stack); } } DeserializerNext::Close => { try!(end_element(tag_name, stack)); break; } DeserializerNext::Skip => { stack.next(); } } } Ok(obj) } } /// Serialize `DBSecurityGroupNameList` contents to a `SignedRequest`. struct DBSecurityGroupNameListSerializer; impl DBSecurityGroupNameListSerializer { fn serialize(params: &mut Params, name: &str, obj: &Vec<String>) { for (index, obj) in obj.iter().enumerate() { let key = format!("{}.member.{}", name, index + 1); params.put(&key, &obj); } } } /// <p>Contains the details of an Amazon Neptune DB subnet group. </p> <p>This data type is used as a response element in the <a>DescribeDBSubnetGroups</a> action. </p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DBSubnetGroup { /// <p>The Amazon Resource Name (ARN) for the DB subnet group.</p> pub db_subnet_group_arn: Option<String>, /// <p>Provides the description of the DB subnet group.</p> pub db_subnet_group_description: Option<String>, /// <p>The name of the DB subnet group.</p> pub db_subnet_group_name: Option<String>, /// <p>Provides the status of the DB subnet group.</p> pub subnet_group_status: Option<String>, /// <p> Contains a list of <a>Subnet</a> elements. </p> pub subnets: Option<Vec<Subnet>>, /// <p>Provides the VpcId of the DB subnet group.</p> pub vpc_id: Option<String>, } struct DBSubnetGroupDeserializer; impl DBSubnetGroupDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<DBSubnetGroup, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = DBSubnetGroup::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "DBSubnetGroupArn" => { obj.db_subnet_group_arn = Some(try!(StringDeserializer::deserialize( "DBSubnetGroupArn", stack ))); } "DBSubnetGroupDescription" => { obj.db_subnet_group_description = Some(try!( StringDeserializer::deserialize("DBSubnetGroupDescription", stack) )); } "DBSubnetGroupName" => { obj.db_subnet_group_name = Some(try!(StringDeserializer::deserialize( "DBSubnetGroupName", stack ))); } "SubnetGroupStatus" => { obj.subnet_group_status = Some(try!(StringDeserializer::deserialize( "SubnetGroupStatus", stack ))); } "Subnets" => { obj.subnets = Some(try!(SubnetListDeserializer::deserialize("Subnets", stack))); } "VpcId" => { obj.vpc_id = Some(try!(StringDeserializer::deserialize("VpcId", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p> Contains the result of a successful invocation of the <a>DescribeDBSubnetGroups</a> action. </p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DBSubnetGroupMessage { /// <p> A list of <a>DBSubnetGroup</a> instances. </p> pub db_subnet_groups: Option<Vec<DBSubnetGroup>>, /// <p> An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by <code>MaxRecords</code>. </p> pub marker: Option<String>, } struct DBSubnetGroupMessageDeserializer; impl DBSubnetGroupMessageDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<DBSubnetGroupMessage, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = DBSubnetGroupMessage::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "DBSubnetGroups" => { obj.db_subnet_groups = Some(try!(DBSubnetGroupsDeserializer::deserialize( "DBSubnetGroups", stack ))); } "Marker" => { obj.marker = Some(try!(StringDeserializer::deserialize("Marker", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } struct DBSubnetGroupsDeserializer; impl DBSubnetGroupsDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<DBSubnetGroup>, XmlParseError> { let mut obj = vec![]; try!(start_element(tag_name, stack)); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => { if name == "DBSubnetGroup" { obj.push(try!(DBSubnetGroupDeserializer::deserialize( "DBSubnetGroup", stack ))); } else { skip_tree(stack); } } DeserializerNext::Close => { try!(end_element(tag_name, stack)); break; } DeserializerNext::Skip => { stack.next(); } } } Ok(obj) } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DeleteDBClusterMessage { /// <p><p>The DB cluster identifier for the DB cluster to be deleted. 
This parameter isn&#39;t case-sensitive.</p> <p>Constraints:</p> <ul> <li> <p>Must match an existing DBClusterIdentifier.</p> </li> </ul></p> pub db_cluster_identifier: String, /// <p><p> The DB cluster snapshot identifier of the new DB cluster snapshot created when <code>SkipFinalSnapshot</code> is set to <code>false</code>. </p> <note> <p> Specifying this parameter and also setting the <code>SkipFinalSnapshot</code> parameter to true results in an error. </p> </note> <p>Constraints:</p> <ul> <li> <p>Must be 1 to 255 letters, numbers, or hyphens.</p> </li> <li> <p>First character must be a letter</p> </li> <li> <p>Cannot end with a hyphen or contain two consecutive hyphens</p> </li> </ul></p> pub final_db_snapshot_identifier: Option<String>, /// <p> Determines whether a final DB cluster snapshot is created before the DB cluster is deleted. If <code>true</code> is specified, no DB cluster snapshot is created. If <code>false</code> is specified, a DB cluster snapshot is created before the DB cluster is deleted. </p> <note> <p>You must specify a <code>FinalDBSnapshotIdentifier</code> parameter if <code>SkipFinalSnapshot</code> is <code>false</code>.</p> </note> <p>Default: <code>false</code> </p> pub skip_final_snapshot: Option<bool>, } /// Serialize `DeleteDBClusterMessage` contents to a `SignedRequest`. struct DeleteDBClusterMessageSerializer; impl DeleteDBClusterMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DeleteDBClusterMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "DBClusterIdentifier"), &obj.db_cluster_identifier, ); if let Some(ref field_value) = obj.final_db_snapshot_identifier { params.put( &format!("{}{}", prefix, "FinalDBSnapshotIdentifier"), &field_value, ); } if let Some(ref field_value) = obj.skip_final_snapshot { params.put( &format!("{}{}", prefix, "SkipFinalSnapshot"), &field_value.to_string(), ); } } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DeleteDBClusterParameterGroupMessage { /// <p><p>The name of the DB cluster parameter group.</p> <p>Constraints:</p> <ul> <li> <p>Must be the name of an existing DB cluster parameter group.</p> </li> <li> <p>You can&#39;t delete a default DB cluster parameter group.</p> </li> <li> <p>Cannot be associated with any DB clusters.</p> </li> </ul></p> pub db_cluster_parameter_group_name: String, } /// Serialize `DeleteDBClusterParameterGroupMessage` contents to a `SignedRequest`. struct DeleteDBClusterParameterGroupMessageSerializer; impl DeleteDBClusterParameterGroupMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DeleteDBClusterParameterGroupMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "DBClusterParameterGroupName"), &obj.db_cluster_parameter_group_name, ); } } #[derive(Default, Debug, Clone, PartialEq)] pub struct DeleteDBClusterResult { pub db_cluster: Option<DBCluster>, } struct DeleteDBClusterResultDeserializer; impl DeleteDBClusterResultDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<DeleteDBClusterResult, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = DeleteDBClusterResult::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, ..
})) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "DBCluster" => { obj.db_cluster = Some(try!(DBClusterDeserializer::deserialize("DBCluster", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DeleteDBClusterSnapshotMessage { /// <p>The identifier of the DB cluster snapshot to delete.</p> <p>Constraints: Must be the name of an existing DB cluster snapshot in the <code>available</code> state.</p> pub db_cluster_snapshot_identifier: String, } /// Serialize `DeleteDBClusterSnapshotMessage` contents to a `SignedRequest`. struct DeleteDBClusterSnapshotMessageSerializer; impl DeleteDBClusterSnapshotMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DeleteDBClusterSnapshotMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "DBClusterSnapshotIdentifier"), &obj.db_cluster_snapshot_identifier, ); } } #[derive(Default, Debug, Clone, PartialEq)] pub struct DeleteDBClusterSnapshotResult { pub db_cluster_snapshot: Option<DBClusterSnapshot>, } struct DeleteDBClusterSnapshotResultDeserializer; impl DeleteDBClusterSnapshotResultDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<DeleteDBClusterSnapshotResult, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = DeleteDBClusterSnapshotResult::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "DBClusterSnapshot" => { obj.db_cluster_snapshot = Some(try!( DBClusterSnapshotDeserializer::deserialize("DBClusterSnapshot", stack) )); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DeleteDBInstanceMessage { /// <p><p>The DB instance identifier for the DB instance to be deleted. This parameter isn&#39;t case-sensitive.</p> <p>Constraints:</p> <ul> <li> <p>Must match the name of an existing DB instance.</p> </li> </ul></p> pub db_instance_identifier: String, /// <p><p> The DBSnapshotIdentifier of the new DBSnapshot created when SkipFinalSnapshot is set to <code>false</code>. </p> <note> <p>Specifying this parameter and also setting the SkipFinalSnapshot parameter to true results in an error.</p> </note> <p>Constraints:</p> <ul> <li> <p>Must be 1 to 255 letters or numbers.</p> </li> <li> <p>First character must be a letter</p> </li> <li> <p>Cannot end with a hyphen or contain two consecutive hyphens</p> </li> <li> <p>Cannot be specified when deleting a Read Replica.</p> </li> </ul></p> pub final_db_snapshot_identifier: Option<String>, /// <p> Determines whether a final DB snapshot is created before the DB instance is deleted. If <code>true</code> is specified, no DBSnapshot is created. If <code>false</code> is specified, a DB snapshot is created before the DB instance is deleted.
</p> <p>Note that when a DB instance is in a failure state and has a status of 'failed', 'incompatible-restore', or 'incompatible-network', it can only be deleted when the SkipFinalSnapshot parameter is set to "true".</p> <p>Specify <code>true</code> when deleting a Read Replica.</p> <note> <p>The FinalDBSnapshotIdentifier parameter must be specified if SkipFinalSnapshot is <code>false</code>.</p> </note> <p>Default: <code>false</code> </p> pub skip_final_snapshot: Option<bool>, } /// Serialize `DeleteDBInstanceMessage` contents to a `SignedRequest`. struct DeleteDBInstanceMessageSerializer; impl DeleteDBInstanceMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DeleteDBInstanceMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "DBInstanceIdentifier"), &obj.db_instance_identifier, ); if let Some(ref field_value) = obj.final_db_snapshot_identifier { params.put( &format!("{}{}", prefix, "FinalDBSnapshotIdentifier"), &field_value, ); } if let Some(ref field_value) = obj.skip_final_snapshot { params.put( &format!("{}{}", prefix, "SkipFinalSnapshot"), &field_value.to_string(), ); } } } #[derive(Default, Debug, Clone, PartialEq)] pub struct DeleteDBInstanceResult { pub db_instance: Option<DBInstance>, } struct DeleteDBInstanceResultDeserializer; impl DeleteDBInstanceResultDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<DeleteDBInstanceResult, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = DeleteDBInstanceResult::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "DBInstance" => { obj.db_instance = Some(try!(DBInstanceDeserializer::deserialize( "DBInstance", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DeleteDBParameterGroupMessage { /// <p><p>The name of the DB parameter group.</p> <p>Constraints:</p> <ul> <li> <p>Must be the name of an existing DB parameter group</p> </li> <li> <p>You can&#39;t delete a default DB parameter group</p> </li> <li> <p>Cannot be associated with any DB instances</p> </li> </ul></p> pub db_parameter_group_name: String, } /// Serialize `DeleteDBParameterGroupMessage` contents to a `SignedRequest`. struct DeleteDBParameterGroupMessageSerializer; impl DeleteDBParameterGroupMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DeleteDBParameterGroupMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "DBParameterGroupName"), &obj.db_parameter_group_name, ); } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DeleteDBSubnetGroupMessage { /// <p>The name of the database subnet group to delete.</p> <note> <p>You can't delete the default subnet group.</p> </note> <p>Constraints: Must match the name of an existing DBSubnetGroup.
Must not be default.</p> <p>Example: <code>mySubnetgroup</code> </p> pub db_subnet_group_name: String, } /// Serialize `DeleteDBSubnetGroupMessage` contents to a `SignedRequest`. struct DeleteDBSubnetGroupMessageSerializer; impl DeleteDBSubnetGroupMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DeleteDBSubnetGroupMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "DBSubnetGroupName"), &obj.db_subnet_group_name, ); } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DeleteEventSubscriptionMessage { /// <p>The name of the event notification subscription you want to delete.</p> pub subscription_name: String, } /// Serialize `DeleteEventSubscriptionMessage` contents to a `SignedRequest`. struct DeleteEventSubscriptionMessageSerializer; impl DeleteEventSubscriptionMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DeleteEventSubscriptionMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "SubscriptionName"), &obj.subscription_name, ); } } #[derive(Default, Debug, Clone, PartialEq)] pub struct DeleteEventSubscriptionResult { pub event_subscription: Option<EventSubscription>, } struct DeleteEventSubscriptionResultDeserializer; impl DeleteEventSubscriptionResultDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<DeleteEventSubscriptionResult, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = DeleteEventSubscriptionResult::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "EventSubscription" => { obj.event_subscription = Some(try!( EventSubscriptionDeserializer::deserialize("EventSubscription", stack) )); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DescribeDBClusterParameterGroupsMessage { /// <p><p>The name of a specific DB cluster parameter group to return details for.</p> <p>Constraints:</p> <ul> <li> <p>If supplied, must match the name of an existing DBClusterParameterGroup.</p> </li> </ul></p> pub db_cluster_parameter_group_name: Option<String>, /// <p>This parameter is not currently supported.</p> pub filters: Option<Vec<Filter>>, /// <p> An optional pagination token provided by a previous <code>DescribeDBClusterParameterGroups</code> request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by <code>MaxRecords</code>. </p> pub marker: Option<String>, /// <p> The maximum number of records to include in the response. If more records exist than the specified <code>MaxRecords</code> value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. </p> <p>Default: 100</p> <p>Constraints: Minimum 20, maximum 100.</p> pub max_records: Option<i64>, } /// Serialize `DescribeDBClusterParameterGroupsMessage` contents to a `SignedRequest`. 
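///
/// A minimal illustrative sketch (not part of the generated code's own docs) of the
/// flattened query parameters this serializer writes into `Params`, assuming an empty
/// `name` prefix. The parameter names are taken from the `serialize` body below;
/// `my-params` and `abc123` are hypothetical values, and the `Filter` member layout,
/// produced by `FilterListSerializer`, is omitted here:
///
/// ```text
/// DBClusterParameterGroupName=my-params
/// Marker=abc123
/// MaxRecords=100
/// ```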
struct DescribeDBClusterParameterGroupsMessageSerializer; impl DescribeDBClusterParameterGroupsMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DescribeDBClusterParameterGroupsMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.db_cluster_parameter_group_name { params.put( &format!("{}{}", prefix, "DBClusterParameterGroupName"), &field_value, ); } if let Some(ref field_value) = obj.filters { FilterListSerializer::serialize( params, &format!("{}{}", prefix, "Filter"), field_value, ); } if let Some(ref field_value) = obj.marker { params.put(&format!("{}{}", prefix, "Marker"), &field_value); } if let Some(ref field_value) = obj.max_records { params.put( &format!("{}{}", prefix, "MaxRecords"), &field_value.to_string(), ); } } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DescribeDBClusterParametersMessage { /// <p><p>The name of a specific DB cluster parameter group to return parameter details for.</p> <p>Constraints:</p> <ul> <li> <p>If supplied, must match the name of an existing DBClusterParameterGroup.</p> </li> </ul></p> pub db_cluster_parameter_group_name: String, /// <p>This parameter is not currently supported.</p> pub filters: Option<Vec<Filter>>, /// <p> An optional pagination token provided by a previous <code>DescribeDBClusterParameters</code> request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by <code>MaxRecords</code>. </p> pub marker: Option<String>, /// <p> The maximum number of records to include in the response. If more records exist than the specified <code>MaxRecords</code> value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. </p> <p>Default: 100</p> <p>Constraints: Minimum 20, maximum 100.</p> pub max_records: Option<i64>, /// <p> A value that indicates to return only parameters for a specific source. Parameter sources can be <code>engine</code>, <code>service</code>, or <code>customer</code>. </p> pub source: Option<String>, } /// Serialize `DescribeDBClusterParametersMessage` contents to a `SignedRequest`. struct DescribeDBClusterParametersMessageSerializer; impl DescribeDBClusterParametersMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DescribeDBClusterParametersMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "DBClusterParameterGroupName"), &obj.db_cluster_parameter_group_name, ); if let Some(ref field_value) = obj.filters { FilterListSerializer::serialize( params, &format!("{}{}", prefix, "Filter"), field_value, ); } if let Some(ref field_value) = obj.marker { params.put(&format!("{}{}", prefix, "Marker"), &field_value); } if let Some(ref field_value) = obj.max_records { params.put( &format!("{}{}", prefix, "MaxRecords"), &field_value.to_string(), ); } if let Some(ref field_value) = obj.source { params.put(&format!("{}{}", prefix, "Source"), &field_value); } } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DescribeDBClusterSnapshotAttributesMessage { /// <p>The identifier for the DB cluster snapshot to describe the attributes for.</p> pub db_cluster_snapshot_identifier: String, } /// Serialize `DescribeDBClusterSnapshotAttributesMessage` contents to a `SignedRequest`. 
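// The `name` argument of every `serialize` here becomes a key prefix: top-level
// callers pass "", while nested serializers pass a dotted path. A sketch with a
// hypothetical outer prefix:
//
//     let msg = DescribeDBClusterSnapshotAttributesMessage {
//         db_cluster_snapshot_identifier: "my-snapshot".to_string(),
//     };
//     let mut params = Params::new();
//     DescribeDBClusterSnapshotAttributesMessageSerializer::serialize(&mut params, "Outer", &msg);
//     // yields the single key Outer.DBClusterSnapshotIdentifier=my-snapshot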
struct DescribeDBClusterSnapshotAttributesMessageSerializer; impl DescribeDBClusterSnapshotAttributesMessageSerializer { fn serialize( params: &mut Params, name: &str, obj: &DescribeDBClusterSnapshotAttributesMessage, ) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "DBClusterSnapshotIdentifier"), &obj.db_cluster_snapshot_identifier, ); } } #[derive(Default, Debug, Clone, PartialEq)] pub struct DescribeDBClusterSnapshotAttributesResult { pub db_cluster_snapshot_attributes_result: Option<DBClusterSnapshotAttributesResult>, } struct DescribeDBClusterSnapshotAttributesResultDeserializer; impl DescribeDBClusterSnapshotAttributesResultDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<DescribeDBClusterSnapshotAttributesResult, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = DescribeDBClusterSnapshotAttributesResult::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "DBClusterSnapshotAttributesResult" => { obj.db_cluster_snapshot_attributes_result = Some(try!( DBClusterSnapshotAttributesResultDeserializer::deserialize( "DBClusterSnapshotAttributesResult", stack ) )); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DescribeDBClusterSnapshotsMessage { /// <p><p>The ID of the DB cluster to retrieve the list of DB cluster snapshots for. This parameter can&#39;t be used in conjunction with the <code>DBClusterSnapshotIdentifier</code> parameter. This parameter is not case-sensitive. </p> <p>Constraints:</p> <ul> <li> <p>If supplied, must match the identifier of an existing DBCluster.</p> </li> </ul></p> pub db_cluster_identifier: Option<String>, /// <p><p>A specific DB cluster snapshot identifier to describe. This parameter can&#39;t be used in conjunction with the <code>DBClusterIdentifier</code> parameter. This value is stored as a lowercase string. </p> <p>Constraints:</p> <ul> <li> <p>If supplied, must match the identifier of an existing DBClusterSnapshot.</p> </li> <li> <p>If this identifier is for an automated snapshot, the <code>SnapshotType</code> parameter must also be specified.</p> </li> </ul></p> pub db_cluster_snapshot_identifier: Option<String>, /// <p>This parameter is not currently supported.</p> pub filters: Option<Vec<Filter>>, /// <p>True to include manual DB cluster snapshots that are public and can be copied or restored by any AWS account, and otherwise false. The default is <code>false</code>.</p> <p>You can share a manual DB cluster snapshot as public by using the <a>ModifyDBClusterSnapshotAttribute</a> API action.</p> pub include_public: Option<bool>, /// <p>True to include shared manual DB cluster snapshots from other AWS accounts that this AWS account has been given permission to copy or restore, and otherwise false.
The default is <code>false</code>.</p> <p>You can give an AWS account permission to restore a manual DB cluster snapshot from another AWS account by the <a>ModifyDBClusterSnapshotAttribute</a> API action.</p> pub include_shared: Option<bool>, /// <p>An optional pagination token provided by a previous <code>DescribeDBClusterSnapshots</code> request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by <code>MaxRecords</code>. </p> pub marker: Option<String>, /// <p>The maximum number of records to include in the response. If more records exist than the specified <code>MaxRecords</code> value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. </p> <p>Default: 100</p> <p>Constraints: Minimum 20, maximum 100.</p> pub max_records: Option<i64>, /// <p>The type of DB cluster snapshots to be returned. You can specify one of the following values:</p> <ul> <li> <p> <code>automated</code> - Return all DB cluster snapshots that have been automatically taken by Amazon Neptune for my AWS account.</p> </li> <li> <p> <code>manual</code> - Return all DB cluster snapshots that have been taken by my AWS account.</p> </li> <li> <p> <code>shared</code> - Return all manual DB cluster snapshots that have been shared to my AWS account.</p> </li> <li> <p> <code>public</code> - Return all DB cluster snapshots that have been marked as public.</p> </li> </ul> <p>If you don't specify a <code>SnapshotType</code> value, then both automated and manual DB cluster snapshots are returned. You can include shared DB cluster snapshots with these results by setting the <code>IncludeShared</code> parameter to <code>true</code>. You can include public DB cluster snapshots with these results by setting the <code>IncludePublic</code> parameter to <code>true</code>.</p> <p>The <code>IncludeShared</code> and <code>IncludePublic</code> parameters don't apply for <code>SnapshotType</code> values of <code>manual</code> or <code>automated</code>. The <code>IncludePublic</code> parameter doesn't apply when <code>SnapshotType</code> is set to <code>shared</code>. The <code>IncludeShared</code> parameter doesn't apply when <code>SnapshotType</code> is set to <code>public</code>.</p> pub snapshot_type: Option<String>, } /// Serialize `DescribeDBClusterSnapshotsMessage` contents to a `SignedRequest`. 
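// Sketch of the SnapshotType/IncludeShared interplay documented above (values
// hypothetical): leaving `snapshot_type` unset returns automated plus manual
// snapshots, and `include_shared` widens that to snapshots shared to this account.
//
//     let msg = DescribeDBClusterSnapshotsMessage {
//         db_cluster_identifier: Some("my-cluster".to_string()),
//         include_shared: Some(true),
//         ..Default::default() // SnapshotType stays None
//     };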
struct DescribeDBClusterSnapshotsMessageSerializer; impl DescribeDBClusterSnapshotsMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DescribeDBClusterSnapshotsMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.db_cluster_identifier { params.put( &format!("{}{}", prefix, "DBClusterIdentifier"), &field_value, ); } if let Some(ref field_value) = obj.db_cluster_snapshot_identifier { params.put( &format!("{}{}", prefix, "DBClusterSnapshotIdentifier"), &field_value, ); } if let Some(ref field_value) = obj.filters { FilterListSerializer::serialize( params, &format!("{}{}", prefix, "Filter"), field_value, ); } if let Some(ref field_value) = obj.include_public { params.put( &format!("{}{}", prefix, "IncludePublic"), &field_value.to_string(), ); } if let Some(ref field_value) = obj.include_shared { params.put( &format!("{}{}", prefix, "IncludeShared"), &field_value.to_string(), ); } if let Some(ref field_value) = obj.marker { params.put(&format!("{}{}", prefix, "Marker"), &field_value); } if let Some(ref field_value) = obj.max_records { params.put( &format!("{}{}", prefix, "MaxRecords"), &field_value.to_string(), ); } if let Some(ref field_value) = obj.snapshot_type { params.put(&format!("{}{}", prefix, "SnapshotType"), &field_value); } } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DescribeDBClustersMessage { /// <p><p>The user-supplied DB cluster identifier. If this parameter is specified, information from only the specific DB cluster is returned. This parameter isn&#39;t case-sensitive.</p> <p>Constraints:</p> <ul> <li> <p>If supplied, must match an existing DBClusterIdentifier.</p> </li> </ul></p> pub db_cluster_identifier: Option<String>, /// <p><p>A filter that specifies one or more DB clusters to describe.</p> <p>Supported filters:</p> <ul> <li> <p> <code>db-cluster-id</code> - Accepts DB cluster identifiers and DB cluster Amazon Resource Names (ARNs). The results list will only include information about the DB clusters identified by these ARNs.</p> </li> </ul></p> pub filters: Option<Vec<Filter>>, /// <p>An optional pagination token provided by a previous <a>DescribeDBClusters</a> request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by <code>MaxRecords</code>. </p> pub marker: Option<String>, /// <p>The maximum number of records to include in the response. If more records exist than the specified <code>MaxRecords</code> value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. </p> <p>Default: 100</p> <p>Constraints: Minimum 20, maximum 100.</p> pub max_records: Option<i64>, } /// Serialize `DescribeDBClustersMessage` contents to a `SignedRequest`. 
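// Marker/MaxRecords implement cursor-style pagination: feed each response's
// marker back into the next request until it comes back `None`. A sketch, where
// `send` is a hypothetical stand-in for the actual service call and `page` is
// assumed to carry `db_clusters`/`marker` fields:
//
//     let mut marker: Option<String> = None;
//     loop {
//         let msg = DescribeDBClustersMessage {
//             marker: marker.clone(),
//             max_records: Some(20),
//             ..Default::default()
//         };
//         let page = send(&msg)?;
//         // ...consume page.db_clusters...
//         marker = page.marker;
//         if marker.is_none() { break; }
//     }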
struct DescribeDBClustersMessageSerializer; impl DescribeDBClustersMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DescribeDBClustersMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.db_cluster_identifier { params.put( &format!("{}{}", prefix, "DBClusterIdentifier"), &field_value, ); } if let Some(ref field_value) = obj.filters { FilterListSerializer::serialize( params, &format!("{}{}", prefix, "Filter"), field_value, ); } if let Some(ref field_value) = obj.marker { params.put(&format!("{}{}", prefix, "Marker"), &field_value); } if let Some(ref field_value) = obj.max_records { params.put( &format!("{}{}", prefix, "MaxRecords"), &field_value.to_string(), ); } } } #[derive(Default, Debug, Clone, PartialEq)] pub struct DescribeDBEngineVersionsMessage { /// <p><p>The name of a specific DB parameter group family to return details for.</p> <p>Constraints:</p> <ul> <li> <p>If supplied, must match an existing DBParameterGroupFamily.</p> </li> </ul></p> pub db_parameter_group_family: Option<String>, /// <p>Indicates that only the default version of the specified engine or engine and major version combination is returned.</p> pub default_only: Option<bool>, /// <p>The database engine to return.</p> pub engine: Option<String>, /// <p>The database engine version to return.</p> <p>Example: <code>5.1.49</code> </p> pub engine_version: Option<String>, /// <p>Not currently supported.</p> pub filters: Option<Vec<Filter>>, /// <p>If this parameter is specified and the requested engine supports the <code>CharacterSetName</code> parameter for <code>CreateDBInstance</code>, the response includes a list of supported character sets for each engine version. </p> pub list_supported_character_sets: Option<bool>, /// <p>If this parameter is specified and the requested engine supports the <code>TimeZone</code> parameter for <code>CreateDBInstance</code>, the response includes a list of supported time zones for each engine version. </p> pub list_supported_timezones: Option<bool>, /// <p> An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by <code>MaxRecords</code>. </p> pub marker: Option<String>, /// <p> The maximum number of records to include in the response. If more than the <code>MaxRecords</code> value is available, a pagination token called a marker is included in the response so that the following results can be retrieved. </p> <p>Default: 100</p> <p>Constraints: Minimum 20, maximum 100.</p> pub max_records: Option<i64>, } /// Serialize `DescribeDBEngineVersionsMessage` contents to a `SignedRequest`. 
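// Sketch: ask only for the default version of one engine (values hypothetical):
//
//     let msg = DescribeDBEngineVersionsMessage {
//         engine: Some("neptune".to_string()),
//         default_only: Some(true),
//         ..Default::default()
//     };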
struct DescribeDBEngineVersionsMessageSerializer; impl DescribeDBEngineVersionsMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DescribeDBEngineVersionsMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.db_parameter_group_family { params.put( &format!("{}{}", prefix, "DBParameterGroupFamily"), &field_value, ); } if let Some(ref field_value) = obj.default_only { params.put( &format!("{}{}", prefix, "DefaultOnly"), &field_value.to_string(), ); } if let Some(ref field_value) = obj.engine { params.put(&format!("{}{}", prefix, "Engine"), &field_value); } if let Some(ref field_value) = obj.engine_version { params.put(&format!("{}{}", prefix, "EngineVersion"), &field_value); } if let Some(ref field_value) = obj.filters { FilterListSerializer::serialize( params, &format!("{}{}", prefix, "Filter"), field_value, ); } if let Some(ref field_value) = obj.list_supported_character_sets { params.put( &format!("{}{}", prefix, "ListSupportedCharacterSets"), &field_value.to_string(), ); } if let Some(ref field_value) = obj.list_supported_timezones { params.put( &format!("{}{}", prefix, "ListSupportedTimezones"), &field_value.to_string(), ); } if let Some(ref field_value) = obj.marker { params.put(&format!("{}{}", prefix, "Marker"), &field_value); } if let Some(ref field_value) = obj.max_records { params.put( &format!("{}{}", prefix, "MaxRecords"), &field_value.to_string(), ); } } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DescribeDBInstancesMessage { /// <p><p>The user-supplied instance identifier. If this parameter is specified, information from only the specific DB instance is returned. This parameter isn&#39;t case-sensitive.</p> <p>Constraints:</p> <ul> <li> <p>If supplied, must match the identifier of an existing DBInstance.</p> </li> </ul></p> pub db_instance_identifier: Option<String>, /// <p><p>A filter that specifies one or more DB instances to describe.</p> <p>Supported filters:</p> <ul> <li> <p> <code>db-cluster-id</code> - Accepts DB cluster identifiers and DB cluster Amazon Resource Names (ARNs). The results list will only include information about the DB instances associated with the DB clusters identified by these ARNs.</p> </li> <li> <p> <code>db-instance-id</code> - Accepts DB instance identifiers and DB instance Amazon Resource Names (ARNs). The results list will only include information about the DB instances identified by these ARNs.</p> </li> </ul></p> pub filters: Option<Vec<Filter>>, /// <p> An optional pagination token provided by a previous <code>DescribeDBInstances</code> request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by <code>MaxRecords</code>. </p> pub marker: Option<String>, /// <p> The maximum number of records to include in the response. If more records exist than the specified <code>MaxRecords</code> value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. </p> <p>Default: 100</p> <p>Constraints: Minimum 20, maximum 100.</p> pub max_records: Option<i64>, } /// Serialize `DescribeDBInstancesMessage` contents to a `SignedRequest`. 
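// Sketch of the documented `db-cluster-id` filter, assuming this crate's
// `Filter` type exposes `name` and `values` fields (values hypothetical):
//
//     let msg = DescribeDBInstancesMessage {
//         filters: Some(vec![Filter {
//             name: "db-cluster-id".to_string(),
//             values: vec!["my-cluster".to_string()],
//         }]),
//         ..Default::default()
//     };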
struct DescribeDBInstancesMessageSerializer; impl DescribeDBInstancesMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DescribeDBInstancesMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.db_instance_identifier { params.put( &format!("{}{}", prefix, "DBInstanceIdentifier"), &field_value, ); } if let Some(ref field_value) = obj.filters { FilterListSerializer::serialize( params, &format!("{}{}", prefix, "Filter"), field_value, ); } if let Some(ref field_value) = obj.marker { params.put(&format!("{}{}", prefix, "Marker"), &field_value); } if let Some(ref field_value) = obj.max_records { params.put( &format!("{}{}", prefix, "MaxRecords"), &field_value.to_string(), ); } } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DescribeDBParameterGroupsMessage { /// <p><p>The name of a specific DB parameter group to return details for.</p> <p>Constraints:</p> <ul> <li> <p>If supplied, must match the name of an existing DBClusterParameterGroup.</p> </li> </ul></p> pub db_parameter_group_name: Option<String>, /// <p>This parameter is not currently supported.</p> pub filters: Option<Vec<Filter>>, /// <p> An optional pagination token provided by a previous <code>DescribeDBParameterGroups</code> request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by <code>MaxRecords</code>. </p> pub marker: Option<String>, /// <p> The maximum number of records to include in the response. If more records exist than the specified <code>MaxRecords</code> value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. </p> <p>Default: 100</p> <p>Constraints: Minimum 20, maximum 100.</p> pub max_records: Option<i64>, } /// Serialize `DescribeDBParameterGroupsMessage` contents to a `SignedRequest`. struct DescribeDBParameterGroupsMessageSerializer; impl DescribeDBParameterGroupsMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DescribeDBParameterGroupsMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.db_parameter_group_name { params.put( &format!("{}{}", prefix, "DBParameterGroupName"), &field_value, ); } if let Some(ref field_value) = obj.filters { FilterListSerializer::serialize( params, &format!("{}{}", prefix, "Filter"), field_value, ); } if let Some(ref field_value) = obj.marker { params.put(&format!("{}{}", prefix, "Marker"), &field_value); } if let Some(ref field_value) = obj.max_records { params.put( &format!("{}{}", prefix, "MaxRecords"), &field_value.to_string(), ); } } } #[derive(Default, Debug, Clone, PartialEq)] pub struct DescribeDBParametersMessage { /// <p><p>The name of a specific DB parameter group to return details for.</p> <p>Constraints:</p> <ul> <li> <p>If supplied, must match the name of an existing DBParameterGroup.</p> </li> </ul></p> pub db_parameter_group_name: String, /// <p>This parameter is not currently supported.</p> pub filters: Option<Vec<Filter>>, /// <p> An optional pagination token provided by a previous <code>DescribeDBParameters</code> request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by <code>MaxRecords</code>. </p> pub marker: Option<String>, /// <p> The maximum number of records to include in the response. 
If more records exist than the specified <code>MaxRecords</code> value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. </p> <p>Default: 100</p> <p>Constraints: Minimum 20, maximum 100.</p> pub max_records: Option<i64>, /// <p>The parameter types to return.</p> <p>Default: All parameter types returned</p> <p>Valid Values: <code>user | system | engine-default</code> </p> pub source: Option<String>, } /// Serialize `DescribeDBParametersMessage` contents to a `SignedRequest`. struct DescribeDBParametersMessageSerializer; impl DescribeDBParametersMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DescribeDBParametersMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "DBParameterGroupName"), &obj.db_parameter_group_name, ); if let Some(ref field_value) = obj.filters { FilterListSerializer::serialize( params, &format!("{}{}", prefix, "Filter"), field_value, ); } if let Some(ref field_value) = obj.marker { params.put(&format!("{}{}", prefix, "Marker"), &field_value); } if let Some(ref field_value) = obj.max_records { params.put( &format!("{}{}", prefix, "MaxRecords"), &field_value.to_string(), ); } if let Some(ref field_value) = obj.source { params.put(&format!("{}{}", prefix, "Source"), &field_value); } } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DescribeDBSubnetGroupsMessage { /// <p>The name of the DB subnet group to return details for.</p> pub db_subnet_group_name: Option<String>, /// <p>This parameter is not currently supported.</p> pub filters: Option<Vec<Filter>>, /// <p> An optional pagination token provided by a previous DescribeDBSubnetGroups request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by <code>MaxRecords</code>. </p> pub marker: Option<String>, /// <p> The maximum number of records to include in the response. If more records exist than the specified <code>MaxRecords</code> value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. </p> <p>Default: 100</p> <p>Constraints: Minimum 20, maximum 100.</p> pub max_records: Option<i64>, } /// Serialize `DescribeDBSubnetGroupsMessage` contents to a `SignedRequest`. 
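// Note: these serializers perform no client-side validation; a MaxRecords value
// outside the documented 20..=100 range is sent as-is, and the service would be
// expected to reject it. Sketch (value deliberately out of range):
//
//     let msg = DescribeDBSubnetGroupsMessage {
//         max_records: Some(5),
//         ..Default::default()
//     };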
struct DescribeDBSubnetGroupsMessageSerializer; impl DescribeDBSubnetGroupsMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DescribeDBSubnetGroupsMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.db_subnet_group_name { params.put(&format!("{}{}", prefix, "DBSubnetGroupName"), &field_value); } if let Some(ref field_value) = obj.filters { FilterListSerializer::serialize( params, &format!("{}{}", prefix, "Filter"), field_value, ); } if let Some(ref field_value) = obj.marker { params.put(&format!("{}{}", prefix, "Marker"), &field_value); } if let Some(ref field_value) = obj.max_records { params.put( &format!("{}{}", prefix, "MaxRecords"), &field_value.to_string(), ); } } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DescribeEngineDefaultClusterParametersMessage { /// <p>The name of the DB cluster parameter group family to return engine parameter information for.</p> pub db_parameter_group_family: String, /// <p>This parameter is not currently supported.</p> pub filters: Option<Vec<Filter>>, /// <p> An optional pagination token provided by a previous <code>DescribeEngineDefaultClusterParameters</code> request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by <code>MaxRecords</code>. </p> pub marker: Option<String>, /// <p> The maximum number of records to include in the response. If more records exist than the specified <code>MaxRecords</code> value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. </p> <p>Default: 100</p> <p>Constraints: Minimum 20, maximum 100.</p> pub max_records: Option<i64>, } /// Serialize `DescribeEngineDefaultClusterParametersMessage` contents to a `SignedRequest`. struct DescribeEngineDefaultClusterParametersMessageSerializer; impl DescribeEngineDefaultClusterParametersMessageSerializer { fn serialize( params: &mut Params, name: &str, obj: &DescribeEngineDefaultClusterParametersMessage, ) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "DBParameterGroupFamily"), &obj.db_parameter_group_family, ); if let Some(ref field_value) = obj.filters { FilterListSerializer::serialize( params, &format!("{}{}", prefix, "Filter"), field_value, ); } if let Some(ref field_value) = obj.marker { params.put(&format!("{}{}", prefix, "Marker"), &field_value); } if let Some(ref field_value) = obj.max_records { params.put( &format!("{}{}", prefix, "MaxRecords"), &field_value.to_string(), ); } } } #[derive(Default, Debug, Clone, PartialEq)] pub struct DescribeEngineDefaultClusterParametersResult { pub engine_defaults: Option<EngineDefaults>, } struct DescribeEngineDefaultClusterParametersResultDeserializer; impl DescribeEngineDefaultClusterParametersResultDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<DescribeEngineDefaultClusterParametersResult, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = DescribeEngineDefaultClusterParametersResult::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "EngineDefaults" => { obj.engine_defaults = Some(try!(EngineDefaultsDeserializer::deserialize( "EngineDefaults", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DescribeEngineDefaultParametersMessage { /// <p>The name of the DB parameter group family.</p> pub db_parameter_group_family: String, /// <p>Not currently supported.</p> pub filters: Option<Vec<Filter>>, /// <p> An optional pagination token provided by a previous <code>DescribeEngineDefaultParameters</code> request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by <code>MaxRecords</code>. </p> pub marker: Option<String>, /// <p> The maximum number of records to include in the response. If more records exist than the specified <code>MaxRecords</code> value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. </p> <p>Default: 100</p> <p>Constraints: Minimum 20, maximum 100.</p> pub max_records: Option<i64>, } /// Serialize `DescribeEngineDefaultParametersMessage` contents to a `SignedRequest`. struct DescribeEngineDefaultParametersMessageSerializer; impl DescribeEngineDefaultParametersMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DescribeEngineDefaultParametersMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "DBParameterGroupFamily"), &obj.db_parameter_group_family, ); if let Some(ref field_value) = obj.filters { FilterListSerializer::serialize( params, &format!("{}{}", prefix, "Filter"), field_value, ); } if let Some(ref field_value) = obj.marker { params.put(&format!("{}{}", prefix, "Marker"), &field_value); } if let Some(ref field_value) = obj.max_records { params.put( &format!("{}{}", prefix, "MaxRecords"), &field_value.to_string(), ); } } } #[derive(Default, Debug, Clone, PartialEq)] pub struct DescribeEngineDefaultParametersResult { pub engine_defaults: Option<EngineDefaults>, } struct DescribeEngineDefaultParametersResultDeserializer; impl DescribeEngineDefaultParametersResultDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<DescribeEngineDefaultParametersResult, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = DescribeEngineDefaultParametersResult::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "EngineDefaults" => { obj.engine_defaults = Some(try!(EngineDefaultsDeserializer::deserialize( "EngineDefaults", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DescribeEventCategoriesMessage { /// <p>This parameter is not currently supported.</p> pub filters: Option<Vec<Filter>>, /// <p>The type of source that is generating the events.</p> <p>Valid values: db-instance | db-parameter-group | db-security-group | db-snapshot</p> pub source_type: Option<String>, } /// Serialize `DescribeEventCategoriesMessage` contents to a `SignedRequest`. struct DescribeEventCategoriesMessageSerializer; impl DescribeEventCategoriesMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DescribeEventCategoriesMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.filters { FilterListSerializer::serialize( params, &format!("{}{}", prefix, "Filter"), field_value, ); } if let Some(ref field_value) = obj.source_type { params.put(&format!("{}{}", prefix, "SourceType"), &field_value); } } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DescribeEventSubscriptionsMessage { /// <p>This parameter is not currently supported.</p> pub filters: Option<Vec<Filter>>, /// <p> An optional pagination token provided by a previous DescribeOrderableDBInstanceOptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by <code>MaxRecords</code> . </p> pub marker: Option<String>, /// <p> The maximum number of records to include in the response. If more records exist than the specified <code>MaxRecords</code> value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. </p> <p>Default: 100</p> <p>Constraints: Minimum 20, maximum 100.</p> pub max_records: Option<i64>, /// <p>The name of the event notification subscription you want to describe.</p> pub subscription_name: Option<String>, } /// Serialize `DescribeEventSubscriptionsMessage` contents to a `SignedRequest`. struct DescribeEventSubscriptionsMessageSerializer; impl DescribeEventSubscriptionsMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DescribeEventSubscriptionsMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.filters { FilterListSerializer::serialize( params, &format!("{}{}", prefix, "Filter"), field_value, ); } if let Some(ref field_value) = obj.marker { params.put(&format!("{}{}", prefix, "Marker"), &field_value); } if let Some(ref field_value) = obj.max_records { params.put( &format!("{}{}", prefix, "MaxRecords"), &field_value.to_string(), ); } if let Some(ref field_value) = obj.subscription_name { params.put(&format!("{}{}", prefix, "SubscriptionName"), &field_value); } } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DescribeEventsMessage { /// <p>The number of minutes to retrieve events for.</p> <p>Default: 60</p> pub duration: Option<i64>, /// <p> The end of the time interval for which to retrieve events, specified in ISO 8601 format. 
For more information about ISO 8601, go to the <a href="http://en.wikipedia.org/wiki/ISO_8601">ISO 8601 Wikipedia page</a>. </p> <p>Example: 2009-07-08T18:00Z</p> pub end_time: Option<String>, /// <p>A list of event categories that trigger notifications for an event notification subscription.</p> pub event_categories: Option<Vec<String>>, /// <p>This parameter is not currently supported.</p> pub filters: Option<Vec<Filter>>, /// <p> An optional pagination token provided by a previous DescribeEvents request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by <code>MaxRecords</code>. </p> pub marker: Option<String>, /// <p> The maximum number of records to include in the response. If more records exist than the specified <code>MaxRecords</code> value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. </p> <p>Default: 100</p> <p>Constraints: Minimum 20, maximum 100.</p> pub max_records: Option<i64>, /// <p><p>The identifier of the event source for which events are returned. If not specified, then all sources are included in the response.</p> <p>Constraints:</p> <ul> <li> <p>If SourceIdentifier is supplied, SourceType must also be provided.</p> </li> <li> <p>If the source type is <code>DBInstance</code>, then a <code>DBInstanceIdentifier</code> must be supplied.</p> </li> <li> <p>If the source type is <code>DBSecurityGroup</code>, a <code>DBSecurityGroupName</code> must be supplied.</p> </li> <li> <p>If the source type is <code>DBParameterGroup</code>, a <code>DBParameterGroupName</code> must be supplied.</p> </li> <li> <p>If the source type is <code>DBSnapshot</code>, a <code>DBSnapshotIdentifier</code> must be supplied.</p> </li> <li> <p>Cannot end with a hyphen or contain two consecutive hyphens.</p> </li> </ul></p> pub source_identifier: Option<String>, /// <p>The event source to retrieve events for. If no value is specified, all events are returned.</p> pub source_type: Option<String>, /// <p> The beginning of the time interval to retrieve events for, specified in ISO 8601 format. For more information about ISO 8601, go to the <a href="http://en.wikipedia.org/wiki/ISO_8601">ISO 8601 Wikipedia page</a>. </p> <p>Example: 2009-07-08T18:00Z</p> pub start_time: Option<String>, } /// Serialize `DescribeEventsMessage` contents to a `SignedRequest`.
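// Sketch: events for one instance over a fixed ISO 8601 window, reusing the
// documented example timestamp format (identifiers hypothetical):
//
//     let msg = DescribeEventsMessage {
//         source_type: Some("db-instance".to_string()),
//         source_identifier: Some("my-instance".to_string()),
//         start_time: Some("2009-07-08T18:00Z".to_string()),
//         end_time: Some("2009-07-08T19:00Z".to_string()),
//         ..Default::default()
//     };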
struct DescribeEventsMessageSerializer; impl DescribeEventsMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DescribeEventsMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.duration { params.put( &format!("{}{}", prefix, "Duration"), &field_value.to_string(), ); } if let Some(ref field_value) = obj.end_time { params.put(&format!("{}{}", prefix, "EndTime"), &field_value); } if let Some(ref field_value) = obj.event_categories { EventCategoriesListSerializer::serialize( params, &format!("{}{}", prefix, "EventCategory"), field_value, ); } if let Some(ref field_value) = obj.filters { FilterListSerializer::serialize( params, &format!("{}{}", prefix, "Filter"), field_value, ); } if let Some(ref field_value) = obj.marker { params.put(&format!("{}{}", prefix, "Marker"), &field_value); } if let Some(ref field_value) = obj.max_records { params.put( &format!("{}{}", prefix, "MaxRecords"), &field_value.to_string(), ); } if let Some(ref field_value) = obj.source_identifier { params.put(&format!("{}{}", prefix, "SourceIdentifier"), &field_value); } if let Some(ref field_value) = obj.source_type { params.put(&format!("{}{}", prefix, "SourceType"), &field_value); } if let Some(ref field_value) = obj.start_time { params.put(&format!("{}{}", prefix, "StartTime"), &field_value); } } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DescribeOrderableDBInstanceOptionsMessage { /// <p>The DB instance class filter value. Specify this parameter to show only the available offerings matching the specified DB instance class.</p> pub db_instance_class: Option<String>, /// <p>The name of the engine to retrieve DB instance options for.</p> pub engine: String, /// <p>The engine version filter value. Specify this parameter to show only the available offerings matching the specified engine version.</p> pub engine_version: Option<String>, /// <p>This parameter is not currently supported.</p> pub filters: Option<Vec<Filter>>, /// <p>The license model filter value. Specify this parameter to show only the available offerings matching the specified license model.</p> pub license_model: Option<String>, /// <p> An optional pagination token provided by a previous DescribeOrderableDBInstanceOptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by <code>MaxRecords</code> . </p> pub marker: Option<String>, /// <p> The maximum number of records to include in the response. If more records exist than the specified <code>MaxRecords</code> value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. </p> <p>Default: 100</p> <p>Constraints: Minimum 20, maximum 100.</p> pub max_records: Option<i64>, /// <p>The VPC filter value. Specify this parameter to show only the available VPC or non-VPC offerings.</p> pub vpc: Option<bool>, } /// Serialize `DescribeOrderableDBInstanceOptionsMessage` contents to a `SignedRequest`. 
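// `engine` is the one required field on this message, so the empty string that
// `..Default::default()` alone would send would be rejected server-side. Sketch
// (values hypothetical):
//
//     let msg = DescribeOrderableDBInstanceOptionsMessage {
//         engine: "neptune".to_string(),
//         vpc: Some(true), // restrict to VPC offerings
//         ..Default::default()
//     };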
struct DescribeOrderableDBInstanceOptionsMessageSerializer; impl DescribeOrderableDBInstanceOptionsMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DescribeOrderableDBInstanceOptionsMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.db_instance_class { params.put(&format!("{}{}", prefix, "DBInstanceClass"), &field_value); } params.put(&format!("{}{}", prefix, "Engine"), &obj.engine); if let Some(ref field_value) = obj.engine_version { params.put(&format!("{}{}", prefix, "EngineVersion"), &field_value); } if let Some(ref field_value) = obj.filters { FilterListSerializer::serialize( params, &format!("{}{}", prefix, "Filter"), field_value, ); } if let Some(ref field_value) = obj.license_model { params.put(&format!("{}{}", prefix, "LicenseModel"), &field_value); } if let Some(ref field_value) = obj.marker { params.put(&format!("{}{}", prefix, "Marker"), &field_value); } if let Some(ref field_value) = obj.max_records { params.put( &format!("{}{}", prefix, "MaxRecords"), &field_value.to_string(), ); } if let Some(ref field_value) = obj.vpc { params.put(&format!("{}{}", prefix, "Vpc"), &field_value.to_string()); } } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DescribePendingMaintenanceActionsMessage { /// <p><p>A filter that specifies one or more resources to return pending maintenance actions for.</p> <p>Supported filters:</p> <ul> <li> <p> <code>db-cluster-id</code> - Accepts DB cluster identifiers and DB cluster Amazon Resource Names (ARNs). The results list will only include pending maintenance actions for the DB clusters identified by these ARNs.</p> </li> <li> <p> <code>db-instance-id</code> - Accepts DB instance identifiers and DB instance ARNs. The results list will only include pending maintenance actions for the DB instances identified by these ARNs.</p> </li> </ul></p> pub filters: Option<Vec<Filter>>, /// <p> An optional pagination token provided by a previous <code>DescribePendingMaintenanceActions</code> request. If this parameter is specified, the response includes only records beyond the marker, up to a number of records specified by <code>MaxRecords</code>. </p> pub marker: Option<String>, /// <p> The maximum number of records to include in the response. If more records exist than the specified <code>MaxRecords</code> value, a pagination token called a marker is included in the response so that the remaining results can be retrieved. </p> <p>Default: 100</p> <p>Constraints: Minimum 20, maximum 100.</p> pub max_records: Option<i64>, /// <p>The ARN of a resource to return pending maintenance actions for.</p> pub resource_identifier: Option<String>, } /// Serialize `DescribePendingMaintenanceActionsMessage` contents to a `SignedRequest`. 
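// Sketch: target one resource by ARN instead of filtering (ARN hypothetical):
//
//     let msg = DescribePendingMaintenanceActionsMessage {
//         resource_identifier: Some(
//             "arn:aws:rds:us-east-1:123456789012:db:my-instance".to_string(),
//         ),
//         ..Default::default()
//     };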
struct DescribePendingMaintenanceActionsMessageSerializer; impl DescribePendingMaintenanceActionsMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &DescribePendingMaintenanceActionsMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.filters { FilterListSerializer::serialize( params, &format!("{}{}", prefix, "Filter"), field_value, ); } if let Some(ref field_value) = obj.marker { params.put(&format!("{}{}", prefix, "Marker"), &field_value); } if let Some(ref field_value) = obj.max_records { params.put( &format!("{}{}", prefix, "MaxRecords"), &field_value.to_string(), ); } if let Some(ref field_value) = obj.resource_identifier { params.put(&format!("{}{}", prefix, "ResourceIdentifier"), &field_value); } } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DescribeValidDBInstanceModificationsMessage { /// <p>The customer identifier or the ARN of your DB instance. </p> pub db_instance_identifier: String, } /// Serialize `DescribeValidDBInstanceModificationsMessage` contents to a `SignedRequest`. struct DescribeValidDBInstanceModificationsMessageSerializer; impl DescribeValidDBInstanceModificationsMessageSerializer { fn serialize( params: &mut Params, name: &str, obj: &DescribeValidDBInstanceModificationsMessage, ) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "DBInstanceIdentifier"), &obj.db_instance_identifier, ); } } #[derive(Default, Debug, Clone, PartialEq)] pub struct DescribeValidDBInstanceModificationsResult { pub valid_db_instance_modifications_message: Option<ValidDBInstanceModificationsMessage>, } struct DescribeValidDBInstanceModificationsResultDeserializer; impl DescribeValidDBInstanceModificationsResultDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<DescribeValidDBInstanceModificationsResult, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = DescribeValidDBInstanceModificationsResult::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "ValidDBInstanceModificationsMessage" => { obj.valid_db_instance_modifications_message = Some(try!( ValidDBInstanceModificationsMessageDeserializer::deserialize( "ValidDBInstanceModificationsMessage", stack ) )); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p>An Active Directory Domain membership record associated with the DB instance.</p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DomainMembership { /// <p>The identifier of the Active Directory Domain.</p> pub domain: Option<String>, /// <p>The fully qualified domain name of the Active Directory Domain.</p> pub fqdn: Option<String>, /// <p>The name of the IAM role to be used when making API calls to the Directory Service.</p> pub iam_role_name: Option<String>, /// <p>The status of the DB instance's Active Directory Domain membership, such as joined, pending-join, failed etc).</p> pub status: Option<String>, } struct DomainMembershipDeserializer; impl DomainMembershipDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<DomainMembership, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = DomainMembership::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "Domain" => { obj.domain = Some(try!(StringDeserializer::deserialize("Domain", stack))); } "FQDN" => { obj.fqdn = Some(try!(StringDeserializer::deserialize("FQDN", stack))); } "IAMRoleName" => { obj.iam_role_name = Some(try!(StringDeserializer::deserialize("IAMRoleName", stack))); } "Status" => { obj.status = Some(try!(StringDeserializer::deserialize("Status", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } struct DomainMembershipListDeserializer; impl DomainMembershipListDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<DomainMembership>, XmlParseError> { let mut obj = vec![]; try!(start_element(tag_name, stack)); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. 
})) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => { if name == "DomainMembership" { obj.push(try!(DomainMembershipDeserializer::deserialize( "DomainMembership", stack ))); } else { skip_tree(stack); } } DeserializerNext::Close => { try!(end_element(tag_name, stack)); break; } DeserializerNext::Skip => { stack.next(); } } } Ok(obj) } } struct DoubleDeserializer; impl DoubleDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<f64, XmlParseError> { try!(start_element(tag_name, stack)); let obj = f64::from_str(try!(characters(stack)).as_ref()).unwrap(); try!(end_element(tag_name, stack)); Ok(obj) } } struct DoubleOptionalDeserializer; impl DoubleOptionalDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<f64, XmlParseError> { try!(start_element(tag_name, stack)); let obj = f64::from_str(try!(characters(stack)).as_ref()).unwrap(); try!(end_element(tag_name, stack)); Ok(obj) } } /// <p>A range of double values.</p> #[derive(Default, Debug, Clone, PartialEq)] pub struct DoubleRange { /// <p>The minimum value in the range.</p> pub from: Option<f64>, /// <p>The maximum value in the range.</p> pub to: Option<f64>, } struct DoubleRangeDeserializer; impl DoubleRangeDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<DoubleRange, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = DoubleRange::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "From" => { obj.from = Some(try!(DoubleDeserializer::deserialize("From", stack))); } "To" => { obj.to = Some(try!(DoubleDeserializer::deserialize("To", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } struct DoubleRangeListDeserializer; impl DoubleRangeListDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<DoubleRange>, XmlParseError> { let mut obj = vec![]; try!(start_element(tag_name, stack)); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. 
})) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => { if name == "DoubleRange" { obj.push(try!(DoubleRangeDeserializer::deserialize( "DoubleRange", stack ))); } else { skip_tree(stack); } } DeserializerNext::Close => { try!(end_element(tag_name, stack)); break; } DeserializerNext::Skip => { stack.next(); } } } Ok(obj) } } /// <p><p>This data type is used as a response element in the following actions:</p> <ul> <li> <p> <a>CreateDBInstance</a> </p> </li> <li> <p> <a>DescribeDBInstances</a> </p> </li> <li> <p> <a>DeleteDBInstance</a> </p> </li> </ul></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct Endpoint { /// <p>Specifies the DNS address of the DB instance.</p> pub address: Option<String>, /// <p>Specifies the ID that Amazon Route 53 assigns when you create a hosted zone.</p> pub hosted_zone_id: Option<String>, /// <p>Specifies the port that the database engine is listening on.</p> pub port: Option<i64>, } struct EndpointDeserializer; impl EndpointDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Endpoint, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = Endpoint::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "Address" => { obj.address = Some(try!(StringDeserializer::deserialize("Address", stack))); } "HostedZoneId" => { obj.hosted_zone_id = Some(try!(StringDeserializer::deserialize("HostedZoneId", stack))); } "Port" => { obj.port = Some(try!(IntegerDeserializer::deserialize("Port", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p> Contains the result of a successful invocation of the <a>DescribeEngineDefaultParameters</a> action. </p> #[derive(Default, Debug, Clone, PartialEq)] pub struct EngineDefaults { /// <p>Specifies the name of the DB parameter group family that the engine default parameters apply to.</p> pub db_parameter_group_family: Option<String>, /// <p> An optional pagination token provided by a previous EngineDefaults request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by <code>MaxRecords</code> . </p> pub marker: Option<String>, /// <p>Contains a list of engine default parameters.</p> pub parameters: Option<Vec<Parameter>>, } struct EngineDefaultsDeserializer; impl EngineDefaultsDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<EngineDefaults, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = EngineDefaults::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "DBParameterGroupFamily" => { obj.db_parameter_group_family = Some(try!( StringDeserializer::deserialize("DBParameterGroupFamily", stack) )); } "Marker" => { obj.marker = Some(try!(StringDeserializer::deserialize("Marker", stack))); } "Parameters" => { obj.parameters = Some(try!(ParametersListDeserializer::deserialize( "Parameters", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p> This data type is used as a response element in the <a>DescribeEvents</a> action. </p> #[derive(Default, Debug, Clone, PartialEq)] pub struct Event { /// <p>Specifies the date and time of the event.</p> pub date: Option<String>, /// <p>Specifies the category for the event.</p> pub event_categories: Option<Vec<String>>, /// <p>Provides the text of this event.</p> pub message: Option<String>, /// <p>The Amazon Resource Name (ARN) for the event.</p> pub source_arn: Option<String>, /// <p>Provides the identifier for the source of the event.</p> pub source_identifier: Option<String>, /// <p>Specifies the source type for this event.</p> pub source_type: Option<String>, } struct EventDeserializer; impl EventDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Event, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = Event::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "Date" => { obj.date = Some(try!(TStampDeserializer::deserialize("Date", stack))); } "EventCategories" => { obj.event_categories = Some(try!( EventCategoriesListDeserializer::deserialize("EventCategories", stack) )); } "Message" => { obj.message = Some(try!(StringDeserializer::deserialize("Message", stack))); } "SourceArn" => { obj.source_arn = Some(try!(StringDeserializer::deserialize("SourceArn", stack))); } "SourceIdentifier" => { obj.source_identifier = Some(try!(StringDeserializer::deserialize( "SourceIdentifier", stack ))); } "SourceType" => { obj.source_type = Some(try!(SourceTypeDeserializer::deserialize( "SourceType", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } struct EventCategoriesListDeserializer; impl EventCategoriesListDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<String>, XmlParseError> { let mut obj = vec![]; try!(start_element(tag_name, stack)); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => { if name == "EventCategory" { obj.push(try!(StringDeserializer::deserialize( "EventCategory", stack ))); } else { skip_tree(stack); } } DeserializerNext::Close => { try!(end_element(tag_name, stack)); break; } DeserializerNext::Skip => { stack.next(); } } } Ok(obj) } } /// Serialize `EventCategoriesList` contents to a `SignedRequest`. 
struct EventCategoriesListSerializer; impl EventCategoriesListSerializer { fn serialize(params: &mut Params, name: &str, obj: &Vec<String>) { for (index, obj) in obj.iter().enumerate() { let key = format!("{}.member.{}", name, index + 1); params.put(&key, &obj); } } } /// <p>Contains the results of a successful invocation of the <a>DescribeEventCategories</a> action.</p> #[derive(Default, Debug, Clone, PartialEq)] pub struct EventCategoriesMap { /// <p>The event categories for the specified source type</p> pub event_categories: Option<Vec<String>>, /// <p>The source type that the returned categories belong to</p> pub source_type: Option<String>, } struct EventCategoriesMapDeserializer; impl EventCategoriesMapDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<EventCategoriesMap, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = EventCategoriesMap::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "EventCategories" => { obj.event_categories = Some(try!( EventCategoriesListDeserializer::deserialize("EventCategories", stack) )); } "SourceType" => { obj.source_type = Some(try!(StringDeserializer::deserialize("SourceType", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } struct EventCategoriesMapListDeserializer; impl EventCategoriesMapListDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<EventCategoriesMap>, XmlParseError> { let mut obj = vec![]; try!(start_element(tag_name, stack)); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => { if name == "EventCategoriesMap" { obj.push(try!(EventCategoriesMapDeserializer::deserialize( "EventCategoriesMap", stack ))); } else { skip_tree(stack); } } DeserializerNext::Close => { try!(end_element(tag_name, stack)); break; } DeserializerNext::Skip => { stack.next(); } } } Ok(obj) } } /// <p>Data returned from the <b>DescribeEventCategories</b> action.</p> #[derive(Default, Debug, Clone, PartialEq)] pub struct EventCategoriesMessage { /// <p>A list of EventCategoriesMap data types.</p> pub event_categories_map_list: Option<Vec<EventCategoriesMap>>, } struct EventCategoriesMessageDeserializer; impl EventCategoriesMessageDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<EventCategoriesMessage, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = EventCategoriesMessage::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "EventCategoriesMapList" => { obj.event_categories_map_list = Some(try!(EventCategoriesMapListDeserializer::deserialize( "EventCategoriesMapList", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } struct EventListDeserializer; impl EventListDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<Event>, XmlParseError> { let mut obj = vec![]; try!(start_element(tag_name, stack)); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => { if name == "Event" { obj.push(try!(EventDeserializer::deserialize("Event", stack))); } else { skip_tree(stack); } } DeserializerNext::Close => { try!(end_element(tag_name, stack)); break; } DeserializerNext::Skip => { stack.next(); } } } Ok(obj) } } /// <p>Contains the results of a successful invocation of the <a>DescribeEventSubscriptions</a> action.</p> #[derive(Default, Debug, Clone, PartialEq)] pub struct EventSubscription { /// <p>The event notification subscription Id.</p> pub cust_subscription_id: Option<String>, /// <p>The AWS customer account associated with the event notification subscription.</p> pub customer_aws_id: Option<String>, /// <p>A Boolean value indicating if the subscription is enabled. True indicates the subscription is enabled.</p> pub enabled: Option<bool>, /// <p>A list of event categories for the event notification subscription.</p> pub event_categories_list: Option<Vec<String>>, /// <p>The Amazon Resource Name (ARN) for the event subscription.</p> pub event_subscription_arn: Option<String>, /// <p>The topic ARN of the event notification subscription.</p> pub sns_topic_arn: Option<String>, /// <p>A list of source IDs for the event notification subscription.</p> pub source_ids_list: Option<Vec<String>>, /// <p>The source type for the event notification subscription.</p> pub source_type: Option<String>, /// <p>The status of the event notification subscription.</p> <p>Constraints:</p> <p>Can be one of the following: creating | modifying | deleting | active | no-permission | topic-not-exist</p> <p>The status "no-permission" indicates that Neptune no longer has permission to post to the SNS topic. The status "topic-not-exist" indicates that the topic was deleted after the subscription was created.</p> pub status: Option<String>, /// <p>The time the event notification subscription was created.</p> pub subscription_creation_time: Option<String>, } struct EventSubscriptionDeserializer; impl EventSubscriptionDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<EventSubscription, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = EventSubscription::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "CustSubscriptionId" => { obj.cust_subscription_id = Some(try!(StringDeserializer::deserialize( "CustSubscriptionId", stack ))); } "CustomerAwsId" => { obj.customer_aws_id = Some(try!(StringDeserializer::deserialize( "CustomerAwsId", stack ))); } "Enabled" => { obj.enabled = Some(try!(BooleanDeserializer::deserialize("Enabled", stack))); } "EventCategoriesList" => { obj.event_categories_list = Some(try!(EventCategoriesListDeserializer::deserialize( "EventCategoriesList", stack ))); } "EventSubscriptionArn" => { obj.event_subscription_arn = Some(try!(StringDeserializer::deserialize( "EventSubscriptionArn", stack ))); } "SnsTopicArn" => { obj.sns_topic_arn = Some(try!(StringDeserializer::deserialize("SnsTopicArn", stack))); } "SourceIdsList" => { obj.source_ids_list = Some(try!(SourceIdsListDeserializer::deserialize( "SourceIdsList", stack ))); } "SourceType" => { obj.source_type = Some(try!(StringDeserializer::deserialize("SourceType", stack))); } "Status" => { obj.status = Some(try!(StringDeserializer::deserialize("Status", stack))); } "SubscriptionCreationTime" => { obj.subscription_creation_time = Some(try!( StringDeserializer::deserialize("SubscriptionCreationTime", stack) )); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } struct EventSubscriptionsListDeserializer; impl EventSubscriptionsListDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<EventSubscription>, XmlParseError> { let mut obj = vec![]; try!(start_element(tag_name, stack)); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => { if name == "EventSubscription" { obj.push(try!(EventSubscriptionDeserializer::deserialize( "EventSubscription", stack ))); } else { skip_tree(stack); } } DeserializerNext::Close => { try!(end_element(tag_name, stack)); break; } DeserializerNext::Skip => { stack.next(); } } } Ok(obj) } } /// <p>Data returned by the <b>DescribeEventSubscriptions</b> action.</p> #[derive(Default, Debug, Clone, PartialEq)] pub struct EventSubscriptionsMessage { /// <p>A list of EventSubscriptions data types.</p> pub event_subscriptions_list: Option<Vec<EventSubscription>>, /// <p> An optional pagination token provided by a previous DescribeOrderableDBInstanceOptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by <code>MaxRecords</code>. </p> pub marker: Option<String>, } struct EventSubscriptionsMessageDeserializer; impl EventSubscriptionsMessageDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<EventSubscriptionsMessage, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = EventSubscriptionsMessage::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "EventSubscriptionsList" => { obj.event_subscriptions_list = Some(try!(EventSubscriptionsListDeserializer::deserialize( "EventSubscriptionsList", stack ))); } "Marker" => { obj.marker = Some(try!(StringDeserializer::deserialize("Marker", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p> Contains the result of a successful invocation of the <a>DescribeEvents</a> action. </p> #[derive(Default, Debug, Clone, PartialEq)] pub struct EventsMessage { /// <p> A list of <a>Event</a> instances. </p> pub events: Option<Vec<Event>>, /// <p> An optional pagination token provided by a previous Events request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by <code>MaxRecords</code> . </p> pub marker: Option<String>, } struct EventsMessageDeserializer; impl EventsMessageDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<EventsMessage, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = EventsMessage::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "Events" => { obj.events = Some(try!(EventListDeserializer::deserialize("Events", stack))); } "Marker" => { obj.marker = Some(try!(StringDeserializer::deserialize("Marker", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct FailoverDBClusterMessage { /// <p><p>A DB cluster identifier to force a failover for. This parameter is not case-sensitive.</p> <p>Constraints:</p> <ul> <li> <p>Must match the identifier of an existing DBCluster.</p> </li> </ul></p> pub db_cluster_identifier: Option<String>, /// <p>The name of the instance to promote to the primary instance.</p> <p>You must specify the instance identifier for an Read Replica in the DB cluster. For example, <code>mydbcluster-replica1</code>.</p> pub target_db_instance_identifier: Option<String>, } /// Serialize `FailoverDBClusterMessage` contents to a `SignedRequest`. 
struct FailoverDBClusterMessageSerializer; impl FailoverDBClusterMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &FailoverDBClusterMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.db_cluster_identifier { params.put( &format!("{}{}", prefix, "DBClusterIdentifier"), &field_value, ); } if let Some(ref field_value) = obj.target_db_instance_identifier { params.put( &format!("{}{}", prefix, "TargetDBInstanceIdentifier"), &field_value, ); } } } #[derive(Default, Debug, Clone, PartialEq)] pub struct FailoverDBClusterResult { pub db_cluster: Option<DBCluster>, } struct FailoverDBClusterResultDeserializer; impl FailoverDBClusterResultDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<FailoverDBClusterResult, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = FailoverDBClusterResult::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "DBCluster" => { obj.db_cluster = Some(try!(DBClusterDeserializer::deserialize("DBCluster", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p>This type is not currently supported.</p> #[derive(Default, Debug, Clone, PartialEq)] pub struct Filter { /// <p>This parameter is not currently supported.</p> pub name: String, /// <p>This parameter is not currently supported.</p> pub values: Vec<String>, } /// Serialize `Filter` contents to a `SignedRequest`. struct FilterSerializer; impl FilterSerializer { fn serialize(params: &mut Params, name: &str, obj: &Filter) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put(&format!("{}{}", prefix, "Name"), &obj.name); FilterValueListSerializer::serialize( params, &format!("{}{}", prefix, "Value"), &obj.values, ); } } /// Serialize `FilterList` contents to a `SignedRequest`. struct FilterListSerializer; impl FilterListSerializer { fn serialize(params: &mut Params, name: &str, obj: &Vec<Filter>) { for (index, obj) in obj.iter().enumerate() { let key = format!("{}.member.{}", name, index + 1); FilterSerializer::serialize(params, &key, obj); } } } /// Serialize `FilterValueList` contents to a `SignedRequest`. 
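// `FilterListSerializer` (above) and `FilterValueListSerializer` (below)
// compose: each filter becomes a nested, indexed key pair. A single
// hypothetical filter { Name: "engine", Values: ["neptune"] } serializes as
//
//     Filter.member.1.Name=engine
//     Filter.member.1.Value.member.1=neptune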
struct FilterValueListSerializer;
impl FilterValueListSerializer {
    fn serialize(params: &mut Params, name: &str, obj: &Vec<String>) {
        for (index, obj) in obj.iter().enumerate() {
            let key = format!("{}.member.{}", name, index + 1);
            params.put(&key, &obj);
        }
    }
}

struct IntegerDeserializer;
impl IntegerDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<i64, XmlParseError> {
        try!(start_element(tag_name, stack));
        // Report a malformed numeric payload as a parse error instead of
        // panicking on `unwrap`.
        let obj = try!(i64::from_str(try!(characters(stack)).as_ref())
            .map_err(|_| XmlParseError::new("failed to parse integer value")));
        try!(end_element(tag_name, stack));

        Ok(obj)
    }
}

struct IntegerOptionalDeserializer;
impl IntegerOptionalDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<i64, XmlParseError> {
        try!(start_element(tag_name, stack));
        // Same hardening as `IntegerDeserializer`: propagate the error
        // rather than unwrapping.
        let obj = try!(i64::from_str(try!(characters(stack)).as_ref())
            .map_err(|_| XmlParseError::new("failed to parse integer value")));
        try!(end_element(tag_name, stack));

        Ok(obj)
    }
}

/// Serialize `KeyList` contents to a `SignedRequest`.
struct KeyListSerializer;
impl KeyListSerializer {
    fn serialize(params: &mut Params, name: &str, obj: &Vec<String>) {
        for (index, obj) in obj.iter().enumerate() {
            let key = format!("{}.member.{}", name, index + 1);
            params.put(&key, &obj);
        }
    }
}

/// <p><p/></p>
#[derive(Default, Debug, Clone, PartialEq)]
pub struct ListTagsForResourceMessage {
    /// <p>This parameter is not currently supported.</p>
    pub filters: Option<Vec<Filter>>,
    /// <p>The Amazon Neptune resource with tags to be listed. This value is an Amazon Resource Name (ARN). For information about creating an ARN, see <a href="http://docs.aws.amazon.com/neptune/latest/UserGuide/tagging.ARN.html#tagging.ARN.Constructing"> Constructing an Amazon Resource Name (ARN)</a>.</p>
    pub resource_name: String,
}

/// Serialize `ListTagsForResourceMessage` contents to a `SignedRequest`.
struct ListTagsForResourceMessageSerializer;
impl ListTagsForResourceMessageSerializer {
    fn serialize(params: &mut Params, name: &str, obj: &ListTagsForResourceMessage) {
        let mut prefix = name.to_string();
        if prefix != "" {
            prefix.push_str(".");
        }

        if let Some(ref field_value) = obj.filters {
            FilterListSerializer::serialize(
                params,
                &format!("{}{}", prefix, "Filter"),
                field_value,
            );
        }
        params.put(&format!("{}{}", prefix, "ResourceName"), &obj.resource_name);
    }
}

struct LogTypeListDeserializer;
impl LogTypeListDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<Vec<String>, XmlParseError> {
        let mut obj = vec![];
        try!(start_element(tag_name, stack));

        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };

            match next_event {
                DeserializerNext::Element(name) => {
                    if name == "member" {
                        obj.push(try!(StringDeserializer::deserialize("member", stack)));
                    } else {
                        skip_tree(stack);
                    }
                }
                DeserializerNext::Close => {
                    try!(end_element(tag_name, stack));
                    break;
                }
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }

        Ok(obj)
    }
}

/// Serialize `LogTypeList` contents to a `SignedRequest`.
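// Note the asymmetry for log-type lists: the deserializer above expects
// generic `member` child elements in the XML response, while the serializer
// below emits `<prefix>.member.<N>` query parameters. The exact parameter
// prefix is supplied by the calling serializer and is not shown here.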
struct LogTypeListSerializer; impl LogTypeListSerializer { fn serialize(params: &mut Params, name: &str, obj: &Vec<String>) { for (index, obj) in obj.iter().enumerate() { let key = format!("{}.member.{}", name, index + 1); params.put(&key, &obj); } } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct ModifyDBClusterMessage { /// <p>A value that specifies whether the modifications in this request and any pending modifications are asynchronously applied as soon as possible, regardless of the <code>PreferredMaintenanceWindow</code> setting for the DB cluster. If this parameter is set to <code>false</code>, changes to the DB cluster are applied during the next maintenance window.</p> <p>The <code>ApplyImmediately</code> parameter only affects the <code>NewDBClusterIdentifier</code> and <code>MasterUserPassword</code> values. If you set the <code>ApplyImmediately</code> parameter value to false, then changes to the <code>NewDBClusterIdentifier</code> and <code>MasterUserPassword</code> values are applied during the next maintenance window. All other changes are applied immediately, regardless of the value of the <code>ApplyImmediately</code> parameter.</p> <p>Default: <code>false</code> </p> pub apply_immediately: Option<bool>, /// <p><p>The number of days for which automated backups are retained. You must specify a minimum value of 1.</p> <p>Default: 1</p> <p>Constraints:</p> <ul> <li> <p>Must be a value from 1 to 35</p> </li> </ul></p> pub backup_retention_period: Option<i64>, /// <p><p>The DB cluster identifier for the cluster being modified. This parameter is not case-sensitive.</p> <p>Constraints:</p> <ul> <li> <p>Must match the identifier of an existing DBCluster.</p> </li> </ul></p> pub db_cluster_identifier: String, /// <p>The name of the DB cluster parameter group to use for the DB cluster.</p> pub db_cluster_parameter_group_name: Option<String>, /// <p>True to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts, and otherwise false.</p> <p>Default: <code>false</code> </p> pub enable_iam_database_authentication: Option<bool>, /// <p>The version number of the database engine to which you want to upgrade. Changing this parameter results in an outage. The change is applied during the next maintenance window unless the ApplyImmediately parameter is set to true.</p> <p>For a list of valid engine versions, see <a>CreateDBInstance</a>, or call <a>DescribeDBEngineVersions</a>.</p> pub engine_version: Option<String>, /// <p>The new password for the master database user. This password can contain any printable ASCII character except "/", """, or "@".</p> <p>Constraints: Must contain from 8 to 41 characters.</p> pub master_user_password: Option<String>, /// <p>The new DB cluster identifier for the DB cluster when renaming a DB cluster. This value is stored as a lowercase string.</p> <p>Constraints:</p> <ul> <li> <p>Must contain from 1 to 63 letters, numbers, or hyphens</p> </li> <li> <p>The first character must be a letter</p> </li> <li> <p>Cannot end with a hyphen or contain two consecutive hyphens</p> </li> </ul> <p>Example: <code>my-cluster2</code> </p> pub new_db_cluster_identifier: Option<String>, /// <p>A value that indicates that the DB cluster should be associated with the specified option group. 
Changing this parameter doesn't result in an outage except in the following case, and the change is applied during the next maintenance window unless the <code>ApplyImmediately</code> parameter is set to <code>true</code> for this request. If the parameter change results in an option group that enables OEM, this change can cause a brief (sub-second) period during which new connections are rejected but existing connections are not interrupted. </p> <p>Permanent options can't be removed from an option group. The option group can't be removed from a DB cluster once it is associated with a DB cluster.</p> pub option_group_name: Option<String>, /// <p>The port number on which the DB cluster accepts connections.</p> <p>Constraints: Value must be <code>1150-65535</code> </p> <p>Default: The same port as the original DB cluster.</p> pub port: Option<i64>, /// <p><p>The daily time range during which automated backups are created if automated backups are enabled, using the <code>BackupRetentionPeriod</code> parameter. </p> <p>The default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region. </p> <p>Constraints:</p> <ul> <li> <p>Must be in the format <code>hh24:mi-hh24:mi</code>.</p> </li> <li> <p>Must be in Universal Coordinated Time (UTC).</p> </li> <li> <p>Must not conflict with the preferred maintenance window.</p> </li> <li> <p>Must be at least 30 minutes.</p> </li> </ul></p> pub preferred_backup_window: Option<String>, /// <p>The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).</p> <p>Format: <code>ddd:hh24:mi-ddd:hh24:mi</code> </p> <p>The default is a 30-minute window selected at random from an 8-hour block of time for each AWS Region, occurring on a random day of the week. </p> <p>Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun.</p> <p>Constraints: Minimum 30-minute window.</p> pub preferred_maintenance_window: Option<String>, /// <p>A list of VPC security groups that the DB cluster will belong to.</p> pub vpc_security_group_ids: Option<Vec<String>>, } /// Serialize `ModifyDBClusterMessage` contents to a `SignedRequest`. 
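// A minimal `ModifyDBCluster` request as produced by the serializer below,
// with hypothetical values. Booleans and integers are stringified via
// `to_string`, and the security group list is assumed to flatten with the
// same `.member.N` convention as the other list serializers in this file:
//
//     DBClusterIdentifier=my-cluster
//     ApplyImmediately=true
//     BackupRetentionPeriod=7
//     VpcSecurityGroupId.member.1=sg-0123456789abcdef0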
struct ModifyDBClusterMessageSerializer; impl ModifyDBClusterMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &ModifyDBClusterMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.apply_immediately { params.put( &format!("{}{}", prefix, "ApplyImmediately"), &field_value.to_string(), ); } if let Some(ref field_value) = obj.backup_retention_period { params.put( &format!("{}{}", prefix, "BackupRetentionPeriod"), &field_value.to_string(), ); } params.put( &format!("{}{}", prefix, "DBClusterIdentifier"), &obj.db_cluster_identifier, ); if let Some(ref field_value) = obj.db_cluster_parameter_group_name { params.put( &format!("{}{}", prefix, "DBClusterParameterGroupName"), &field_value, ); } if let Some(ref field_value) = obj.enable_iam_database_authentication { params.put( &format!("{}{}", prefix, "EnableIAMDatabaseAuthentication"), &field_value.to_string(), ); } if let Some(ref field_value) = obj.engine_version { params.put(&format!("{}{}", prefix, "EngineVersion"), &field_value); } if let Some(ref field_value) = obj.master_user_password { params.put(&format!("{}{}", prefix, "MasterUserPassword"), &field_value); } if let Some(ref field_value) = obj.new_db_cluster_identifier { params.put( &format!("{}{}", prefix, "NewDBClusterIdentifier"), &field_value, ); } if let Some(ref field_value) = obj.option_group_name { params.put(&format!("{}{}", prefix, "OptionGroupName"), &field_value); } if let Some(ref field_value) = obj.port { params.put(&format!("{}{}", prefix, "Port"), &field_value.to_string()); } if let Some(ref field_value) = obj.preferred_backup_window { params.put( &format!("{}{}", prefix, "PreferredBackupWindow"), &field_value, ); } if let Some(ref field_value) = obj.preferred_maintenance_window { params.put( &format!("{}{}", prefix, "PreferredMaintenanceWindow"), &field_value, ); } if let Some(ref field_value) = obj.vpc_security_group_ids { VpcSecurityGroupIdListSerializer::serialize( params, &format!("{}{}", prefix, "VpcSecurityGroupId"), field_value, ); } } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct ModifyDBClusterParameterGroupMessage { /// <p>The name of the DB cluster parameter group to modify.</p> pub db_cluster_parameter_group_name: String, /// <p>A list of parameters in the DB cluster parameter group to modify.</p> pub parameters: Vec<Parameter>, } /// Serialize `ModifyDBClusterParameterGroupMessage` contents to a `SignedRequest`. struct ModifyDBClusterParameterGroupMessageSerializer; impl ModifyDBClusterParameterGroupMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &ModifyDBClusterParameterGroupMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "DBClusterParameterGroupName"), &obj.db_cluster_parameter_group_name, ); ParametersListSerializer::serialize( params, &format!("{}{}", prefix, "Parameter"), &obj.parameters, ); } } #[derive(Default, Debug, Clone, PartialEq)] pub struct ModifyDBClusterResult { pub db_cluster: Option<DBCluster>, } struct ModifyDBClusterResultDeserializer; impl ModifyDBClusterResultDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<ModifyDBClusterResult, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = ModifyDBClusterResult::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. 
})) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "DBCluster" => { obj.db_cluster = Some(try!(DBClusterDeserializer::deserialize("DBCluster", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct ModifyDBClusterSnapshotAttributeMessage { /// <p>The name of the DB cluster snapshot attribute to modify.</p> <p>To manage authorization for other AWS accounts to copy or restore a manual DB cluster snapshot, set this value to <code>restore</code>.</p> pub attribute_name: String, /// <p>The identifier for the DB cluster snapshot to modify the attributes for.</p> pub db_cluster_snapshot_identifier: String, /// <p>A list of DB cluster snapshot attributes to add to the attribute specified by <code>AttributeName</code>.</p> <p>To authorize other AWS accounts to copy or restore a manual DB cluster snapshot, set this list to include one or more AWS account IDs, or <code>all</code> to make the manual DB cluster snapshot restorable by any AWS account. Do not add the <code>all</code> value for any manual DB cluster snapshots that contain private information that you don't want available to all AWS accounts.</p> pub values_to_add: Option<Vec<String>>, /// <p>A list of DB cluster snapshot attributes to remove from the attribute specified by <code>AttributeName</code>.</p> <p>To remove authorization for other AWS accounts to copy or restore a manual DB cluster snapshot, set this list to include one or more AWS account identifiers, or <code>all</code> to remove authorization for any AWS account to copy or restore the DB cluster snapshot. If you specify <code>all</code>, an AWS account whose account ID is explicitly added to the <code>restore</code> attribute can still copy or restore a manual DB cluster snapshot.</p> pub values_to_remove: Option<Vec<String>>, } /// Serialize `ModifyDBClusterSnapshotAttributeMessage` contents to a `SignedRequest`. 
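// Example of sharing a manual cluster snapshot with another account via the
// serializer below (hypothetical identifiers): setting `attribute_name` to
// "restore" and `values_to_add` to ["123456789012"] yields
//
//     AttributeName=restore
//     DBClusterSnapshotIdentifier=my-snapshot
//     ValuesToAdd.member.1=123456789012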
struct ModifyDBClusterSnapshotAttributeMessageSerializer;
impl ModifyDBClusterSnapshotAttributeMessageSerializer {
    fn serialize(params: &mut Params, name: &str, obj: &ModifyDBClusterSnapshotAttributeMessage) {
        let mut prefix = name.to_string();
        if prefix != "" {
            prefix.push_str(".");
        }

        params.put(
            &format!("{}{}", prefix, "AttributeName"),
            &obj.attribute_name,
        );
        params.put(
            &format!("{}{}", prefix, "DBClusterSnapshotIdentifier"),
            &obj.db_cluster_snapshot_identifier,
        );
        // `ValuesToAdd` and `ValuesToRemove` must be serialized under their
        // own member names; writing both lists under a shared key would let
        // one list silently overwrite the other in the request parameters.
        if let Some(ref field_value) = obj.values_to_add {
            AttributeValueListSerializer::serialize(
                params,
                &format!("{}{}", prefix, "ValuesToAdd"),
                field_value,
            );
        }
        if let Some(ref field_value) = obj.values_to_remove {
            AttributeValueListSerializer::serialize(
                params,
                &format!("{}{}", prefix, "ValuesToRemove"),
                field_value,
            );
        }
    }
}

#[derive(Default, Debug, Clone, PartialEq)]
pub struct ModifyDBClusterSnapshotAttributeResult {
    pub db_cluster_snapshot_attributes_result: Option<DBClusterSnapshotAttributesResult>,
}

struct ModifyDBClusterSnapshotAttributeResultDeserializer;
impl ModifyDBClusterSnapshotAttributeResultDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<ModifyDBClusterSnapshotAttributeResult, XmlParseError> {
        try!(start_element(tag_name, stack));

        let mut obj = ModifyDBClusterSnapshotAttributeResult::default();

        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };

            match next_event {
                DeserializerNext::Element(name) => match &name[..] {
                    "DBClusterSnapshotAttributesResult" => {
                        obj.db_cluster_snapshot_attributes_result = Some(try!(
                            DBClusterSnapshotAttributesResultDeserializer::deserialize(
                                "DBClusterSnapshotAttributesResult",
                                stack
                            )
                        ));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }

        try!(end_element(tag_name, stack));

        Ok(obj)
    }
}

/// <p><p/></p>
#[derive(Default, Debug, Clone, PartialEq)]
pub struct ModifyDBInstanceMessage {
    /// <p>The new amount of storage (in gibibytes) to allocate for the DB instance.</p> <p>Not applicable. Storage is managed by the DB Cluster.</p>
    pub allocated_storage: Option<i64>,
    /// <p>Indicates that major version upgrades are allowed. Changing this parameter doesn't result in an outage and the change is asynchronously applied as soon as possible.</p> <p>Constraints: This parameter must be set to true when specifying a value for the EngineVersion parameter that is a different major version than the DB instance's current version.</p>
    pub allow_major_version_upgrade: Option<bool>,
    /// <p>Specifies whether the modifications in this request and any pending modifications are asynchronously applied as soon as possible, regardless of the <code>PreferredMaintenanceWindow</code> setting for the DB instance.</p> <p>If this parameter is set to <code>false</code>, changes to the DB instance are applied during the next maintenance window. Some parameter changes can cause an outage and are applied on the next call to <a>RebootDBInstance</a>, or the next failure reboot.</p> <p>Default: <code>false</code> </p>
    pub apply_immediately: Option<bool>,
    /// <p> Indicates that minor version upgrades are applied automatically to the DB instance during the maintenance window.
Changing this parameter doesn't result in an outage except in the following case and the change is asynchronously applied as soon as possible. An outage will result if this parameter is set to <code>true</code> during the maintenance window, and a newer minor version is available, and Neptune has enabled auto patching for that engine version. </p> pub auto_minor_version_upgrade: Option<bool>, /// <p>The number of days to retain automated backups. Setting this parameter to a positive number enables backups. Setting this parameter to 0 disables automated backups.</p> <p>Not applicable. The retention period for automated backups is managed by the DB cluster. For more information, see <a>ModifyDBCluster</a>.</p> <p>Default: Uses existing setting</p> pub backup_retention_period: Option<i64>, /// <p>Indicates the certificate that needs to be associated with the instance.</p> pub ca_certificate_identifier: Option<String>, /// <p>The configuration setting for the log types to be enabled for export to CloudWatch Logs for a specific DB instance or DB cluster.</p> pub cloudwatch_logs_export_configuration: Option<CloudwatchLogsExportConfiguration>, /// <p>True to copy all tags from the DB instance to snapshots of the DB instance, and otherwise false. The default is false.</p> pub copy_tags_to_snapshot: Option<bool>, /// <p>The new compute and memory capacity of the DB instance, for example, <code>db.m4.large</code>. Not all DB instance classes are available in all AWS Regions. </p> <p>If you modify the DB instance class, an outage occurs during the change. The change is applied during the next maintenance window, unless <code>ApplyImmediately</code> is specified as <code>true</code> for this request. </p> <p>Default: Uses existing setting</p> pub db_instance_class: Option<String>, /// <p><p>The DB instance identifier. This value is stored as a lowercase string.</p> <p>Constraints:</p> <ul> <li> <p>Must match the identifier of an existing DBInstance.</p> </li> </ul></p> pub db_instance_identifier: String, /// <p>The name of the DB parameter group to apply to the DB instance. Changing this setting doesn't result in an outage. The parameter group name itself is changed immediately, but the actual parameter changes are not applied until you reboot the instance without failover. The db instance will NOT be rebooted automatically and the parameter changes will NOT be applied during the next maintenance window.</p> <p>Default: Uses existing setting</p> <p>Constraints: The DB parameter group must be in the same DB parameter group family as this DB instance.</p> pub db_parameter_group_name: Option<String>, /// <p>The port number on which the database accepts connections.</p> <p>The value of the <code>DBPortNumber</code> parameter must not match any of the port values specified for options in the option group for the DB instance.</p> <p>Your database will restart when you change the <code>DBPortNumber</code> value regardless of the value of the <code>ApplyImmediately</code> parameter.</p> <p> Default: <code>8182</code> </p> pub db_port_number: Option<i64>, /// <p><p>A list of DB security groups to authorize on this DB instance. Changing this setting doesn&#39;t result in an outage and the change is asynchronously applied as soon as possible.</p> <p>Constraints:</p> <ul> <li> <p>If supplied, must match existing DBSecurityGroups.</p> </li> </ul></p> pub db_security_groups: Option<Vec<String>>, /// <p>The new DB subnet group for the DB instance. 
You can use this parameter to move your DB instance to a different VPC. </p> <p>Changing the subnet group causes an outage during the change. The change is applied during the next maintenance window, unless you specify <code>true</code> for the <code>ApplyImmediately</code> parameter. </p> <p>Constraints: If supplied, must match the name of an existing DBSubnetGroup.</p> <p>Example: <code>mySubnetGroup</code> </p> pub db_subnet_group_name: Option<String>, /// <p>Not supported. </p> pub domain: Option<String>, /// <p>Not supported</p> pub domain_iam_role_name: Option<String>, /// <p>True to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts, and otherwise false.</p> <p>You can enable IAM database authentication for the following database engines</p> <p>Not applicable. Mapping AWS IAM accounts to database accounts is managed by the DB cluster. For more information, see <a>ModifyDBCluster</a>.</p> <p>Default: <code>false</code> </p> pub enable_iam_database_authentication: Option<bool>, /// <p>True to enable Performance Insights for the DB instance, and otherwise false.</p> pub enable_performance_insights: Option<bool>, /// <p> The version number of the database engine to upgrade to. Changing this parameter results in an outage and the change is applied during the next maintenance window unless the <code>ApplyImmediately</code> parameter is set to <code>true</code> for this request. </p> <p>For major version upgrades, if a nondefault DB parameter group is currently in use, a new DB parameter group in the DB parameter group family for the new engine version must be specified. The new DB parameter group can be the default for that DB parameter group family.</p> pub engine_version: Option<String>, /// <p>The new Provisioned IOPS (I/O operations per second) value for the instance. </p> <p>Changing this setting doesn't result in an outage and the change is applied during the next maintenance window unless the <code>ApplyImmediately</code> parameter is set to <code>true</code> for this request.</p> <p>Default: Uses existing setting</p> pub iops: Option<i64>, /// <p>The license model for the DB instance.</p> <p>Valid values: <code>license-included</code> | <code>bring-your-own-license</code> | <code>general-public-license</code> </p> pub license_model: Option<String>, /// <p>The new password for the master user. The password can include any printable ASCII character except "/", """, or "@".</p> <p>Not applicable. </p> <p>Default: Uses existing setting</p> pub master_user_password: Option<String>, /// <p>The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collecting Enhanced Monitoring metrics, specify 0. The default is 0.</p> <p>If <code>MonitoringRoleArn</code> is specified, then you must also set <code>MonitoringInterval</code> to a value other than 0.</p> <p>Valid Values: <code>0, 1, 5, 10, 15, 30, 60</code> </p> pub monitoring_interval: Option<i64>, /// <p>The ARN for the IAM role that permits Neptune to send enhanced monitoring metrics to Amazon CloudWatch Logs. For example, <code>arn:aws:iam:123456789012:role/emaccess</code>. </p> <p>If <code>MonitoringInterval</code> is set to a value other than 0, then you must supply a <code>MonitoringRoleArn</code> value.</p> pub monitoring_role_arn: Option<String>, /// <p>Specifies if the DB instance is a Multi-AZ deployment. 
Changing this parameter doesn't result in an outage and the change is applied during the next maintenance window unless the <code>ApplyImmediately</code> parameter is set to <code>true</code> for this request.</p>
    pub multi_az: Option<bool>,
    /// <p>The new DB instance identifier for the DB instance when renaming a DB instance. When you change the DB instance identifier, an instance reboot will occur immediately if you set <code>Apply Immediately</code> to true, or during the next maintenance window if you set <code>Apply Immediately</code> to false. This value is stored as a lowercase string.</p> <p>Constraints:</p> <ul> <li> <p>Must contain from 1 to 63 letters, numbers, or hyphens.</p> </li> <li> <p>The first character must be a letter.</p> </li> <li> <p>Cannot end with a hyphen or contain two consecutive hyphens.</p> </li> </ul> <p>Example: <code>mydbinstance</code> </p>
    pub new_db_instance_identifier: Option<String>,
    /// <p>Indicates that the DB instance should be associated with the specified option group. Changing this parameter doesn't result in an outage except in the following case and the change is applied during the next maintenance window unless the <code>ApplyImmediately</code> parameter is set to <code>true</code> for this request. If the parameter change results in an option group that enables OEM, this change can cause a brief (sub-second) period during which new connections are rejected but existing connections are not interrupted.</p> <p>Permanent options, such as the TDE option for Oracle Advanced Security TDE, can't be removed from an option group, and that option group can't be removed from a DB instance once it is associated with a DB instance.</p>
    pub option_group_name: Option<String>,
    /// <p>The AWS KMS key identifier for encryption of Performance Insights data. The KMS key ID is the Amazon Resource Name (ARN), KMS key identifier, or the KMS key alias for the KMS encryption key.</p>
    pub performance_insights_kms_key_id: Option<String>,
    /// <p><p>The daily time range during which automated backups are created if automated backups are enabled.</p> <p>Not applicable. The daily time range for creating automated backups is managed by the DB cluster. For more information, see <a>ModifyDBCluster</a>.</p> <p>Constraints:</p> <ul> <li> <p>Must be in the format hh24:mi-hh24:mi</p> </li> <li> <p>Must be in Universal Time Coordinated (UTC)</p> </li> <li> <p>Must not conflict with the preferred maintenance window</p> </li> <li> <p>Must be at least 30 minutes</p> </li> </ul></p>
    pub preferred_backup_window: Option<String>,
    /// <p>The weekly time range (in UTC) during which system maintenance can occur, which might result in an outage. Changing this parameter doesn't result in an outage, except in the following situation, and the change is asynchronously applied as soon as possible. If there are pending actions that cause a reboot, and the maintenance window is changed to include the current time, then changing this parameter will cause a reboot of the DB instance.
If moving this window to the current time, there must be at least 30 minutes between the current time and end of the window to ensure pending changes are applied.</p> <p>Default: Uses existing setting</p> <p>Format: ddd:hh24:mi-ddd:hh24:mi</p> <p>Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun</p> <p>Constraints: Must be at least 30 minutes</p> pub preferred_maintenance_window: Option<String>, /// <p>A value that specifies the order in which a Read Replica is promoted to the primary instance after a failure of the existing primary instance. </p> <p>Default: 1</p> <p>Valid Values: 0 - 15</p> pub promotion_tier: Option<i64>, /// <p>Specifies the storage type to be associated with the DB instance. </p> <p>If you specify Provisioned IOPS (<code>io1</code>), you must also include a value for the <code>Iops</code> parameter. </p> <p>If you choose to migrate your DB instance from using standard storage to using Provisioned IOPS, or from using Provisioned IOPS to using standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance is available for use, but might experience performance degradation. While the migration takes place, nightly backups for the instance are suspended. No other Amazon Neptune operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a Read Replica for the instance, and creating a DB snapshot of the instance. </p> <p> Valid values: <code>standard | gp2 | io1</code> </p> <p>Default: <code>io1</code> if the <code>Iops</code> parameter is specified, otherwise <code>standard</code> </p> pub storage_type: Option<String>, /// <p>The ARN from the key store with which to associate the instance for TDE encryption.</p> pub tde_credential_arn: Option<String>, /// <p>The password for the given ARN from the key store in order to access the device.</p> pub tde_credential_password: Option<String>, /// <p><p>A list of EC2 VPC security groups to authorize on this DB instance. This change is asynchronously applied as soon as possible.</p> <p>Not applicable. The associated list of EC2 VPC security groups is managed by the DB cluster. For more information, see <a>ModifyDBCluster</a>.</p> <p>Constraints:</p> <ul> <li> <p>If supplied, must match existing VpcSecurityGroupIds.</p> </li> </ul></p> pub vpc_security_group_ids: Option<Vec<String>>, } /// Serialize `ModifyDBInstanceMessage` contents to a `SignedRequest`. 
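// The serializer below writes a parameter only for fields that are `Some`,
// so a minimal instance-class change reduces to just (hypothetical values):
//
//     DBInstanceIdentifier=mydbinstance
//     DBInstanceClass=db.r4.large
//     ApplyImmediately=true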
struct ModifyDBInstanceMessageSerializer; impl ModifyDBInstanceMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &ModifyDBInstanceMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.allocated_storage { params.put( &format!("{}{}", prefix, "AllocatedStorage"), &field_value.to_string(), ); } if let Some(ref field_value) = obj.allow_major_version_upgrade { params.put( &format!("{}{}", prefix, "AllowMajorVersionUpgrade"), &field_value.to_string(), ); } if let Some(ref field_value) = obj.apply_immediately { params.put( &format!("{}{}", prefix, "ApplyImmediately"), &field_value.to_string(), ); } if let Some(ref field_value) = obj.auto_minor_version_upgrade { params.put( &format!("{}{}", prefix, "AutoMinorVersionUpgrade"), &field_value.to_string(), ); } if let Some(ref field_value) = obj.backup_retention_period { params.put( &format!("{}{}", prefix, "BackupRetentionPeriod"), &field_value.to_string(), ); } if let Some(ref field_value) = obj.ca_certificate_identifier { params.put( &format!("{}{}", prefix, "CACertificateIdentifier"), &field_value, ); } if let Some(ref field_value) = obj.cloudwatch_logs_export_configuration { CloudwatchLogsExportConfigurationSerializer::serialize( params, &format!("{}{}", prefix, "CloudwatchLogsExportConfiguration"), field_value, ); } if let Some(ref field_value) = obj.copy_tags_to_snapshot { params.put( &format!("{}{}", prefix, "CopyTagsToSnapshot"), &field_value.to_string(), ); } if let Some(ref field_value) = obj.db_instance_class { params.put(&format!("{}{}", prefix, "DBInstanceClass"), &field_value); } params.put( &format!("{}{}", prefix, "DBInstanceIdentifier"), &obj.db_instance_identifier, ); if let Some(ref field_value) = obj.db_parameter_group_name { params.put( &format!("{}{}", prefix, "DBParameterGroupName"), &field_value, ); } if let Some(ref field_value) = obj.db_port_number { params.put( &format!("{}{}", prefix, "DBPortNumber"), &field_value.to_string(), ); } if let Some(ref field_value) = obj.db_security_groups { DBSecurityGroupNameListSerializer::serialize( params, &format!("{}{}", prefix, "DBSecurityGroupName"), field_value, ); } if let Some(ref field_value) = obj.db_subnet_group_name { params.put(&format!("{}{}", prefix, "DBSubnetGroupName"), &field_value); } if let Some(ref field_value) = obj.domain { params.put(&format!("{}{}", prefix, "Domain"), &field_value); } if let Some(ref field_value) = obj.domain_iam_role_name { params.put(&format!("{}{}", prefix, "DomainIAMRoleName"), &field_value); } if let Some(ref field_value) = obj.enable_iam_database_authentication { params.put( &format!("{}{}", prefix, "EnableIAMDatabaseAuthentication"), &field_value.to_string(), ); } if let Some(ref field_value) = obj.enable_performance_insights { params.put( &format!("{}{}", prefix, "EnablePerformanceInsights"), &field_value.to_string(), ); } if let Some(ref field_value) = obj.engine_version { params.put(&format!("{}{}", prefix, "EngineVersion"), &field_value); } if let Some(ref field_value) = obj.iops { params.put(&format!("{}{}", prefix, "Iops"), &field_value.to_string()); } if let Some(ref field_value) = obj.license_model { params.put(&format!("{}{}", prefix, "LicenseModel"), &field_value); } if let Some(ref field_value) = obj.master_user_password { params.put(&format!("{}{}", prefix, "MasterUserPassword"), &field_value); } if let Some(ref field_value) = obj.monitoring_interval { params.put( &format!("{}{}", prefix, "MonitoringInterval"), &field_value.to_string(), ); } 
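        // Per the field docs above, `MonitoringInterval` and
        // `MonitoringRoleArn` are coupled: a non-zero interval requires a
        // role ARN, and vice versa. The serializer does not enforce this;
        // the service rejects inconsistent combinations.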
if let Some(ref field_value) = obj.monitoring_role_arn { params.put(&format!("{}{}", prefix, "MonitoringRoleArn"), &field_value); } if let Some(ref field_value) = obj.multi_az { params.put( &format!("{}{}", prefix, "MultiAZ"), &field_value.to_string(), ); } if let Some(ref field_value) = obj.new_db_instance_identifier { params.put( &format!("{}{}", prefix, "NewDBInstanceIdentifier"), &field_value, ); } if let Some(ref field_value) = obj.option_group_name { params.put(&format!("{}{}", prefix, "OptionGroupName"), &field_value); } if let Some(ref field_value) = obj.performance_insights_kms_key_id { params.put( &format!("{}{}", prefix, "PerformanceInsightsKMSKeyId"), &field_value, ); } if let Some(ref field_value) = obj.preferred_backup_window { params.put( &format!("{}{}", prefix, "PreferredBackupWindow"), &field_value, ); } if let Some(ref field_value) = obj.preferred_maintenance_window { params.put( &format!("{}{}", prefix, "PreferredMaintenanceWindow"), &field_value, ); } if let Some(ref field_value) = obj.promotion_tier { params.put( &format!("{}{}", prefix, "PromotionTier"), &field_value.to_string(), ); } if let Some(ref field_value) = obj.storage_type { params.put(&format!("{}{}", prefix, "StorageType"), &field_value); } if let Some(ref field_value) = obj.tde_credential_arn { params.put(&format!("{}{}", prefix, "TdeCredentialArn"), &field_value); } if let Some(ref field_value) = obj.tde_credential_password { params.put( &format!("{}{}", prefix, "TdeCredentialPassword"), &field_value, ); } if let Some(ref field_value) = obj.vpc_security_group_ids { VpcSecurityGroupIdListSerializer::serialize( params, &format!("{}{}", prefix, "VpcSecurityGroupId"), field_value, ); } } } #[derive(Default, Debug, Clone, PartialEq)] pub struct ModifyDBInstanceResult { pub db_instance: Option<DBInstance>, } struct ModifyDBInstanceResultDeserializer; impl ModifyDBInstanceResultDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<ModifyDBInstanceResult, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = ModifyDBInstanceResult::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "DBInstance" => { obj.db_instance = Some(try!(DBInstanceDeserializer::deserialize( "DBInstance", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct ModifyDBParameterGroupMessage { /// <p><p>The name of the DB parameter group.</p> <p>Constraints:</p> <ul> <li> <p>If supplied, must match the name of an existing DBParameterGroup.</p> </li> </ul></p> pub db_parameter_group_name: String, /// <p><p>An array of parameter names, values, and the apply method for the parameter update. At least one parameter name, value, and apply method must be supplied; subsequent arguments are optional. A maximum of 20 parameters can be modified in a single request.</p> <p>Valid Values (for the application method): <code>immediate | pending-reboot</code> </p> <note> <p>You can use the immediate value with dynamic parameters only. 
You can use the pending-reboot value for both dynamic and static parameters, and changes are applied when you reboot the DB instance without failover.</p> </note></p> pub parameters: Vec<Parameter>, } /// Serialize `ModifyDBParameterGroupMessage` contents to a `SignedRequest`. struct ModifyDBParameterGroupMessageSerializer; impl ModifyDBParameterGroupMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &ModifyDBParameterGroupMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "DBParameterGroupName"), &obj.db_parameter_group_name, ); ParametersListSerializer::serialize( params, &format!("{}{}", prefix, "Parameter"), &obj.parameters, ); } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct ModifyDBSubnetGroupMessage { /// <p>The description for the DB subnet group.</p> pub db_subnet_group_description: Option<String>, /// <p>The name for the DB subnet group. This value is stored as a lowercase string. You can't modify the default subnet group. </p> <p>Constraints: Must match the name of an existing DBSubnetGroup. Must not be default.</p> <p>Example: <code>mySubnetgroup</code> </p> pub db_subnet_group_name: String, /// <p>The EC2 subnet IDs for the DB subnet group.</p> pub subnet_ids: Vec<String>, } /// Serialize `ModifyDBSubnetGroupMessage` contents to a `SignedRequest`. struct ModifyDBSubnetGroupMessageSerializer; impl ModifyDBSubnetGroupMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &ModifyDBSubnetGroupMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.db_subnet_group_description { params.put( &format!("{}{}", prefix, "DBSubnetGroupDescription"), &field_value, ); } params.put( &format!("{}{}", prefix, "DBSubnetGroupName"), &obj.db_subnet_group_name, ); SubnetIdentifierListSerializer::serialize( params, &format!("{}{}", prefix, "SubnetIdentifier"), &obj.subnet_ids, ); } } #[derive(Default, Debug, Clone, PartialEq)] pub struct ModifyDBSubnetGroupResult { pub db_subnet_group: Option<DBSubnetGroup>, } struct ModifyDBSubnetGroupResultDeserializer; impl ModifyDBSubnetGroupResultDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<ModifyDBSubnetGroupResult, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = ModifyDBSubnetGroupResult::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "DBSubnetGroup" => { obj.db_subnet_group = Some(try!(DBSubnetGroupDeserializer::deserialize( "DBSubnetGroup", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct ModifyEventSubscriptionMessage { /// <p> A Boolean value; set to <b>true</b> to activate the subscription. </p> pub enabled: Option<bool>, /// <p> A list of event categories for a SourceType that you want to subscribe to. You can see a list of the categories for a given SourceType by using the <b>DescribeEventCategories</b> action. 
</p> pub event_categories: Option<Vec<String>>, /// <p>The Amazon Resource Name (ARN) of the SNS topic created for event notification. The ARN is created by Amazon SNS when you create a topic and subscribe to it.</p> pub sns_topic_arn: Option<String>, /// <p>The type of source that is generating the events. For example, if you want to be notified of events generated by a DB instance, you would set this parameter to db-instance. if this value is not specified, all events are returned.</p> <p>Valid values: db-instance | db-parameter-group | db-security-group | db-snapshot</p> pub source_type: Option<String>, /// <p>The name of the event notification subscription.</p> pub subscription_name: String, } /// Serialize `ModifyEventSubscriptionMessage` contents to a `SignedRequest`. struct ModifyEventSubscriptionMessageSerializer; impl ModifyEventSubscriptionMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &ModifyEventSubscriptionMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.enabled { params.put( &format!("{}{}", prefix, "Enabled"), &field_value.to_string(), ); } if let Some(ref field_value) = obj.event_categories { EventCategoriesListSerializer::serialize( params, &format!("{}{}", prefix, "EventCategory"), field_value, ); } if let Some(ref field_value) = obj.sns_topic_arn { params.put(&format!("{}{}", prefix, "SnsTopicArn"), &field_value); } if let Some(ref field_value) = obj.source_type { params.put(&format!("{}{}", prefix, "SourceType"), &field_value); } params.put( &format!("{}{}", prefix, "SubscriptionName"), &obj.subscription_name, ); } } #[derive(Default, Debug, Clone, PartialEq)] pub struct ModifyEventSubscriptionResult { pub event_subscription: Option<EventSubscription>, } struct ModifyEventSubscriptionResultDeserializer; impl ModifyEventSubscriptionResultDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<ModifyEventSubscriptionResult, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = ModifyEventSubscriptionResult::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "EventSubscription" => { obj.event_subscription = Some(try!( EventSubscriptionDeserializer::deserialize("EventSubscription", stack) )); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p>Provides information on the option groups the DB instance is a member of.</p> #[derive(Default, Debug, Clone, PartialEq)] pub struct OptionGroupMembership { /// <p>The name of the option group that the instance belongs to.</p> pub option_group_name: Option<String>, /// <p>The status of the DB instance's option group membership. Valid values are: <code>in-sync</code>, <code>pending-apply</code>, <code>pending-removal</code>, <code>pending-maintenance-apply</code>, <code>pending-maintenance-removal</code>, <code>applying</code>, <code>removing</code>, and <code>failed</code>. 
</p> pub status: Option<String>, } struct OptionGroupMembershipDeserializer; impl OptionGroupMembershipDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<OptionGroupMembership, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = OptionGroupMembership::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "OptionGroupName" => { obj.option_group_name = Some(try!(StringDeserializer::deserialize( "OptionGroupName", stack ))); } "Status" => { obj.status = Some(try!(StringDeserializer::deserialize("Status", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } struct OptionGroupMembershipListDeserializer; impl OptionGroupMembershipListDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<OptionGroupMembership>, XmlParseError> { let mut obj = vec![]; try!(start_element(tag_name, stack)); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => { if name == "OptionGroupMembership" { obj.push(try!(OptionGroupMembershipDeserializer::deserialize( "OptionGroupMembership", stack ))); } else { skip_tree(stack); } } DeserializerNext::Close => { try!(end_element(tag_name, stack)); break; } DeserializerNext::Skip => { stack.next(); } } } Ok(obj) } } /// <p>Contains a list of available options for a DB instance.</p> <p> This data type is used as a response element in the <a>DescribeOrderableDBInstanceOptions</a> action. 
</p> #[derive(Default, Debug, Clone, PartialEq)] pub struct OrderableDBInstanceOption { /// <p>A list of Availability Zones for a DB instance.</p> pub availability_zones: Option<Vec<AvailabilityZone>>, /// <p>The DB instance class for a DB instance.</p> pub db_instance_class: Option<String>, /// <p>The engine type of a DB instance.</p> pub engine: Option<String>, /// <p>The engine version of a DB instance.</p> pub engine_version: Option<String>, /// <p>The license model for a DB instance.</p> pub license_model: Option<String>, /// <p>Maximum total provisioned IOPS for a DB instance.</p> pub max_iops_per_db_instance: Option<i64>, /// <p>Maximum provisioned IOPS per GiB for a DB instance.</p> pub max_iops_per_gib: Option<f64>, /// <p>Maximum storage size for a DB instance.</p> pub max_storage_size: Option<i64>, /// <p>Minimum total provisioned IOPS for a DB instance.</p> pub min_iops_per_db_instance: Option<i64>, /// <p>Minimum provisioned IOPS per GiB for a DB instance.</p> pub min_iops_per_gib: Option<f64>, /// <p>Minimum storage size for a DB instance.</p> pub min_storage_size: Option<i64>, /// <p>Indicates whether a DB instance is Multi-AZ capable.</p> pub multi_az_capable: Option<bool>, /// <p>Indicates whether a DB instance can have a Read Replica.</p> pub read_replica_capable: Option<bool>, /// <p>Indicates the storage type for a DB instance.</p> pub storage_type: Option<String>, /// <p>Indicates whether a DB instance supports Enhanced Monitoring at intervals from 1 to 60 seconds.</p> pub supports_enhanced_monitoring: Option<bool>, /// <p>Indicates whether a DB instance supports IAM database authentication.</p> pub supports_iam_database_authentication: Option<bool>, /// <p>Indicates whether a DB instance supports provisioned IOPS.</p> pub supports_iops: Option<bool>, /// <p>True if a DB instance supports Performance Insights, otherwise false.</p> pub supports_performance_insights: Option<bool>, /// <p>Indicates whether a DB instance supports encrypted storage.</p> pub supports_storage_encryption: Option<bool>, /// <p>Indicates whether a DB instance is in a VPC.</p> pub vpc: Option<bool>, } struct OrderableDBInstanceOptionDeserializer; impl OrderableDBInstanceOptionDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<OrderableDBInstanceOption, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = OrderableDBInstanceOption::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "AvailabilityZones" => { obj.availability_zones = Some(try!(AvailabilityZoneListDeserializer::deserialize( "AvailabilityZones", stack ))); } "DBInstanceClass" => { obj.db_instance_class = Some(try!(StringDeserializer::deserialize( "DBInstanceClass", stack ))); } "Engine" => { obj.engine = Some(try!(StringDeserializer::deserialize("Engine", stack))); } "EngineVersion" => { obj.engine_version = Some(try!(StringDeserializer::deserialize( "EngineVersion", stack ))); } "LicenseModel" => { obj.license_model = Some(try!(StringDeserializer::deserialize("LicenseModel", stack))); } "MaxIopsPerDbInstance" => { obj.max_iops_per_db_instance = Some(try!( IntegerOptionalDeserializer::deserialize("MaxIopsPerDbInstance", stack) )); } "MaxIopsPerGib" => { obj.max_iops_per_gib = Some(try!(DoubleOptionalDeserializer::deserialize( "MaxIopsPerGib", stack ))); } "MaxStorageSize" => { obj.max_storage_size = Some(try!( IntegerOptionalDeserializer::deserialize("MaxStorageSize", stack) )); } "MinIopsPerDbInstance" => { obj.min_iops_per_db_instance = Some(try!( IntegerOptionalDeserializer::deserialize("MinIopsPerDbInstance", stack) )); } "MinIopsPerGib" => { obj.min_iops_per_gib = Some(try!(DoubleOptionalDeserializer::deserialize( "MinIopsPerGib", stack ))); } "MinStorageSize" => { obj.min_storage_size = Some(try!( IntegerOptionalDeserializer::deserialize("MinStorageSize", stack) )); } "MultiAZCapable" => { obj.multi_az_capable = Some(try!(BooleanDeserializer::deserialize( "MultiAZCapable", stack ))); } "ReadReplicaCapable" => { obj.read_replica_capable = Some(try!(BooleanDeserializer::deserialize( "ReadReplicaCapable", stack ))); } "StorageType" => { obj.storage_type = Some(try!(StringDeserializer::deserialize("StorageType", stack))); } "SupportsEnhancedMonitoring" => { obj.supports_enhanced_monitoring = Some(try!( BooleanDeserializer::deserialize("SupportsEnhancedMonitoring", stack) )); } "SupportsIAMDatabaseAuthentication" => { obj.supports_iam_database_authentication = Some(try!(BooleanDeserializer::deserialize( "SupportsIAMDatabaseAuthentication", stack ))); } "SupportsIops" => { obj.supports_iops = Some(try!(BooleanDeserializer::deserialize( "SupportsIops", stack ))); } "SupportsPerformanceInsights" => { obj.supports_performance_insights = Some(try!( BooleanDeserializer::deserialize("SupportsPerformanceInsights", stack) )); } "SupportsStorageEncryption" => { obj.supports_storage_encryption = Some(try!( BooleanDeserializer::deserialize("SupportsStorageEncryption", stack) )); } "Vpc" => { obj.vpc = Some(try!(BooleanDeserializer::deserialize("Vpc", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } struct OrderableDBInstanceOptionsListDeserializer; impl OrderableDBInstanceOptionsListDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<OrderableDBInstanceOption>, XmlParseError> { let mut obj = vec![]; try!(start_element(tag_name, stack)); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. 
})) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => { if name == "OrderableDBInstanceOption" { obj.push(try!(OrderableDBInstanceOptionDeserializer::deserialize( "OrderableDBInstanceOption", stack ))); } else { skip_tree(stack); } } DeserializerNext::Close => { try!(end_element(tag_name, stack)); break; } DeserializerNext::Skip => { stack.next(); } } } Ok(obj) } } /// <p> Contains the result of a successful invocation of the <a>DescribeOrderableDBInstanceOptions</a> action. </p> #[derive(Default, Debug, Clone, PartialEq)] pub struct OrderableDBInstanceOptionsMessage { /// <p> An optional pagination token provided by a previous OrderableDBInstanceOptions request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by <code>MaxRecords</code> . </p> pub marker: Option<String>, /// <p>An <a>OrderableDBInstanceOption</a> structure containing information about orderable options for the DB instance.</p> pub orderable_db_instance_options: Option<Vec<OrderableDBInstanceOption>>, } struct OrderableDBInstanceOptionsMessageDeserializer; impl OrderableDBInstanceOptionsMessageDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<OrderableDBInstanceOptionsMessage, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = OrderableDBInstanceOptionsMessage::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "Marker" => { obj.marker = Some(try!(StringDeserializer::deserialize("Marker", stack))); } "OrderableDBInstanceOptions" => { obj.orderable_db_instance_options = Some(try!( OrderableDBInstanceOptionsListDeserializer::deserialize( "OrderableDBInstanceOptions", stack ) )); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p> This data type is used as a request parameter in the <a>ModifyDBParameterGroup</a> and <a>ResetDBParameterGroup</a> actions. </p> <p>This data type is used as a response element in the <a>DescribeEngineDefaultParameters</a> and <a>DescribeDBParameters</a> actions.</p> #[derive(Default, Debug, Clone, PartialEq)] pub struct Parameter { /// <p>Specifies the valid range of values for the parameter.</p> pub allowed_values: Option<String>, /// <p>Indicates when to apply parameter updates.</p> pub apply_method: Option<String>, /// <p>Specifies the engine specific parameters type.</p> pub apply_type: Option<String>, /// <p>Specifies the valid data type for the parameter.</p> pub data_type: Option<String>, /// <p>Provides a description of the parameter.</p> pub description: Option<String>, /// <p> Indicates whether (<code>true</code>) or not (<code>false</code>) the parameter can be modified. Some parameters have security or operational implications that prevent them from being changed. 
</p> pub is_modifiable: Option<bool>, /// <p>The earliest engine version to which the parameter can apply.</p> pub minimum_engine_version: Option<String>, /// <p>Specifies the name of the parameter.</p> pub parameter_name: Option<String>, /// <p>Specifies the value of the parameter.</p> pub parameter_value: Option<String>, /// <p>Indicates the source of the parameter value.</p> pub source: Option<String>, } struct ParameterDeserializer; impl ParameterDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Parameter, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = Parameter::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "AllowedValues" => { obj.allowed_values = Some(try!(StringDeserializer::deserialize( "AllowedValues", stack ))); } "ApplyMethod" => { obj.apply_method = Some(try!(ApplyMethodDeserializer::deserialize( "ApplyMethod", stack ))); } "ApplyType" => { obj.apply_type = Some(try!(StringDeserializer::deserialize("ApplyType", stack))); } "DataType" => { obj.data_type = Some(try!(StringDeserializer::deserialize("DataType", stack))); } "Description" => { obj.description = Some(try!(StringDeserializer::deserialize("Description", stack))); } "IsModifiable" => { obj.is_modifiable = Some(try!(BooleanDeserializer::deserialize( "IsModifiable", stack ))); } "MinimumEngineVersion" => { obj.minimum_engine_version = Some(try!(StringDeserializer::deserialize( "MinimumEngineVersion", stack ))); } "ParameterName" => { obj.parameter_name = Some(try!(StringDeserializer::deserialize( "ParameterName", stack ))); } "ParameterValue" => { obj.parameter_value = Some(try!(StringDeserializer::deserialize( "ParameterValue", stack ))); } "Source" => { obj.source = Some(try!(StringDeserializer::deserialize("Source", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// Serialize `Parameter` contents to a `SignedRequest`. 
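// Illustrative sketch (not generator output): how the serializers below flatten a
// `Parameter` list into query-string keys. Rusoto's `Params` map and its `put`
// method are assumed to be in scope here, as they are for the generated serializers.
//
//     let param = Parameter {
//         parameter_name: Some("neptune_query_timeout".to_owned()),
//         parameter_value: Some("120000".to_owned()),
//         apply_method: Some("pending-reboot".to_owned()),
//         ..Default::default()
//     };
//     let mut params = Params::new();
//     ParametersListSerializer::serialize(&mut params, "Parameters", &vec![param]);
//     // Yields keys such as "Parameters.member.1.ParameterName",
//     // "Parameters.member.1.ParameterValue" and "Parameters.member.1.ApplyMethod":
//     // each list element is prefixed with "<name>.member.<index>", starting at 1.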
struct ParameterSerializer; impl ParameterSerializer { fn serialize(params: &mut Params, name: &str, obj: &Parameter) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.allowed_values { params.put(&format!("{}{}", prefix, "AllowedValues"), &field_value); } if let Some(ref field_value) = obj.apply_method { params.put(&format!("{}{}", prefix, "ApplyMethod"), &field_value); } if let Some(ref field_value) = obj.apply_type { params.put(&format!("{}{}", prefix, "ApplyType"), &field_value); } if let Some(ref field_value) = obj.data_type { params.put(&format!("{}{}", prefix, "DataType"), &field_value); } if let Some(ref field_value) = obj.description { params.put(&format!("{}{}", prefix, "Description"), &field_value); } if let Some(ref field_value) = obj.is_modifiable { params.put( &format!("{}{}", prefix, "IsModifiable"), &field_value.to_string(), ); } if let Some(ref field_value) = obj.minimum_engine_version { params.put( &format!("{}{}", prefix, "MinimumEngineVersion"), &field_value, ); } if let Some(ref field_value) = obj.parameter_name { params.put(&format!("{}{}", prefix, "ParameterName"), &field_value); } if let Some(ref field_value) = obj.parameter_value { params.put(&format!("{}{}", prefix, "ParameterValue"), &field_value); } if let Some(ref field_value) = obj.source { params.put(&format!("{}{}", prefix, "Source"), &field_value); } } } struct ParametersListDeserializer; impl ParametersListDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<Parameter>, XmlParseError> { let mut obj = vec![]; try!(start_element(tag_name, stack)); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => { if name == "Parameter" { obj.push(try!(ParameterDeserializer::deserialize("Parameter", stack))); } else { skip_tree(stack); } } DeserializerNext::Close => { try!(end_element(tag_name, stack)); break; } DeserializerNext::Skip => { stack.next(); } } } Ok(obj) } } /// Serialize `ParametersList` contents to a `SignedRequest`. struct ParametersListSerializer; impl ParametersListSerializer { fn serialize(params: &mut Params, name: &str, obj: &Vec<Parameter>) { for (index, obj) in obj.iter().enumerate() { let key = format!("{}.member.{}", name, index + 1); ParameterSerializer::serialize(params, &key, obj); } } } /// <p>A list of the log types whose configuration is still pending. In other words, these log types are in the process of being activated or deactivated.</p> #[derive(Default, Debug, Clone, PartialEq)] pub struct PendingCloudwatchLogsExports { /// <p>Log types that are in the process of being deactivated. After they are deactivated, these log types aren't exported to CloudWatch Logs.</p> pub log_types_to_disable: Option<Vec<String>>, /// <p>Log types that are in the process of being enabled. After they are enabled, these log types are exported to CloudWatch Logs.</p> pub log_types_to_enable: Option<Vec<String>>, } struct PendingCloudwatchLogsExportsDeserializer; impl PendingCloudwatchLogsExportsDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<PendingCloudwatchLogsExports, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = PendingCloudwatchLogsExports::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "LogTypesToDisable" => { obj.log_types_to_disable = Some(try!( LogTypeListDeserializer::deserialize("LogTypesToDisable", stack) )); } "LogTypesToEnable" => { obj.log_types_to_enable = Some(try!(LogTypeListDeserializer::deserialize( "LogTypesToEnable", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p>Provides information about a pending maintenance action for a resource.</p> #[derive(Default, Debug, Clone, PartialEq)] pub struct PendingMaintenanceAction { /// <p>The type of pending maintenance action that is available for the resource.</p> pub action: Option<String>, /// <p>The date of the maintenance window when the action is applied. The maintenance action is applied to the resource during its first maintenance window after this date. If this date is specified, any <code>next-maintenance</code> opt-in requests are ignored.</p> pub auto_applied_after_date: Option<String>, /// <p>The effective date when the pending maintenance action is applied to the resource. This date takes into account opt-in requests received from the <a>ApplyPendingMaintenanceAction</a> API, the <code>AutoAppliedAfterDate</code>, and the <code>ForcedApplyDate</code>. This value is blank if an opt-in request has not been received and nothing has been specified as <code>AutoAppliedAfterDate</code> or <code>ForcedApplyDate</code>.</p> pub current_apply_date: Option<String>, /// <p>A description providing more detail about the maintenance action.</p> pub description: Option<String>, /// <p>The date when the maintenance action is automatically applied. The maintenance action is applied to the resource on this date regardless of the maintenance window for the resource. If this date is specified, any <code>immediate</code> opt-in requests are ignored.</p> pub forced_apply_date: Option<String>, /// <p>Indicates the type of opt-in request that has been received for the resource.</p> pub opt_in_status: Option<String>, } struct PendingMaintenanceActionDeserializer; impl PendingMaintenanceActionDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<PendingMaintenanceAction, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = PendingMaintenanceAction::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "Action" => { obj.action = Some(try!(StringDeserializer::deserialize("Action", stack))); } "AutoAppliedAfterDate" => { obj.auto_applied_after_date = Some(try!(TStampDeserializer::deserialize( "AutoAppliedAfterDate", stack ))); } "CurrentApplyDate" => { obj.current_apply_date = Some(try!(TStampDeserializer::deserialize( "CurrentApplyDate", stack ))); } "Description" => { obj.description = Some(try!(StringDeserializer::deserialize("Description", stack))); } "ForcedApplyDate" => { obj.forced_apply_date = Some(try!(TStampDeserializer::deserialize( "ForcedApplyDate", stack ))); } "OptInStatus" => { obj.opt_in_status = Some(try!(StringDeserializer::deserialize("OptInStatus", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } struct PendingMaintenanceActionDetailsDeserializer; impl PendingMaintenanceActionDetailsDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<PendingMaintenanceAction>, XmlParseError> { let mut obj = vec![]; try!(start_element(tag_name, stack)); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => { if name == "PendingMaintenanceAction" { obj.push(try!(PendingMaintenanceActionDeserializer::deserialize( "PendingMaintenanceAction", stack ))); } else { skip_tree(stack); } } DeserializerNext::Close => { try!(end_element(tag_name, stack)); break; } DeserializerNext::Skip => { stack.next(); } } } Ok(obj) } } struct PendingMaintenanceActionsDeserializer; impl PendingMaintenanceActionsDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<ResourcePendingMaintenanceActions>, XmlParseError> { let mut obj = vec![]; try!(start_element(tag_name, stack)); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => { if name == "ResourcePendingMaintenanceActions" { obj.push(try!( ResourcePendingMaintenanceActionsDeserializer::deserialize( "ResourcePendingMaintenanceActions", stack ) )); } else { skip_tree(stack); } } DeserializerNext::Close => { try!(end_element(tag_name, stack)); break; } DeserializerNext::Skip => { stack.next(); } } } Ok(obj) } } /// <p>Data returned from the <b>DescribePendingMaintenanceActions</b> action.</p> #[derive(Default, Debug, Clone, PartialEq)] pub struct PendingMaintenanceActionsMessage { /// <p> An optional pagination token provided by a previous <code>DescribePendingMaintenanceActions</code> request. If this parameter is specified, the response includes only records beyond the marker, up to a number of records specified by <code>MaxRecords</code>. 
</p> pub marker: Option<String>, /// <p>A list of the pending maintenance actions for the resource.</p> pub pending_maintenance_actions: Option<Vec<ResourcePendingMaintenanceActions>>, } struct PendingMaintenanceActionsMessageDeserializer; impl PendingMaintenanceActionsMessageDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<PendingMaintenanceActionsMessage, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = PendingMaintenanceActionsMessage::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "Marker" => { obj.marker = Some(try!(StringDeserializer::deserialize("Marker", stack))); } "PendingMaintenanceActions" => { obj.pending_maintenance_actions = Some(try!(PendingMaintenanceActionsDeserializer::deserialize( "PendingMaintenanceActions", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p> This data type is used as a response element in the <a>ModifyDBInstance</a> action. </p> #[derive(Default, Debug, Clone, PartialEq)] pub struct PendingModifiedValues { /// <p> Contains the new <code>AllocatedStorage</code> size for the DB instance that will be applied or is currently being applied. </p> pub allocated_storage: Option<i64>, /// <p>Specifies the pending number of days for which automated backups are retained.</p> pub backup_retention_period: Option<i64>, /// <p>Specifies the identifier of the CA certificate for the DB instance.</p> pub ca_certificate_identifier: Option<String>, /// <p> Contains the new <code>DBInstanceClass</code> for the DB instance that will be applied or is currently being applied. </p> pub db_instance_class: Option<String>, /// <p> Contains the new <code>DBInstanceIdentifier</code> for the DB instance that will be applied or is currently being applied. </p> pub db_instance_identifier: Option<String>, /// <p>The new DB subnet group for the DB instance. 
</p> pub db_subnet_group_name: Option<String>, /// <p>Indicates the database engine version.</p> pub engine_version: Option<String>, /// <p>Specifies the new Provisioned IOPS value for the DB instance that will be applied or is currently being applied.</p> pub iops: Option<i64>, /// <p>The license model for the DB instance.</p> <p>Valid values: <code>license-included</code> | <code>bring-your-own-license</code> | <code>general-public-license</code> </p> pub license_model: Option<String>, /// <p>Contains the pending or currently-in-progress change of the master credentials for the DB instance.</p> pub master_user_password: Option<String>, /// <p>Indicates that the Single-AZ DB instance is to change to a Multi-AZ deployment.</p> pub multi_az: Option<bool>, pub pending_cloudwatch_logs_exports: Option<PendingCloudwatchLogsExports>, /// <p>Specifies the pending port for the DB instance.</p> pub port: Option<i64>, /// <p>Specifies the storage type to be associated with the DB instance.</p> pub storage_type: Option<String>, } struct PendingModifiedValuesDeserializer; impl PendingModifiedValuesDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<PendingModifiedValues, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = PendingModifiedValues::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "AllocatedStorage" => { obj.allocated_storage = Some(try!( IntegerOptionalDeserializer::deserialize("AllocatedStorage", stack) )); } "BackupRetentionPeriod" => { obj.backup_retention_period = Some(try!(IntegerOptionalDeserializer::deserialize( "BackupRetentionPeriod", stack ))); } "CACertificateIdentifier" => { obj.ca_certificate_identifier = Some(try!( StringDeserializer::deserialize("CACertificateIdentifier", stack) )); } "DBInstanceClass" => { obj.db_instance_class = Some(try!(StringDeserializer::deserialize( "DBInstanceClass", stack ))); } "DBInstanceIdentifier" => { obj.db_instance_identifier = Some(try!(StringDeserializer::deserialize( "DBInstanceIdentifier", stack ))); } "DBSubnetGroupName" => { obj.db_subnet_group_name = Some(try!(StringDeserializer::deserialize( "DBSubnetGroupName", stack ))); } "EngineVersion" => { obj.engine_version = Some(try!(StringDeserializer::deserialize( "EngineVersion", stack ))); } "Iops" => { obj.iops = Some(try!(IntegerOptionalDeserializer::deserialize( "Iops", stack ))); } "LicenseModel" => { obj.license_model = Some(try!(StringDeserializer::deserialize("LicenseModel", stack))); } "MasterUserPassword" => { obj.master_user_password = Some(try!(StringDeserializer::deserialize( "MasterUserPassword", stack ))); } "MultiAZ" => { obj.multi_az = Some(try!(BooleanOptionalDeserializer::deserialize( "MultiAZ", stack ))); } "PendingCloudwatchLogsExports" => { obj.pending_cloudwatch_logs_exports = Some(try!(PendingCloudwatchLogsExportsDeserializer::deserialize( "PendingCloudwatchLogsExports", stack ))); } "Port" => { obj.port = Some(try!(IntegerOptionalDeserializer::deserialize( "Port", stack ))); } "StorageType" => { obj.storage_type = Some(try!(StringDeserializer::deserialize("StorageType", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } 
} try!(end_element(tag_name, stack)); Ok(obj) } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct PromoteReadReplicaDBClusterMessage { /// <p>The identifier of the DB cluster Read Replica to promote. This parameter is not case-sensitive. </p> <p>Constraints:</p> <ul> <li> <p>Must match the identifier of an existing DBCluster Read Replica.</p> </li> </ul> <p>Example: <code>my-cluster-replica1</code> </p> pub db_cluster_identifier: String, } /// Serialize `PromoteReadReplicaDBClusterMessage` contents to a `SignedRequest`. struct PromoteReadReplicaDBClusterMessageSerializer; impl PromoteReadReplicaDBClusterMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &PromoteReadReplicaDBClusterMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "DBClusterIdentifier"), &obj.db_cluster_identifier, ); } } #[derive(Default, Debug, Clone, PartialEq)] pub struct PromoteReadReplicaDBClusterResult { pub db_cluster: Option<DBCluster>, } struct PromoteReadReplicaDBClusterResultDeserializer; impl PromoteReadReplicaDBClusterResultDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<PromoteReadReplicaDBClusterResult, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = PromoteReadReplicaDBClusterResult::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "DBCluster" => { obj.db_cluster = Some(try!(DBClusterDeserializer::deserialize("DBCluster", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p>A range of integer values.</p> #[derive(Default, Debug, Clone, PartialEq)] pub struct Range { /// <p>The minimum value in the range.</p> pub from: Option<i64>, /// <p>The step value for the range. For example, if you have a range of 5,000 to 10,000, with a step value of 1,000, the valid values start at 5,000 and step up by 1,000. Even though 7,500 is within the range, it isn't a valid value for the range. The valid values are 5,000, 6,000, 7,000, 8,000... </p> pub step: Option<i64>, /// <p>The maximum value in the range.</p> pub to: Option<i64>, } struct RangeDeserializer; impl RangeDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Range, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = Range::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "From" => { obj.from = Some(try!(IntegerDeserializer::deserialize("From", stack))); } "Step" => { obj.step = Some(try!(IntegerOptionalDeserializer::deserialize( "Step", stack ))); } "To" => { obj.to = Some(try!(IntegerDeserializer::deserialize("To", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } struct RangeListDeserializer; impl RangeListDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<Range>, XmlParseError> { let mut obj = vec![]; try!(start_element(tag_name, stack)); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => { if name == "Range" { obj.push(try!(RangeDeserializer::deserialize("Range", stack))); } else { skip_tree(stack); } } DeserializerNext::Close => { try!(end_element(tag_name, stack)); break; } DeserializerNext::Skip => { stack.next(); } } } Ok(obj) } } struct ReadReplicaDBClusterIdentifierListDeserializer; impl ReadReplicaDBClusterIdentifierListDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<String>, XmlParseError> { let mut obj = vec![]; try!(start_element(tag_name, stack)); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => { if name == "ReadReplicaDBClusterIdentifier" { obj.push(try!(StringDeserializer::deserialize( "ReadReplicaDBClusterIdentifier", stack ))); } else { skip_tree(stack); } } DeserializerNext::Close => { try!(end_element(tag_name, stack)); break; } DeserializerNext::Skip => { stack.next(); } } } Ok(obj) } } struct ReadReplicaDBInstanceIdentifierListDeserializer; impl ReadReplicaDBInstanceIdentifierListDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<String>, XmlParseError> { let mut obj = vec![]; try!(start_element(tag_name, stack)); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => { if name == "ReadReplicaDBInstanceIdentifier" { obj.push(try!(StringDeserializer::deserialize( "ReadReplicaDBInstanceIdentifier", stack ))); } else { skip_tree(stack); } } DeserializerNext::Close => { try!(end_element(tag_name, stack)); break; } DeserializerNext::Skip => { stack.next(); } } } Ok(obj) } } struct ReadReplicaIdentifierListDeserializer; impl ReadReplicaIdentifierListDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<String>, XmlParseError> { let mut obj = vec![]; try!(start_element(tag_name, stack)); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. 
})) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => { if name == "ReadReplicaIdentifier" { obj.push(try!(StringDeserializer::deserialize( "ReadReplicaIdentifier", stack ))); } else { skip_tree(stack); } } DeserializerNext::Close => { try!(end_element(tag_name, stack)); break; } DeserializerNext::Skip => { stack.next(); } } } Ok(obj) } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct RebootDBInstanceMessage { /// <p><p>The DB instance identifier. This parameter is stored as a lowercase string.</p> <p>Constraints:</p> <ul> <li> <p>Must match the identifier of an existing DBInstance.</p> </li> </ul></p> pub db_instance_identifier: String, /// <p> When <code>true</code>, the reboot is conducted through a MultiAZ failover. </p> <p>Constraint: You can't specify <code>true</code> if the instance is not configured for MultiAZ.</p> pub force_failover: Option<bool>, } /// Serialize `RebootDBInstanceMessage` contents to a `SignedRequest`. struct RebootDBInstanceMessageSerializer; impl RebootDBInstanceMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &RebootDBInstanceMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "DBInstanceIdentifier"), &obj.db_instance_identifier, ); if let Some(ref field_value) = obj.force_failover { params.put( &format!("{}{}", prefix, "ForceFailover"), &field_value.to_string(), ); } } } #[derive(Default, Debug, Clone, PartialEq)] pub struct RebootDBInstanceResult { pub db_instance: Option<DBInstance>, } struct RebootDBInstanceResultDeserializer; impl RebootDBInstanceResultDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<RebootDBInstanceResult, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = RebootDBInstanceResult::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "DBInstance" => { obj.db_instance = Some(try!(DBInstanceDeserializer::deserialize( "DBInstance", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } #[derive(Default, Debug, Clone, PartialEq)] pub struct RemoveRoleFromDBClusterMessage { /// <p>The name of the DB cluster to disassociate the IAM role from.</p> pub db_cluster_identifier: String, /// <p>The Amazon Resource Name (ARN) of the IAM role to disassociate from the DB cluster, for example <code>arn:aws:iam::123456789012:role/NeptuneAccessRole</code>.</p> pub role_arn: String, } /// Serialize `RemoveRoleFromDBClusterMessage` contents to a `SignedRequest`. 
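// Usage sketch (illustrative, not part of the generated code): both fields of
// `RemoveRoleFromDBClusterMessage` are required, so the serializer below always
// emits them. An empty `name` argument means the keys carry no prefix. Rusoto's
// `Params` map is assumed to be in scope, as it is for the generated serializers.
//
//     let msg = RemoveRoleFromDBClusterMessage {
//         db_cluster_identifier: "my-cluster".to_owned(),
//         role_arn: "arn:aws:iam::123456789012:role/NeptuneAccessRole".to_owned(),
//     };
//     let mut params = Params::new();
//     RemoveRoleFromDBClusterMessageSerializer::serialize(&mut params, "", &msg);
//     // params now maps "DBClusterIdentifier" => "my-cluster" and
//     // "RoleArn" => "arn:aws:iam::123456789012:role/NeptuneAccessRole".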
struct RemoveRoleFromDBClusterMessageSerializer; impl RemoveRoleFromDBClusterMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &RemoveRoleFromDBClusterMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "DBClusterIdentifier"), &obj.db_cluster_identifier, ); params.put(&format!("{}{}", prefix, "RoleArn"), &obj.role_arn); } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct RemoveSourceIdentifierFromSubscriptionMessage { /// <p> The source identifier to be removed from the subscription, such as the <b>DB instance identifier</b> for a DB instance or the name of a security group. </p> pub source_identifier: String, /// <p>The name of the event notification subscription you want to remove a source identifier from.</p> pub subscription_name: String, } /// Serialize `RemoveSourceIdentifierFromSubscriptionMessage` contents to a `SignedRequest`. struct RemoveSourceIdentifierFromSubscriptionMessageSerializer; impl RemoveSourceIdentifierFromSubscriptionMessageSerializer { fn serialize( params: &mut Params, name: &str, obj: &RemoveSourceIdentifierFromSubscriptionMessage, ) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "SourceIdentifier"), &obj.source_identifier, ); params.put( &format!("{}{}", prefix, "SubscriptionName"), &obj.subscription_name, ); } } #[derive(Default, Debug, Clone, PartialEq)] pub struct RemoveSourceIdentifierFromSubscriptionResult { pub event_subscription: Option<EventSubscription>, } struct RemoveSourceIdentifierFromSubscriptionResultDeserializer; impl RemoveSourceIdentifierFromSubscriptionResultDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<RemoveSourceIdentifierFromSubscriptionResult, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = RemoveSourceIdentifierFromSubscriptionResult::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "EventSubscription" => { obj.event_subscription = Some(try!( EventSubscriptionDeserializer::deserialize("EventSubscription", stack) )); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct RemoveTagsFromResourceMessage { /// <p>The Amazon Neptune resource that the tags are removed from. This value is an Amazon Resource Name (ARN). For information about creating an ARN, see <a href="http://docs.aws.amazon.com/neptune/latest/UserGuide/tagging.ARN.html#tagging.ARN.Constructing"> Constructing an Amazon Resource Name (ARN)</a>.</p> pub resource_name: String, /// <p>The tag key (name) of the tag to be removed.</p> pub tag_keys: Vec<String>, } /// Serialize `RemoveTagsFromResourceMessage` contents to a `SignedRequest`. 
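// Illustrative sketch (assumption: `KeyListSerializer`, defined elsewhere in this
// file, follows the same "<name>.member.<index>" convention as the other list
// serializers here). Shows how a tag-removal request is flattened into params:
//
//     let msg = RemoveTagsFromResourceMessage {
//         resource_name: "arn:aws:rds:us-east-1:123456789012:cluster:my-cluster".to_owned(),
//         tag_keys: vec!["env".to_owned(), "team".to_owned()],
//     };
//     let mut params = Params::new();
//     RemoveTagsFromResourceMessageSerializer::serialize(&mut params, "", &msg);
//     // Expected keys: "ResourceName" => the ARN, "TagKeys.member.1" => "env",
//     // "TagKeys.member.2" => "team".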
struct RemoveTagsFromResourceMessageSerializer; impl RemoveTagsFromResourceMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &RemoveTagsFromResourceMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put(&format!("{}{}", prefix, "ResourceName"), &obj.resource_name); KeyListSerializer::serialize(params, &format!("{}{}", prefix, "TagKeys"), &obj.tag_keys); } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct ResetDBClusterParameterGroupMessage { /// <p>The name of the DB cluster parameter group to reset.</p> pub db_cluster_parameter_group_name: String, /// <p>A list of parameter names in the DB cluster parameter group to reset to the default values. You can't use this parameter if the <code>ResetAllParameters</code> parameter is set to <code>true</code>.</p> pub parameters: Option<Vec<Parameter>>, /// <p>A value that is set to <code>true</code> to reset all parameters in the DB cluster parameter group to their default values, and <code>false</code> otherwise. You can't use this parameter if there is a list of parameter names specified for the <code>Parameters</code> parameter.</p> pub reset_all_parameters: Option<bool>, } /// Serialize `ResetDBClusterParameterGroupMessage` contents to a `SignedRequest`. struct ResetDBClusterParameterGroupMessageSerializer; impl ResetDBClusterParameterGroupMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &ResetDBClusterParameterGroupMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "DBClusterParameterGroupName"), &obj.db_cluster_parameter_group_name, ); if let Some(ref field_value) = obj.parameters { ParametersListSerializer::serialize( params, &format!("{}{}", prefix, "Parameter"), field_value, ); } if let Some(ref field_value) = obj.reset_all_parameters { params.put( &format!("{}{}", prefix, "ResetAllParameters"), &field_value.to_string(), ); } } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct ResetDBParameterGroupMessage { /// <p><p>The name of the DB parameter group.</p> <p>Constraints:</p> <ul> <li> <p>Must match the name of an existing DBParameterGroup.</p> </li> </ul></p> pub db_parameter_group_name: String, /// <p>To reset the entire DB parameter group, specify the <code>DBParameterGroup</code> name and <code>ResetAllParameters</code> parameters. To reset specific parameters, provide a list of the following: <code>ParameterName</code> and <code>ApplyMethod</code>. A maximum of 20 parameters can be modified in a single request.</p> <p>Valid Values (for Apply method): <code>pending-reboot</code> </p> pub parameters: Option<Vec<Parameter>>, /// <p> Specifies whether (<code>true</code>) or not (<code>false</code>) to reset all parameters in the DB parameter group to default values. </p> <p>Default: <code>true</code> </p> pub reset_all_parameters: Option<bool>, } /// Serialize `ResetDBParameterGroupMessage` contents to a `SignedRequest`. 
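// Usage sketch (illustrative): the message above supports two mutually exclusive
// modes, and the serializer below emits whichever fields are set. To reset the
// whole group:
//
//     let reset_all = ResetDBParameterGroupMessage {
//         db_parameter_group_name: "my-param-group".to_owned(),
//         reset_all_parameters: Some(true),
//         parameters: None,
//     };
//
// To reset selected parameters instead, leave `reset_all_parameters` unset and
// supply `parameters`, where each `Parameter` carries a `parameter_name` and an
// `apply_method` of "pending-reboot" (per the doc comment above, the only valid
// apply method for a reset).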
struct ResetDBParameterGroupMessageSerializer; impl ResetDBParameterGroupMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &ResetDBParameterGroupMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "DBParameterGroupName"), &obj.db_parameter_group_name, ); if let Some(ref field_value) = obj.parameters { ParametersListSerializer::serialize( params, &format!("{}{}", prefix, "Parameter"), field_value, ); } if let Some(ref field_value) = obj.reset_all_parameters { params.put( &format!("{}{}", prefix, "ResetAllParameters"), &field_value.to_string(), ); } } } /// <p>Describes the pending maintenance actions for a resource.</p> #[derive(Default, Debug, Clone, PartialEq)] pub struct ResourcePendingMaintenanceActions { /// <p>A list that provides details about the pending maintenance actions for the resource.</p> pub pending_maintenance_action_details: Option<Vec<PendingMaintenanceAction>>, /// <p>The ARN of the resource that has pending maintenance actions.</p> pub resource_identifier: Option<String>, } struct ResourcePendingMaintenanceActionsDeserializer; impl ResourcePendingMaintenanceActionsDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<ResourcePendingMaintenanceActions, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = ResourcePendingMaintenanceActions::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "PendingMaintenanceActionDetails" => { obj.pending_maintenance_action_details = Some(try!( PendingMaintenanceActionDetailsDeserializer::deserialize( "PendingMaintenanceActionDetails", stack ) )); } "ResourceIdentifier" => { obj.resource_identifier = Some(try!(StringDeserializer::deserialize( "ResourceIdentifier", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct RestoreDBClusterFromSnapshotMessage { /// <p>Provides the list of EC2 Availability Zones that instances in the restored DB cluster can be created in.</p> pub availability_zones: Option<Vec<String>>, /// <p>The name of the DB cluster to create from the DB snapshot or DB cluster snapshot. 
This parameter isn't case-sensitive.</p> <p>Constraints:</p> <ul> <li> <p>Must contain from 1 to 63 letters, numbers, or hyphens</p> </li> <li> <p>First character must be a letter</p> </li> <li> <p>Cannot end with a hyphen or contain two consecutive hyphens</p> </li> </ul> <p>Example: <code>my-snapshot-id</code> </p> pub db_cluster_identifier: String, /// <p>The name of the DB subnet group to use for the new DB cluster.</p> <p>Constraints: If supplied, must match the name of an existing DBSubnetGroup.</p> <p>Example: <code>mySubnetgroup</code> </p> pub db_subnet_group_name: Option<String>, /// <p>The database name for the restored DB cluster.</p> pub database_name: Option<String>, /// <p>True to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts, and otherwise false.</p> <p>Default: <code>false</code> </p> pub enable_iam_database_authentication: Option<bool>, /// <p>The database engine to use for the new DB cluster.</p> <p>Default: The same as source</p> <p>Constraint: Must be compatible with the engine of the source</p> pub engine: String, /// <p>The version of the database engine to use for the new DB cluster.</p> pub engine_version: Option<String>, /// <p><p>The AWS KMS key identifier to use when restoring an encrypted DB cluster from a DB snapshot or DB cluster snapshot.</p> <p>The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption key. If you are restoring a DB cluster with the same AWS account that owns the KMS encryption key used to encrypt the new DB cluster, then you can use the KMS key alias instead of the ARN for the KMS encryption key.</p> <p>If you do not specify a value for the <code>KmsKeyId</code> parameter, then the following will occur:</p> <ul> <li> <p>If the DB snapshot or DB cluster snapshot in <code>SnapshotIdentifier</code> is encrypted, then the restored DB cluster is encrypted using the KMS key that was used to encrypt the DB snapshot or DB cluster snapshot.</p> </li> <li> <p>If the DB snapshot or DB cluster snapshot in <code>SnapshotIdentifier</code> is not encrypted, then the restored DB cluster is not encrypted.</p> </li> </ul></p> pub kms_key_id: Option<String>, /// <p>The name of the option group to use for the restored DB cluster.</p> pub option_group_name: Option<String>, /// <p>The port number on which the new DB cluster accepts connections.</p> <p>Constraints: Value must be <code>1150-65535</code> </p> <p>Default: The same port as the original DB cluster.</p> pub port: Option<i64>, /// <p><p>The identifier for the DB snapshot or DB cluster snapshot to restore from.</p> <p>You can use either the name or the Amazon Resource Name (ARN) to specify a DB cluster snapshot. However, you can use only the ARN to specify a DB snapshot.</p> <p>Constraints:</p> <ul> <li> <p>Must match the identifier of an existing Snapshot.</p> </li> </ul></p> pub snapshot_identifier: String, /// <p>The tags to be assigned to the restored DB cluster.</p> pub tags: Option<Vec<Tag>>, /// <p>A list of VPC security groups that the new DB cluster will belong to.</p> pub vpc_security_group_ids: Option<Vec<String>>, } /// Serialize `RestoreDBClusterFromSnapshotMessage` contents to a `SignedRequest`. 
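// Usage sketch (illustrative, not generator output): only `DBClusterIdentifier`,
// `Engine` and `SnapshotIdentifier` are required; every `Option` field left as
// `None` is simply omitted from the query string by the serializer below.
//
//     let msg = RestoreDBClusterFromSnapshotMessage {
//         db_cluster_identifier: "my-restored-cluster".to_owned(),
//         engine: "neptune".to_owned(),
//         snapshot_identifier: "my-snapshot-id".to_owned(),
//         ..Default::default()
//     };
//     let mut params = Params::new();
//     RestoreDBClusterFromSnapshotMessageSerializer::serialize(&mut params, "", &msg);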
struct RestoreDBClusterFromSnapshotMessageSerializer; impl RestoreDBClusterFromSnapshotMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &RestoreDBClusterFromSnapshotMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } if let Some(ref field_value) = obj.availability_zones { AvailabilityZonesSerializer::serialize( params, &format!("{}{}", prefix, "AvailabilityZone"), field_value, ); } params.put( &format!("{}{}", prefix, "DBClusterIdentifier"), &obj.db_cluster_identifier, ); if let Some(ref field_value) = obj.db_subnet_group_name { params.put(&format!("{}{}", prefix, "DBSubnetGroupName"), &field_value); } if let Some(ref field_value) = obj.database_name { params.put(&format!("{}{}", prefix, "DatabaseName"), &field_value); } if let Some(ref field_value) = obj.enable_iam_database_authentication { params.put( &format!("{}{}", prefix, "EnableIAMDatabaseAuthentication"), &field_value.to_string(), ); } params.put(&format!("{}{}", prefix, "Engine"), &obj.engine); if let Some(ref field_value) = obj.engine_version { params.put(&format!("{}{}", prefix, "EngineVersion"), &field_value); } if let Some(ref field_value) = obj.kms_key_id { params.put(&format!("{}{}", prefix, "KmsKeyId"), &field_value); } if let Some(ref field_value) = obj.option_group_name { params.put(&format!("{}{}", prefix, "OptionGroupName"), &field_value); } if let Some(ref field_value) = obj.port { params.put(&format!("{}{}", prefix, "Port"), &field_value.to_string()); } params.put( &format!("{}{}", prefix, "SnapshotIdentifier"), &obj.snapshot_identifier, ); if let Some(ref field_value) = obj.tags { TagListSerializer::serialize(params, &format!("{}{}", prefix, "Tag"), field_value); } if let Some(ref field_value) = obj.vpc_security_group_ids { VpcSecurityGroupIdListSerializer::serialize( params, &format!("{}{}", prefix, "VpcSecurityGroupId"), field_value, ); } } } #[derive(Default, Debug, Clone, PartialEq)] pub struct RestoreDBClusterFromSnapshotResult { pub db_cluster: Option<DBCluster>, } struct RestoreDBClusterFromSnapshotResultDeserializer; impl RestoreDBClusterFromSnapshotResultDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<RestoreDBClusterFromSnapshotResult, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = RestoreDBClusterFromSnapshotResult::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
{ "DBCluster" => { obj.db_cluster = Some(try!(DBClusterDeserializer::deserialize("DBCluster", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p><p/></p> #[derive(Default, Debug, Clone, PartialEq)] pub struct RestoreDBClusterToPointInTimeMessage { /// <p><p>The name of the new DB cluster to be created.</p> <p>Constraints:</p> <ul> <li> <p>Must contain from 1 to 63 letters, numbers, or hyphens</p> </li> <li> <p>First character must be a letter</p> </li> <li> <p>Cannot end with a hyphen or contain two consecutive hyphens</p> </li> </ul></p> pub db_cluster_identifier: String, /// <p>The DB subnet group name to use for the new DB cluster.</p> <p>Constraints: If supplied, must match the name of an existing DBSubnetGroup.</p> <p>Example: <code>mySubnetgroup</code> </p> pub db_subnet_group_name: Option<String>, /// <p>True to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts, and otherwise false.</p> <p>Default: <code>false</code> </p> pub enable_iam_database_authentication: Option<bool>, /// <p>The AWS KMS key identifier to use when restoring an encrypted DB cluster from an encrypted DB cluster.</p> <p>The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption key. If you are restoring a DB cluster with the same AWS account that owns the KMS encryption key used to encrypt the new DB cluster, then you can use the KMS key alias instead of the ARN for the KMS encryption key.</p> <p>You can restore to a new DB cluster and encrypt the new DB cluster with a KMS key that is different than the KMS key used to encrypt the source DB cluster. The new DB cluster is encrypted with the KMS key identified by the <code>KmsKeyId</code> parameter.</p> <p>If you do not specify a value for the <code>KmsKeyId</code> parameter, then the following will occur:</p> <ul> <li> <p>If the DB cluster is encrypted, then the restored DB cluster is encrypted using the KMS key that was used to encrypt the source DB cluster.</p> </li> <li> <p>If the DB cluster is not encrypted, then the restored DB cluster is not encrypted.</p> </li> </ul> <p>If <code>DBClusterIdentifier</code> refers to a DB cluster that is not encrypted, then the restore request is rejected.</p> pub kms_key_id: Option<String>, /// <p>The name of the option group for the new DB cluster.</p> pub option_group_name: Option<String>, /// <p>The port number on which the new DB cluster accepts connections.</p> <p>Constraints: Value must be <code>1150-65535</code> </p> <p>Default: The same port as the original DB cluster.</p> pub port: Option<i64>, /// <p>The date and time to restore the DB cluster to.</p> <p>Valid Values: Value must be a time in Universal Coordinated Time (UTC) format</p> <p>Constraints:</p> <ul> <li> <p>Must be before the latest restorable time for the DB instance</p> </li> <li> <p>Must be specified if <code>UseLatestRestorableTime</code> parameter is not provided</p> </li> <li> <p>Cannot be specified if <code>UseLatestRestorableTime</code> parameter is true</p> </li> <li> <p>Cannot be specified if <code>RestoreType</code> parameter is <code>copy-on-write</code> </p> </li> </ul> <p>Example: <code>2015-03-07T23:45:00Z</code> </p> pub restore_to_time: Option<String>, /// <p>The type of restore to be performed. 
You can specify one of the following values:</p> <ul> <li> <p> <code>full-copy</code> - The new DB cluster is restored as a full copy of the source DB cluster.</p> </li> <li> <p> <code>copy-on-write</code> - The new DB cluster is restored as a clone of the source DB cluster.</p> </li> </ul> <p>Constraints: You can't specify <code>copy-on-write</code> if the engine version of the source DB cluster is earlier than 1.11.</p> <p>If you don't specify a <code>RestoreType</code> value, then the new DB cluster is restored as a full copy of the source DB cluster.</p> pub restore_type: Option<String>, /// <p><p>The identifier of the source DB cluster from which to restore.</p> <p>Constraints:</p> <ul> <li> <p>Must match the identifier of an existing DBCluster.</p> </li> </ul></p> pub source_db_cluster_identifier: String, pub tags: Option<Vec<Tag>>, /// <p>A value that is set to <code>true</code> to restore the DB cluster to the latest restorable backup time, and <code>false</code> otherwise. </p> <p>Default: <code>false</code> </p> <p>Constraints: Cannot be specified if <code>RestoreToTime</code> parameter is provided.</p> pub use_latest_restorable_time: Option<bool>, /// <p>A list of VPC security groups that the new DB cluster belongs to.</p> pub vpc_security_group_ids: Option<Vec<String>>, } /// Serialize `RestoreDBClusterToPointInTimeMessage` contents to a `SignedRequest`. struct RestoreDBClusterToPointInTimeMessageSerializer; impl RestoreDBClusterToPointInTimeMessageSerializer { fn serialize(params: &mut Params, name: &str, obj: &RestoreDBClusterToPointInTimeMessage) { let mut prefix = name.to_string(); if prefix != "" { prefix.push_str("."); } params.put( &format!("{}{}", prefix, "DBClusterIdentifier"), &obj.db_cluster_identifier, ); if let Some(ref field_value) = obj.db_subnet_group_name { params.put(&format!("{}{}", prefix, "DBSubnetGroupName"), &field_value); } if let Some(ref field_value) = obj.enable_iam_database_authentication { params.put( &format!("{}{}", prefix, "EnableIAMDatabaseAuthentication"), &field_value.to_string(), ); } if let Some(ref field_value) = obj.kms_key_id { params.put(&format!("{}{}", prefix, "KmsKeyId"), &field_value); } if let Some(ref field_value) = obj.option_group_name { params.put(&format!("{}{}", prefix, "OptionGroupName"), &field_value); } if let Some(ref field_value) = obj.port { params.put(&format!("{}{}", prefix, "Port"), &field_value.to_string()); } if let Some(ref field_value) = obj.restore_to_time { params.put(&format!("{}{}", prefix, "RestoreToTime"), &field_value); } if let Some(ref field_value) = obj.restore_type { params.put(&format!("{}{}", prefix, "RestoreType"), &field_value); } params.put( &format!("{}{}", prefix, "SourceDBClusterIdentifier"), &obj.source_db_cluster_identifier, ); if let Some(ref field_value) = obj.tags { TagListSerializer::serialize(params, &format!("{}{}", prefix, "Tag"), field_value); } if let Some(ref field_value) = obj.use_latest_restorable_time { params.put( &format!("{}{}", prefix, "UseLatestRestorableTime"), &field_value.to_string(), ); } if let Some(ref field_value) = obj.vpc_security_group_ids { VpcSecurityGroupIdListSerializer::serialize( params, &format!("{}{}", prefix, "VpcSecurityGroupId"), field_value, ); } } } #[derive(Default, Debug, Clone, PartialEq)] pub struct RestoreDBClusterToPointInTimeResult { pub db_cluster: Option<DBCluster>, } struct RestoreDBClusterToPointInTimeResultDeserializer; impl RestoreDBClusterToPointInTimeResultDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( 
tag_name: &str, stack: &mut T, ) -> Result<RestoreDBClusterToPointInTimeResult, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = RestoreDBClusterToPointInTimeResult::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "DBCluster" => { obj.db_cluster = Some(try!(DBClusterDeserializer::deserialize("DBCluster", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } struct SourceIdsListDeserializer; impl SourceIdsListDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<String>, XmlParseError> { let mut obj = vec![]; try!(start_element(tag_name, stack)); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => { if name == "SourceId" { obj.push(try!(StringDeserializer::deserialize("SourceId", stack))); } else { skip_tree(stack); } } DeserializerNext::Close => { try!(end_element(tag_name, stack)); break; } DeserializerNext::Skip => { stack.next(); } } } Ok(obj) } } /// Serialize `SourceIdsList` contents to a `SignedRequest`. struct SourceIdsListSerializer; impl SourceIdsListSerializer { fn serialize(params: &mut Params, name: &str, obj: &Vec<String>) { for (index, obj) in obj.iter().enumerate() { let key = format!("{}.member.{}", name, index + 1); params.put(&key, &obj); } } } struct SourceTypeDeserializer; impl SourceTypeDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } struct StringDeserializer; impl StringDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<String, XmlParseError> { try!(start_element(tag_name, stack)); let obj = try!(characters(stack)); try!(end_element(tag_name, stack)); Ok(obj) } } /// <p> This data type is used as a response element in the <a>DescribeDBSubnetGroups</a> action. </p> #[derive(Default, Debug, Clone, PartialEq)] pub struct Subnet { pub subnet_availability_zone: Option<AvailabilityZone>, /// <p>Specifies the identifier of the subnet.</p> pub subnet_identifier: Option<String>, /// <p>Specifies the status of the subnet.</p> pub subnet_status: Option<String>, } struct SubnetDeserializer; impl SubnetDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Subnet, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = Subnet::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] 
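/* Editorial note, not part of the generated bindings: every struct
deserializer in this file is the same peek-driven loop, and this dispatch
point routes child elements by local name. For a <Subnet> element the
expected wire shape is, for example (hypothetical values):

    <Subnet>
        <SubnetAvailabilityZone><Name>us-east-1a</Name></SubnetAvailabilityZone>
        <SubnetIdentifier>subnet-0abc1234</SubnetIdentifier>
        <SubnetStatus>Active</SubnetStatus>
    </Subnet>

Recognized children populate the struct's fields; anything else falls
through to `skip_tree`, which consumes the whole unknown subtree so the
parser stays aligned with the event stream. */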
{ "SubnetAvailabilityZone" => { obj.subnet_availability_zone = Some(try!(AvailabilityZoneDeserializer::deserialize( "SubnetAvailabilityZone", stack ))); } "SubnetIdentifier" => { obj.subnet_identifier = Some(try!(StringDeserializer::deserialize( "SubnetIdentifier", stack ))); } "SubnetStatus" => { obj.subnet_status = Some(try!(StringDeserializer::deserialize("SubnetStatus", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// Serialize `SubnetIdentifierList` contents to a `SignedRequest`. struct SubnetIdentifierListSerializer; impl SubnetIdentifierListSerializer { fn serialize(params: &mut Params, name: &str, obj: &Vec<String>) { for (index, obj) in obj.iter().enumerate() { let key = format!("{}.member.{}", name, index + 1); params.put(&key, &obj); } } } struct SubnetListDeserializer; impl SubnetListDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<Subnet>, XmlParseError> { let mut obj = vec![]; try!(start_element(tag_name, stack)); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => { if name == "Subnet" { obj.push(try!(SubnetDeserializer::deserialize("Subnet", stack))); } else { skip_tree(stack); } } DeserializerNext::Close => { try!(end_element(tag_name, stack)); break; } DeserializerNext::Skip => { stack.next(); } } } Ok(obj) } } struct SupportedCharacterSetsListDeserializer; impl SupportedCharacterSetsListDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<CharacterSet>, XmlParseError> { let mut obj = vec![]; try!(start_element(tag_name, stack)); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => { if name == "CharacterSet" { obj.push(try!(CharacterSetDeserializer::deserialize( "CharacterSet", stack ))); } else { skip_tree(stack); } } DeserializerNext::Close => { try!(end_element(tag_name, stack)); break; } DeserializerNext::Skip => { stack.next(); } } } Ok(obj) } } struct SupportedTimezonesListDeserializer; impl SupportedTimezonesListDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<Timezone>, XmlParseError> { let mut obj = vec![]; try!(start_element(tag_name, stack)); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. 
})) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };

            match next_event {
                DeserializerNext::Element(name) => {
                    if name == "Timezone" {
                        obj.push(try!(TimezoneDeserializer::deserialize("Timezone", stack)));
                    } else {
                        skip_tree(stack);
                    }
                }
                DeserializerNext::Close => {
                    try!(end_element(tag_name, stack));
                    break;
                }
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }

        Ok(obj)
    }
}
struct TStampDeserializer;
impl TStampDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<String, XmlParseError> {
        try!(start_element(tag_name, stack));
        let obj = try!(characters(stack));
        try!(end_element(tag_name, stack));

        Ok(obj)
    }
}
/// <p>Metadata assigned to an Amazon Neptune resource consisting of a key-value pair.</p>
#[derive(Default, Debug, Clone, PartialEq)]
pub struct Tag {
    /// <p>A key is the required name of the tag. The string value can be from 1 to 128 Unicode characters in length and can't be prefixed with "aws:" or "rds:". The string can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$").</p>
    pub key: Option<String>,
    /// <p>A value is the optional value of the tag. The string value can be from 1 to 256 Unicode characters in length and can't be prefixed with "aws:" or "rds:". The string can contain only the set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$").</p>
    pub value: Option<String>,
}
struct TagDeserializer;
impl TagDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<Tag, XmlParseError> {
        try!(start_element(tag_name, stack));

        let mut obj = Tag::default();

        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };

            match next_event {
                DeserializerNext::Element(name) => match &name[..] {
                    "Key" => {
                        obj.key = Some(try!(StringDeserializer::deserialize("Key", stack)));
                    }
                    "Value" => {
                        obj.value = Some(try!(StringDeserializer::deserialize("Value", stack)));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }

        try!(end_element(tag_name, stack));

        Ok(obj)
    }
}

/// Serialize `Tag` contents to a `SignedRequest`.
struct TagSerializer;
impl TagSerializer {
    fn serialize(params: &mut Params, name: &str, obj: &Tag) {
        let mut prefix = name.to_string();
        if prefix != "" {
            prefix.push_str(".");
        }

        if let Some(ref field_value) = obj.key {
            params.put(&format!("{}{}", prefix, "Key"), &field_value);
        }
        if let Some(ref field_value) = obj.value {
            params.put(&format!("{}{}", prefix, "Value"), &field_value);
        }
    }
}

struct TagListDeserializer;
impl TagListDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<Vec<Tag>, XmlParseError> {
        let mut obj = vec![];
        try!(start_element(tag_name, stack));

        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement {
                    ref name, ..
})) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };

            match next_event {
                DeserializerNext::Element(name) => {
                    if name == "Tag" {
                        obj.push(try!(TagDeserializer::deserialize("Tag", stack)));
                    } else {
                        skip_tree(stack);
                    }
                }
                DeserializerNext::Close => {
                    try!(end_element(tag_name, stack));
                    break;
                }
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }

        Ok(obj)
    }
}

/// Serialize `TagList` contents to a `SignedRequest`.
struct TagListSerializer;
impl TagListSerializer {
    fn serialize(params: &mut Params, name: &str, obj: &Vec<Tag>) {
        for (index, obj) in obj.iter().enumerate() {
            let key = format!("{}.member.{}", name, index + 1);
            TagSerializer::serialize(params, &key, obj);
        }
    }
}

#[derive(Default, Debug, Clone, PartialEq)]
pub struct TagListMessage {
    /// <p>List of tags returned by the ListTagsForResource operation.</p>
    pub tag_list: Option<Vec<Tag>>,
}
struct TagListMessageDeserializer;
impl TagListMessageDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<TagListMessage, XmlParseError> {
        try!(start_element(tag_name, stack));

        let mut obj = TagListMessage::default();

        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };

            match next_event {
                DeserializerNext::Element(name) => match &name[..] {
                    "TagList" => {
                        obj.tag_list =
                            Some(try!(TagListDeserializer::deserialize("TagList", stack)));
                    }
                    _ => skip_tree(stack),
                },
                DeserializerNext::Close => break,
                DeserializerNext::Skip => {
                    stack.next();
                }
            }
        }

        try!(end_element(tag_name, stack));

        Ok(obj)
    }
}
/// <p>A time zone associated with a <a>DBInstance</a>. This data type is an element in the response to the <a>DescribeDBInstances</a> and <a>DescribeDBEngineVersions</a> actions. </p>
#[derive(Default, Debug, Clone, PartialEq)]
pub struct Timezone {
    /// <p>The name of the time zone.</p>
    pub timezone_name: Option<String>,
}
struct TimezoneDeserializer;
impl TimezoneDeserializer {
    #[allow(unused_variables)]
    fn deserialize<'a, T: Peek + Next>(
        tag_name: &str,
        stack: &mut T,
    ) -> Result<Timezone, XmlParseError> {
        try!(start_element(tag_name, stack));

        let mut obj = Timezone::default();

        loop {
            let next_event = match stack.peek() {
                Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close,
                Some(&Ok(XmlEvent::StartElement { ref name, .. })) => {
                    DeserializerNext::Element(name.local_name.to_owned())
                }
                _ => DeserializerNext::Skip,
            };

            match next_event {
                DeserializerNext::Element(name) => match &name[..]
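/* Editorial note, not part of the generated bindings: leaf values such as
<TimezoneName> are read by `StringDeserializer`, which extracts the
element's character data via `characters(stack)`. A minimal payload this
loop accepts would be (hypothetical value):

    <Timezone><TimezoneName>UTC</TimezoneName></Timezone> */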
{ "TimezoneName" => { obj.timezone_name = Some(try!(StringDeserializer::deserialize("TimezoneName", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p>The version of the database engine that a DB instance can be upgraded to.</p> #[derive(Default, Debug, Clone, PartialEq)] pub struct UpgradeTarget { /// <p>A value that indicates whether the target version is applied to any source DB instances that have AutoMinorVersionUpgrade set to true.</p> pub auto_upgrade: Option<bool>, /// <p>The version of the database engine that a DB instance can be upgraded to.</p> pub description: Option<String>, /// <p>The name of the upgrade target database engine.</p> pub engine: Option<String>, /// <p>The version number of the upgrade target database engine.</p> pub engine_version: Option<String>, /// <p>A value that indicates whether a database engine is upgraded to a major version.</p> pub is_major_version_upgrade: Option<bool>, } struct UpgradeTargetDeserializer; impl UpgradeTargetDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<UpgradeTarget, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = UpgradeTarget::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "AutoUpgrade" => { obj.auto_upgrade = Some(try!(BooleanDeserializer::deserialize("AutoUpgrade", stack))); } "Description" => { obj.description = Some(try!(StringDeserializer::deserialize("Description", stack))); } "Engine" => { obj.engine = Some(try!(StringDeserializer::deserialize("Engine", stack))); } "EngineVersion" => { obj.engine_version = Some(try!(StringDeserializer::deserialize( "EngineVersion", stack ))); } "IsMajorVersionUpgrade" => { obj.is_major_version_upgrade = Some(try!( BooleanDeserializer::deserialize("IsMajorVersionUpgrade", stack) )); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p>Information about valid modifications that you can make to your DB instance. Contains the result of a successful call to the <a>DescribeValidDBInstanceModifications</a> action. You can use this information when you call <a>ModifyDBInstance</a>. </p> #[derive(Default, Debug, Clone, PartialEq)] pub struct ValidDBInstanceModificationsMessage { /// <p>Valid storage options for your DB instance. </p> pub storage: Option<Vec<ValidStorageOptions>>, } struct ValidDBInstanceModificationsMessageDeserializer; impl ValidDBInstanceModificationsMessageDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<ValidDBInstanceModificationsMessage, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = ValidDBInstanceModificationsMessage::default(); loop { let next_event = match stack.peek() {
_ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "Storage" => { obj.storage = Some(try!(ValidStorageOptionsListDeserializer::deserialize( "Storage", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } /// <p>Information about valid modifications that you can make to your DB instance. Contains the result of a successful call to the <a>DescribeValidDBInstanceModifications</a> action. </p> #[derive(Default, Debug, Clone, PartialEq)] pub struct ValidStorageOptions { /// <p>The valid range of Provisioned IOPS to gibibytes of storage multiplier. For example, 3-10, which means that provisioned IOPS can be between 3 and 10 times storage. </p> pub iops_to_storage_ratio: Option<Vec<DoubleRange>>, /// <p>The valid range of provisioned IOPS. For example, 1000-20000. </p> pub provisioned_iops: Option<Vec<Range>>, /// <p>The valid range of storage in gibibytes. For example, 100 to 16384. </p> pub storage_size: Option<Vec<Range>>, /// <p>The valid storage types for your DB instance. For example, gp2, io1. </p> pub storage_type: Option<String>, } struct ValidStorageOptionsDeserializer; impl ValidStorageOptionsDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<ValidStorageOptions, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = ValidStorageOptions::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "IopsToStorageRatio" => { obj.iops_to_storage_ratio = Some(try!( DoubleRangeListDeserializer::deserialize("IopsToStorageRatio", stack) )); } "ProvisionedIops" => { obj.provisioned_iops = Some(try!(RangeListDeserializer::deserialize( "ProvisionedIops", stack ))); } "StorageSize" => { obj.storage_size = Some(try!(RangeListDeserializer::deserialize( "StorageSize", stack ))); } "StorageType" => { obj.storage_type = Some(try!(StringDeserializer::deserialize("StorageType", stack))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } struct ValidStorageOptionsListDeserializer; impl ValidStorageOptionsListDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<ValidStorageOptions>, XmlParseError> { let mut obj = vec![]; try!(start_element(tag_name, stack)); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. 
})) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => { if name == "ValidStorageOptions" { obj.push(try!(ValidStorageOptionsDeserializer::deserialize( "ValidStorageOptions", stack ))); } else { skip_tree(stack); } } DeserializerNext::Close => { try!(end_element(tag_name, stack)); break; } DeserializerNext::Skip => { stack.next(); } } } Ok(obj) } } struct ValidUpgradeTargetListDeserializer; impl ValidUpgradeTargetListDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<UpgradeTarget>, XmlParseError> { let mut obj = vec![]; try!(start_element(tag_name, stack)); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => { if name == "UpgradeTarget" { obj.push(try!(UpgradeTargetDeserializer::deserialize( "UpgradeTarget", stack ))); } else { skip_tree(stack); } } DeserializerNext::Close => { try!(end_element(tag_name, stack)); break; } DeserializerNext::Skip => { stack.next(); } } } Ok(obj) } } /// Serialize `VpcSecurityGroupIdList` contents to a `SignedRequest`. struct VpcSecurityGroupIdListSerializer; impl VpcSecurityGroupIdListSerializer { fn serialize(params: &mut Params, name: &str, obj: &Vec<String>) { for (index, obj) in obj.iter().enumerate() { let key = format!("{}.member.{}", name, index + 1); params.put(&key, &obj); } } } /// <p>This data type is used as a response element for queries on VPC security group membership.</p> #[derive(Default, Debug, Clone, PartialEq)] pub struct VpcSecurityGroupMembership { /// <p>The status of the VPC security group.</p> pub status: Option<String>, /// <p>The name of the VPC security group.</p> pub vpc_security_group_id: Option<String>, } struct VpcSecurityGroupMembershipDeserializer; impl VpcSecurityGroupMembershipDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<VpcSecurityGroupMembership, XmlParseError> { try!(start_element(tag_name, stack)); let mut obj = VpcSecurityGroupMembership::default(); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => match &name[..] { "Status" => { obj.status = Some(try!(StringDeserializer::deserialize("Status", stack))); } "VpcSecurityGroupId" => { obj.vpc_security_group_id = Some(try!(StringDeserializer::deserialize( "VpcSecurityGroupId", stack ))); } _ => skip_tree(stack), }, DeserializerNext::Close => break, DeserializerNext::Skip => { stack.next(); } } } try!(end_element(tag_name, stack)); Ok(obj) } } struct VpcSecurityGroupMembershipListDeserializer; impl VpcSecurityGroupMembershipListDeserializer { #[allow(unused_variables)] fn deserialize<'a, T: Peek + Next>( tag_name: &str, stack: &mut T, ) -> Result<Vec<VpcSecurityGroupMembership>, XmlParseError> { let mut obj = vec![]; try!(start_element(tag_name, stack)); loop { let next_event = match stack.peek() { Some(&Ok(XmlEvent::EndElement { .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. 
})) => { DeserializerNext::Element(name.local_name.to_owned()) } _ => DeserializerNext::Skip, }; match next_event { DeserializerNext::Element(name) => { if name == "VpcSecurityGroupMembership" { obj.push(try!(VpcSecurityGroupMembershipDeserializer::deserialize( "VpcSecurityGroupMembership", stack ))); } else { skip_tree(stack); } } DeserializerNext::Close => { try!(end_element(tag_name, stack)); break; } DeserializerNext::Skip => { stack.next(); } } } Ok(obj) } } /// Errors returned by AddRoleToDBCluster #[derive(Debug, PartialEq)] pub enum AddRoleToDBClusterError { /// <p> <i>DBClusterIdentifier</i> does not refer to an existing DB cluster. </p> DBClusterNotFoundFault(String), /// <p>The specified IAM role Amazon Resource Name (ARN) is already associated with the specified DB cluster.</p> DBClusterRoleAlreadyExistsFault(String), /// <p>You have exceeded the maximum number of IAM roles that can be associated with the specified DB cluster.</p> DBClusterRoleQuotaExceededFault(String), /// <p>The DB cluster is not in a valid state.</p> InvalidDBClusterStateFault(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl AddRoleToDBClusterError { pub fn from_body(body: &str) -> AddRoleToDBClusterError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { "DBClusterNotFoundFault" => AddRoleToDBClusterError::DBClusterNotFoundFault( String::from(parsed_error.message), ), "DBClusterRoleAlreadyExists" => { AddRoleToDBClusterError::DBClusterRoleAlreadyExistsFault(String::from( parsed_error.message, )) } "DBClusterRoleQuotaExceeded" => { AddRoleToDBClusterError::DBClusterRoleQuotaExceededFault(String::from( parsed_error.message, )) } "InvalidDBClusterStateFault" => { AddRoleToDBClusterError::InvalidDBClusterStateFault(String::from( parsed_error.message, )) } _ => AddRoleToDBClusterError::Unknown(String::from(body)), }, Err(_) => AddRoleToDBClusterError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for AddRoleToDBClusterError { fn from(err: XmlParseError) -> AddRoleToDBClusterError { let XmlParseError(message) = err; AddRoleToDBClusterError::Unknown(message.to_string()) } } impl From<CredentialsError> for AddRoleToDBClusterError { fn from(err: CredentialsError) -> AddRoleToDBClusterError { AddRoleToDBClusterError::Credentials(err) } } impl From<HttpDispatchError> for AddRoleToDBClusterError { fn from(err: HttpDispatchError) -> AddRoleToDBClusterError { AddRoleToDBClusterError::HttpDispatch(err) } } impl From<io::Error> for AddRoleToDBClusterError { fn from(err: io::Error) -> AddRoleToDBClusterError { AddRoleToDBClusterError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for AddRoleToDBClusterError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for AddRoleToDBClusterError { fn description(&self) -> &str { match *self { 
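/* Editorial note, not part of the generated bindings: each domain variant
stores the raw AWS error message, so `description()` simply forwards it.
Callers usually branch on the variant rather than on the string, e.g.
(hypothetical handling):

    match err {
        AddRoleToDBClusterError::DBClusterRoleAlreadyExistsFault(_) => {
            // The role is already attached; treat as success.
        }
        other => return Err(other),
    }
*/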
AddRoleToDBClusterError::DBClusterNotFoundFault(ref cause) => cause, AddRoleToDBClusterError::DBClusterRoleAlreadyExistsFault(ref cause) => cause, AddRoleToDBClusterError::DBClusterRoleQuotaExceededFault(ref cause) => cause, AddRoleToDBClusterError::InvalidDBClusterStateFault(ref cause) => cause, AddRoleToDBClusterError::Validation(ref cause) => cause, AddRoleToDBClusterError::Credentials(ref err) => err.description(), AddRoleToDBClusterError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } AddRoleToDBClusterError::Unknown(ref cause) => cause, } } } /// Errors returned by AddSourceIdentifierToSubscription #[derive(Debug, PartialEq)] pub enum AddSourceIdentifierToSubscriptionError { SourceNotFoundFault(String), SubscriptionNotFoundFault(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl AddSourceIdentifierToSubscriptionError { pub fn from_body(body: &str) -> AddSourceIdentifierToSubscriptionError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { "SourceNotFound" => AddSourceIdentifierToSubscriptionError::SourceNotFoundFault( String::from(parsed_error.message), ), "SubscriptionNotFound" => { AddSourceIdentifierToSubscriptionError::SubscriptionNotFoundFault(String::from( parsed_error.message, )) } _ => AddSourceIdentifierToSubscriptionError::Unknown(String::from(body)), }, Err(_) => AddSourceIdentifierToSubscriptionError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for AddSourceIdentifierToSubscriptionError { fn from(err: XmlParseError) -> AddSourceIdentifierToSubscriptionError { let XmlParseError(message) = err; AddSourceIdentifierToSubscriptionError::Unknown(message.to_string()) } } impl From<CredentialsError> for AddSourceIdentifierToSubscriptionError { fn from(err: CredentialsError) -> AddSourceIdentifierToSubscriptionError { AddSourceIdentifierToSubscriptionError::Credentials(err) } } impl From<HttpDispatchError> for AddSourceIdentifierToSubscriptionError { fn from(err: HttpDispatchError) -> AddSourceIdentifierToSubscriptionError { AddSourceIdentifierToSubscriptionError::HttpDispatch(err) } } impl From<io::Error> for AddSourceIdentifierToSubscriptionError { fn from(err: io::Error) -> AddSourceIdentifierToSubscriptionError { AddSourceIdentifierToSubscriptionError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for AddSourceIdentifierToSubscriptionError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for AddSourceIdentifierToSubscriptionError { fn description(&self) -> &str { match *self { AddSourceIdentifierToSubscriptionError::SourceNotFoundFault(ref cause) => cause, AddSourceIdentifierToSubscriptionError::SubscriptionNotFoundFault(ref cause) => cause, AddSourceIdentifierToSubscriptionError::Validation(ref cause) => cause, AddSourceIdentifierToSubscriptionError::Credentials(ref err) => err.description(), 
AddSourceIdentifierToSubscriptionError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } AddSourceIdentifierToSubscriptionError::Unknown(ref cause) => cause, } } } /// Errors returned by AddTagsToResource #[derive(Debug, PartialEq)] pub enum AddTagsToResourceError { /// <p> <i>DBClusterIdentifier</i> does not refer to an existing DB cluster. </p> DBClusterNotFoundFault(String), /// <p> <i>DBInstanceIdentifier</i> does not refer to an existing DB instance. </p> DBInstanceNotFoundFault(String), /// <p> <i>DBSnapshotIdentifier</i> does not refer to an existing DB snapshot. </p> DBSnapshotNotFoundFault(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl AddTagsToResourceError { pub fn from_body(body: &str) -> AddTagsToResourceError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { "DBClusterNotFoundFault" => AddTagsToResourceError::DBClusterNotFoundFault( String::from(parsed_error.message), ), "DBInstanceNotFound" => AddTagsToResourceError::DBInstanceNotFoundFault( String::from(parsed_error.message), ), "DBSnapshotNotFound" => AddTagsToResourceError::DBSnapshotNotFoundFault( String::from(parsed_error.message), ), _ => AddTagsToResourceError::Unknown(String::from(body)), }, Err(_) => AddTagsToResourceError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for AddTagsToResourceError { fn from(err: XmlParseError) -> AddTagsToResourceError { let XmlParseError(message) = err; AddTagsToResourceError::Unknown(message.to_string()) } } impl From<CredentialsError> for AddTagsToResourceError { fn from(err: CredentialsError) -> AddTagsToResourceError { AddTagsToResourceError::Credentials(err) } } impl From<HttpDispatchError> for AddTagsToResourceError { fn from(err: HttpDispatchError) -> AddTagsToResourceError { AddTagsToResourceError::HttpDispatch(err) } } impl From<io::Error> for AddTagsToResourceError { fn from(err: io::Error) -> AddTagsToResourceError { AddTagsToResourceError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for AddTagsToResourceError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for AddTagsToResourceError { fn description(&self) -> &str { match *self { AddTagsToResourceError::DBClusterNotFoundFault(ref cause) => cause, AddTagsToResourceError::DBInstanceNotFoundFault(ref cause) => cause, AddTagsToResourceError::DBSnapshotNotFoundFault(ref cause) => cause, AddTagsToResourceError::Validation(ref cause) => cause, AddTagsToResourceError::Credentials(ref err) => err.description(), AddTagsToResourceError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } AddTagsToResourceError::Unknown(ref cause) => cause, } } } /// Errors returned by ApplyPendingMaintenanceAction #[derive(Debug, PartialEq)] pub enum ApplyPendingMaintenanceActionError { /// <p>The specified resource ID was not found.</p> 
ResourceNotFoundFault(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl ApplyPendingMaintenanceActionError { pub fn from_body(body: &str) -> ApplyPendingMaintenanceActionError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { "ResourceNotFoundFault" => { ApplyPendingMaintenanceActionError::ResourceNotFoundFault(String::from( parsed_error.message, )) } _ => ApplyPendingMaintenanceActionError::Unknown(String::from(body)), }, Err(_) => ApplyPendingMaintenanceActionError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for ApplyPendingMaintenanceActionError { fn from(err: XmlParseError) -> ApplyPendingMaintenanceActionError { let XmlParseError(message) = err; ApplyPendingMaintenanceActionError::Unknown(message.to_string()) } } impl From<CredentialsError> for ApplyPendingMaintenanceActionError { fn from(err: CredentialsError) -> ApplyPendingMaintenanceActionError { ApplyPendingMaintenanceActionError::Credentials(err) } } impl From<HttpDispatchError> for ApplyPendingMaintenanceActionError { fn from(err: HttpDispatchError) -> ApplyPendingMaintenanceActionError { ApplyPendingMaintenanceActionError::HttpDispatch(err) } } impl From<io::Error> for ApplyPendingMaintenanceActionError { fn from(err: io::Error) -> ApplyPendingMaintenanceActionError { ApplyPendingMaintenanceActionError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for ApplyPendingMaintenanceActionError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for ApplyPendingMaintenanceActionError { fn description(&self) -> &str { match *self { ApplyPendingMaintenanceActionError::ResourceNotFoundFault(ref cause) => cause, ApplyPendingMaintenanceActionError::Validation(ref cause) => cause, ApplyPendingMaintenanceActionError::Credentials(ref err) => err.description(), ApplyPendingMaintenanceActionError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } ApplyPendingMaintenanceActionError::Unknown(ref cause) => cause, } } } /// Errors returned by CopyDBClusterParameterGroup #[derive(Debug, PartialEq)] pub enum CopyDBClusterParameterGroupError { /// <p>A DB parameter group with the same name exists.</p> DBParameterGroupAlreadyExistsFault(String), /// <p> <i>DBParameterGroupName</i> does not refer to an existing DB parameter group. </p> DBParameterGroupNotFoundFault(String), /// <p>Request would result in user exceeding the allowed number of DB parameter groups.</p> DBParameterGroupQuotaExceededFault(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
Unknown(String), } impl CopyDBClusterParameterGroupError { pub fn from_body(body: &str) -> CopyDBClusterParameterGroupError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { "DBParameterGroupAlreadyExists" => { CopyDBClusterParameterGroupError::DBParameterGroupAlreadyExistsFault( String::from(parsed_error.message), ) } "DBParameterGroupNotFound" => { CopyDBClusterParameterGroupError::DBParameterGroupNotFoundFault(String::from( parsed_error.message, )) } "DBParameterGroupQuotaExceeded" => { CopyDBClusterParameterGroupError::DBParameterGroupQuotaExceededFault( String::from(parsed_error.message), ) } _ => CopyDBClusterParameterGroupError::Unknown(String::from(body)), }, Err(_) => CopyDBClusterParameterGroupError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for CopyDBClusterParameterGroupError { fn from(err: XmlParseError) -> CopyDBClusterParameterGroupError { let XmlParseError(message) = err; CopyDBClusterParameterGroupError::Unknown(message.to_string()) } } impl From<CredentialsError> for CopyDBClusterParameterGroupError { fn from(err: CredentialsError) -> CopyDBClusterParameterGroupError { CopyDBClusterParameterGroupError::Credentials(err) } } impl From<HttpDispatchError> for CopyDBClusterParameterGroupError { fn from(err: HttpDispatchError) -> CopyDBClusterParameterGroupError { CopyDBClusterParameterGroupError::HttpDispatch(err) } } impl From<io::Error> for CopyDBClusterParameterGroupError { fn from(err: io::Error) -> CopyDBClusterParameterGroupError { CopyDBClusterParameterGroupError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for CopyDBClusterParameterGroupError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for CopyDBClusterParameterGroupError { fn description(&self) -> &str { match *self { CopyDBClusterParameterGroupError::DBParameterGroupAlreadyExistsFault(ref cause) => { cause } CopyDBClusterParameterGroupError::DBParameterGroupNotFoundFault(ref cause) => cause, CopyDBClusterParameterGroupError::DBParameterGroupQuotaExceededFault(ref cause) => { cause } CopyDBClusterParameterGroupError::Validation(ref cause) => cause, CopyDBClusterParameterGroupError::Credentials(ref err) => err.description(), CopyDBClusterParameterGroupError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } CopyDBClusterParameterGroupError::Unknown(ref cause) => cause, } } } /// Errors returned by CopyDBClusterSnapshot #[derive(Debug, PartialEq)] pub enum CopyDBClusterSnapshotError { /// <p>User already has a DB cluster snapshot with the given identifier.</p> DBClusterSnapshotAlreadyExistsFault(String), /// <p> <i>DBClusterSnapshotIdentifier</i> does not refer to an existing DB cluster snapshot. 
</p> DBClusterSnapshotNotFoundFault(String), /// <p>The supplied value is not a valid DB cluster snapshot state.</p> InvalidDBClusterSnapshotStateFault(String), /// <p>The DB cluster is not in a valid state.</p> InvalidDBClusterStateFault(String), /// <p>Error accessing KMS key.</p> KMSKeyNotAccessibleFault(String), /// <p>Request would result in user exceeding the allowed number of DB snapshots.</p> SnapshotQuotaExceededFault(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl CopyDBClusterSnapshotError { pub fn from_body(body: &str) -> CopyDBClusterSnapshotError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { "DBClusterSnapshotAlreadyExistsFault" => { CopyDBClusterSnapshotError::DBClusterSnapshotAlreadyExistsFault(String::from( parsed_error.message, )) } "DBClusterSnapshotNotFoundFault" => { CopyDBClusterSnapshotError::DBClusterSnapshotNotFoundFault(String::from( parsed_error.message, )) } "InvalidDBClusterSnapshotStateFault" => { CopyDBClusterSnapshotError::InvalidDBClusterSnapshotStateFault(String::from( parsed_error.message, )) } "InvalidDBClusterStateFault" => { CopyDBClusterSnapshotError::InvalidDBClusterStateFault(String::from( parsed_error.message, )) } "KMSKeyNotAccessibleFault" => CopyDBClusterSnapshotError::KMSKeyNotAccessibleFault( String::from(parsed_error.message), ), "SnapshotQuotaExceeded" => CopyDBClusterSnapshotError::SnapshotQuotaExceededFault( String::from(parsed_error.message), ), _ => CopyDBClusterSnapshotError::Unknown(String::from(body)), }, Err(_) => CopyDBClusterSnapshotError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for CopyDBClusterSnapshotError { fn from(err: XmlParseError) -> CopyDBClusterSnapshotError { let XmlParseError(message) = err; CopyDBClusterSnapshotError::Unknown(message.to_string()) } } impl From<CredentialsError> for CopyDBClusterSnapshotError { fn from(err: CredentialsError) -> CopyDBClusterSnapshotError { CopyDBClusterSnapshotError::Credentials(err) } } impl From<HttpDispatchError> for CopyDBClusterSnapshotError { fn from(err: HttpDispatchError) -> CopyDBClusterSnapshotError { CopyDBClusterSnapshotError::HttpDispatch(err) } } impl From<io::Error> for CopyDBClusterSnapshotError { fn from(err: io::Error) -> CopyDBClusterSnapshotError { CopyDBClusterSnapshotError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for CopyDBClusterSnapshotError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for CopyDBClusterSnapshotError { fn description(&self) -> &str { match *self { CopyDBClusterSnapshotError::DBClusterSnapshotAlreadyExistsFault(ref cause) => cause, CopyDBClusterSnapshotError::DBClusterSnapshotNotFoundFault(ref cause) => cause, CopyDBClusterSnapshotError::InvalidDBClusterSnapshotStateFault(ref cause) => cause, CopyDBClusterSnapshotError::InvalidDBClusterStateFault(ref cause) => cause, 
CopyDBClusterSnapshotError::KMSKeyNotAccessibleFault(ref cause) => cause, CopyDBClusterSnapshotError::SnapshotQuotaExceededFault(ref cause) => cause, CopyDBClusterSnapshotError::Validation(ref cause) => cause, CopyDBClusterSnapshotError::Credentials(ref err) => err.description(), CopyDBClusterSnapshotError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } CopyDBClusterSnapshotError::Unknown(ref cause) => cause, } } } /// Errors returned by CopyDBParameterGroup #[derive(Debug, PartialEq)] pub enum CopyDBParameterGroupError { /// <p>A DB parameter group with the same name exists.</p> DBParameterGroupAlreadyExistsFault(String), /// <p> <i>DBParameterGroupName</i> does not refer to an existing DB parameter group. </p> DBParameterGroupNotFoundFault(String), /// <p>Request would result in user exceeding the allowed number of DB parameter groups.</p> DBParameterGroupQuotaExceededFault(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl CopyDBParameterGroupError { pub fn from_body(body: &str) -> CopyDBParameterGroupError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { "DBParameterGroupAlreadyExists" => { CopyDBParameterGroupError::DBParameterGroupAlreadyExistsFault(String::from( parsed_error.message, )) } "DBParameterGroupNotFound" => { CopyDBParameterGroupError::DBParameterGroupNotFoundFault(String::from( parsed_error.message, )) } "DBParameterGroupQuotaExceeded" => { CopyDBParameterGroupError::DBParameterGroupQuotaExceededFault(String::from( parsed_error.message, )) } _ => CopyDBParameterGroupError::Unknown(String::from(body)), }, Err(_) => CopyDBParameterGroupError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for CopyDBParameterGroupError { fn from(err: XmlParseError) -> CopyDBParameterGroupError { let XmlParseError(message) = err; CopyDBParameterGroupError::Unknown(message.to_string()) } } impl From<CredentialsError> for CopyDBParameterGroupError { fn from(err: CredentialsError) -> CopyDBParameterGroupError { CopyDBParameterGroupError::Credentials(err) } } impl From<HttpDispatchError> for CopyDBParameterGroupError { fn from(err: HttpDispatchError) -> CopyDBParameterGroupError { CopyDBParameterGroupError::HttpDispatch(err) } } impl From<io::Error> for CopyDBParameterGroupError { fn from(err: io::Error) -> CopyDBParameterGroupError { CopyDBParameterGroupError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for CopyDBParameterGroupError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for CopyDBParameterGroupError { fn description(&self) -> &str { match *self { CopyDBParameterGroupError::DBParameterGroupAlreadyExistsFault(ref cause) => cause, CopyDBParameterGroupError::DBParameterGroupNotFoundFault(ref cause) => cause, CopyDBParameterGroupError::DBParameterGroupQuotaExceededFault(ref cause) => cause, 
CopyDBParameterGroupError::Validation(ref cause) => cause,
            CopyDBParameterGroupError::Credentials(ref err) => err.description(),
            CopyDBParameterGroupError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            CopyDBParameterGroupError::Unknown(ref cause) => cause,
        }
    }
}
/// Errors returned by CreateDBCluster
#[derive(Debug, PartialEq)]
pub enum CreateDBClusterError {
    /// <p>User already has a DB cluster with the given identifier.</p>
    DBClusterAlreadyExistsFault(String),
    /// <p> <i>DBClusterIdentifier</i> does not refer to an existing DB cluster. </p>
    DBClusterNotFoundFault(String),
    /// <p> <i>DBClusterParameterGroupName</i> does not refer to an existing DB Cluster parameter group. </p>
    DBClusterParameterGroupNotFoundFault(String),
    /// <p>User attempted to create a new DB cluster and the user has already reached the maximum allowed DB cluster quota.</p>
    DBClusterQuotaExceededFault(String),
    /// <p> <i>DBInstanceIdentifier</i> does not refer to an existing DB instance. </p>
    DBInstanceNotFoundFault(String),
    /// <p>Subnets in the DB subnet group should cover at least two Availability Zones unless there is only one Availability Zone.</p>
    DBSubnetGroupDoesNotCoverEnoughAZs(String),
    /// <p> <i>DBSubnetGroupName</i> does not refer to an existing DB subnet group. </p>
    DBSubnetGroupNotFoundFault(String),
    /// <p>There is insufficient storage available for the current action. You may be able to resolve this error by updating your subnet group to use different Availability Zones that have more storage available.</p>
    InsufficientStorageClusterCapacityFault(String),
    /// <p>The DB cluster is not in a valid state.</p>
    InvalidDBClusterStateFault(String),
    /// <p> The specified DB instance is not in the <i>available</i> state. </p>
    InvalidDBInstanceStateFault(String),
    /// <p>The DB subnet group cannot be deleted because it is in use.</p>
    InvalidDBSubnetGroupStateFault(String),
    /// <p>The requested subnet is invalid, or multiple subnets were requested that are not all in a common VPC.</p>
    InvalidSubnet(String),
    /// <p>DB subnet group does not cover all Availability Zones after it is created because of users' changes.</p>
    InvalidVPCNetworkStateFault(String),
    /// <p>Error accessing KMS key.</p>
    KMSKeyNotAccessibleFault(String),
    /// <p>Request would result in user exceeding the allowed amount of storage available across all DB instances.</p>
    StorageQuotaExceededFault(String),
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl CreateDBClusterError {
    pub fn from_body(body: &str) -> CreateDBClusterError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..]
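/* Editorial note, not part of the generated bindings: `parsed_error.code`
holds the <Code> element of the AWS query-protocol error body. A sketch of
the payload shape this match is keyed on (hypothetical message text):

    <ErrorResponse>
        <Error>
            <Code>DBClusterQuotaExceededFault</Code>
            <Message>Cannot create more than 40 DB clusters.</Message>
        </Error>
    </ErrorResponse>

Codes without a dedicated variant fall through to `Unknown`, which keeps the
raw body so callers can still inspect the response. */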
{ "DBClusterAlreadyExistsFault" => CreateDBClusterError::DBClusterAlreadyExistsFault( String::from(parsed_error.message), ), "DBClusterNotFoundFault" => { CreateDBClusterError::DBClusterNotFoundFault(String::from(parsed_error.message)) } "DBClusterParameterGroupNotFound" => { CreateDBClusterError::DBClusterParameterGroupNotFoundFault(String::from( parsed_error.message, )) } "DBClusterQuotaExceededFault" => CreateDBClusterError::DBClusterQuotaExceededFault( String::from(parsed_error.message), ), "DBInstanceNotFound" => CreateDBClusterError::DBInstanceNotFoundFault( String::from(parsed_error.message), ), "DBSubnetGroupDoesNotCoverEnoughAZs" => { CreateDBClusterError::DBSubnetGroupDoesNotCoverEnoughAZs(String::from( parsed_error.message, )) } "DBSubnetGroupNotFoundFault" => CreateDBClusterError::DBSubnetGroupNotFoundFault( String::from(parsed_error.message), ), "InsufficientStorageClusterCapacity" => { CreateDBClusterError::InsufficientStorageClusterCapacityFault(String::from( parsed_error.message, )) } "InvalidDBClusterStateFault" => CreateDBClusterError::InvalidDBClusterStateFault( String::from(parsed_error.message), ), "InvalidDBInstanceState" => CreateDBClusterError::InvalidDBInstanceStateFault( String::from(parsed_error.message), ), "InvalidDBSubnetGroupStateFault" => { CreateDBClusterError::InvalidDBSubnetGroupStateFault(String::from( parsed_error.message, )) } "InvalidSubnet" => { CreateDBClusterError::InvalidSubnet(String::from(parsed_error.message)) } "InvalidVPCNetworkStateFault" => CreateDBClusterError::InvalidVPCNetworkStateFault( String::from(parsed_error.message), ), "KMSKeyNotAccessibleFault" => CreateDBClusterError::KMSKeyNotAccessibleFault( String::from(parsed_error.message), ), "StorageQuotaExceeded" => CreateDBClusterError::StorageQuotaExceededFault( String::from(parsed_error.message), ), _ => CreateDBClusterError::Unknown(String::from(body)), }, Err(_) => CreateDBClusterError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for CreateDBClusterError { fn from(err: XmlParseError) -> CreateDBClusterError { let XmlParseError(message) = err; CreateDBClusterError::Unknown(message.to_string()) } } impl From<CredentialsError> for CreateDBClusterError { fn from(err: CredentialsError) -> CreateDBClusterError { CreateDBClusterError::Credentials(err) } } impl From<HttpDispatchError> for CreateDBClusterError { fn from(err: HttpDispatchError) -> CreateDBClusterError { CreateDBClusterError::HttpDispatch(err) } } impl From<io::Error> for CreateDBClusterError { fn from(err: io::Error) -> CreateDBClusterError { CreateDBClusterError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for CreateDBClusterError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for CreateDBClusterError { fn description(&self) -> &str { match *self { CreateDBClusterError::DBClusterAlreadyExistsFault(ref cause) => cause, CreateDBClusterError::DBClusterNotFoundFault(ref cause) => cause, CreateDBClusterError::DBClusterParameterGroupNotFoundFault(ref cause) => cause, CreateDBClusterError::DBClusterQuotaExceededFault(ref cause) => cause, CreateDBClusterError::DBInstanceNotFoundFault(ref cause) => cause, CreateDBClusterError::DBSubnetGroupDoesNotCoverEnoughAZs(ref cause) => cause, CreateDBClusterError::DBSubnetGroupNotFoundFault(ref cause) => cause, 
CreateDBClusterError::InsufficientStorageClusterCapacityFault(ref cause) => cause, CreateDBClusterError::InvalidDBClusterStateFault(ref cause) => cause, CreateDBClusterError::InvalidDBInstanceStateFault(ref cause) => cause, CreateDBClusterError::InvalidDBSubnetGroupStateFault(ref cause) => cause, CreateDBClusterError::InvalidSubnet(ref cause) => cause, CreateDBClusterError::InvalidVPCNetworkStateFault(ref cause) => cause, CreateDBClusterError::KMSKeyNotAccessibleFault(ref cause) => cause, CreateDBClusterError::StorageQuotaExceededFault(ref cause) => cause, CreateDBClusterError::Validation(ref cause) => cause, CreateDBClusterError::Credentials(ref err) => err.description(), CreateDBClusterError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), CreateDBClusterError::Unknown(ref cause) => cause, } } } /// Errors returned by CreateDBClusterParameterGroup #[derive(Debug, PartialEq)] pub enum CreateDBClusterParameterGroupError { /// <p>A DB parameter group with the same name exists.</p> DBParameterGroupAlreadyExistsFault(String), /// <p>Request would result in user exceeding the allowed number of DB parameter groups.</p> DBParameterGroupQuotaExceededFault(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl CreateDBClusterParameterGroupError { pub fn from_body(body: &str) -> CreateDBClusterParameterGroupError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ "DBParameterGroupAlreadyExists" => { CreateDBClusterParameterGroupError::DBParameterGroupAlreadyExistsFault( String::from(parsed_error.message), ) } "DBParameterGroupQuotaExceeded" => { CreateDBClusterParameterGroupError::DBParameterGroupQuotaExceededFault( String::from(parsed_error.message), ) } _ => CreateDBClusterParameterGroupError::Unknown(String::from(body)), }, Err(_) => CreateDBClusterParameterGroupError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for CreateDBClusterParameterGroupError { fn from(err: XmlParseError) -> CreateDBClusterParameterGroupError { let XmlParseError(message) = err; CreateDBClusterParameterGroupError::Unknown(message.to_string()) } } impl From<CredentialsError> for CreateDBClusterParameterGroupError { fn from(err: CredentialsError) -> CreateDBClusterParameterGroupError { CreateDBClusterParameterGroupError::Credentials(err) } } impl From<HttpDispatchError> for CreateDBClusterParameterGroupError { fn from(err: HttpDispatchError) -> CreateDBClusterParameterGroupError { CreateDBClusterParameterGroupError::HttpDispatch(err) } } impl From<io::Error> for CreateDBClusterParameterGroupError { fn from(err: io::Error) -> CreateDBClusterParameterGroupError { CreateDBClusterParameterGroupError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for CreateDBClusterParameterGroupError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for CreateDBClusterParameterGroupError { fn description(&self) -> &str { match *self { CreateDBClusterParameterGroupError::DBParameterGroupAlreadyExistsFault(ref cause) => { cause } CreateDBClusterParameterGroupError::DBParameterGroupQuotaExceededFault(ref cause) => { cause } CreateDBClusterParameterGroupError::Validation(ref cause) => cause, CreateDBClusterParameterGroupError::Credentials(ref err) => err.description(), CreateDBClusterParameterGroupError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } CreateDBClusterParameterGroupError::Unknown(ref cause) => cause, } } } /// Errors returned by CreateDBClusterSnapshot #[derive(Debug, PartialEq)] pub enum CreateDBClusterSnapshotError { /// <p> <i>DBClusterIdentifier</i> does not refer to an existing DB cluster. </p> DBClusterNotFoundFault(String), /// <p>User already has a DB cluster snapshot with the given identifier.</p> DBClusterSnapshotAlreadyExistsFault(String), /// <p>The supplied value is not a valid DB cluster snapshot state.</p> InvalidDBClusterSnapshotStateFault(String), /// <p>The DB cluster is not in a valid state.</p> InvalidDBClusterStateFault(String), /// <p>Request would result in user exceeding the allowed number of DB snapshots.</p> SnapshotQuotaExceededFault(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
    Unknown(String),
}

impl CreateDBClusterSnapshotError {
    pub fn from_body(body: &str) -> CreateDBClusterSnapshotError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                "DBClusterNotFoundFault" => CreateDBClusterSnapshotError::DBClusterNotFoundFault(
                    String::from(parsed_error.message),
                ),
                "DBClusterSnapshotAlreadyExistsFault" => {
                    CreateDBClusterSnapshotError::DBClusterSnapshotAlreadyExistsFault(String::from(
                        parsed_error.message,
                    ))
                }
                "InvalidDBClusterSnapshotStateFault" => {
                    CreateDBClusterSnapshotError::InvalidDBClusterSnapshotStateFault(String::from(
                        parsed_error.message,
                    ))
                }
                "InvalidDBClusterStateFault" => {
                    CreateDBClusterSnapshotError::InvalidDBClusterStateFault(String::from(
                        parsed_error.message,
                    ))
                }
                "SnapshotQuotaExceeded" => {
                    CreateDBClusterSnapshotError::SnapshotQuotaExceededFault(String::from(
                        parsed_error.message,
                    ))
                }
                _ => CreateDBClusterSnapshotError::Unknown(String::from(body)),
            },
            Err(_) => CreateDBClusterSnapshotError::Unknown(body.to_string()),
        }
    }

    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        start_element("ErrorResponse", stack)?;
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}
impl From<XmlParseError> for CreateDBClusterSnapshotError {
    fn from(err: XmlParseError) -> CreateDBClusterSnapshotError {
        let XmlParseError(message) = err;
        CreateDBClusterSnapshotError::Unknown(message.to_string())
    }
}
impl From<CredentialsError> for CreateDBClusterSnapshotError {
    fn from(err: CredentialsError) -> CreateDBClusterSnapshotError {
        CreateDBClusterSnapshotError::Credentials(err)
    }
}
impl From<HttpDispatchError> for CreateDBClusterSnapshotError {
    fn from(err: HttpDispatchError) -> CreateDBClusterSnapshotError {
        CreateDBClusterSnapshotError::HttpDispatch(err)
    }
}
impl From<io::Error> for CreateDBClusterSnapshotError {
    fn from(err: io::Error) -> CreateDBClusterSnapshotError {
        CreateDBClusterSnapshotError::HttpDispatch(HttpDispatchError::from(err))
    }
}
impl fmt::Display for CreateDBClusterSnapshotError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}
impl Error for CreateDBClusterSnapshotError {
    fn description(&self) -> &str {
        match *self {
            CreateDBClusterSnapshotError::DBClusterNotFoundFault(ref cause) => cause,
            CreateDBClusterSnapshotError::DBClusterSnapshotAlreadyExistsFault(ref cause) => cause,
            CreateDBClusterSnapshotError::InvalidDBClusterSnapshotStateFault(ref cause) => cause,
            CreateDBClusterSnapshotError::InvalidDBClusterStateFault(ref cause) => cause,
            CreateDBClusterSnapshotError::SnapshotQuotaExceededFault(ref cause) => cause,
            CreateDBClusterSnapshotError::Validation(ref cause) => cause,
            CreateDBClusterSnapshotError::Credentials(ref err) => err.description(),
            CreateDBClusterSnapshotError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            CreateDBClusterSnapshotError::Unknown(ref cause) => cause,
        }
    }
}
/// Errors returned by CreateDBInstance
#[derive(Debug, PartialEq)]
pub enum CreateDBInstanceError {
    /// <p>Specified CIDRIP or EC2 security group is not authorized for the specified DB security group.</p> <p>Neptune also may not be authorized via IAM to perform necessary actions on your behalf.</p>
    AuthorizationNotFoundFault(String),
    /// <p> <i>DBClusterIdentifier</i> does not refer to an existing DB cluster. </p>
    DBClusterNotFoundFault(String),
    /// <p>User already has a DB instance with the given identifier.</p>
    DBInstanceAlreadyExistsFault(String),
    /// <p> <i>DBParameterGroupName</i> does not refer to an existing DB parameter group. </p>
    DBParameterGroupNotFoundFault(String),
    /// <p> <i>DBSecurityGroupName</i> does not refer to an existing DB security group. </p>
    DBSecurityGroupNotFoundFault(String),
    /// <p>Subnets in the DB subnet group should cover at least two Availability Zones unless there is only one Availability Zone.</p>
    DBSubnetGroupDoesNotCoverEnoughAZs(String),
    /// <p> <i>DBSubnetGroupName</i> does not refer to an existing DB subnet group. </p>
    DBSubnetGroupNotFoundFault(String),
    /// <p> <i>Domain</i> does not refer to an existing Active Directory Domain. </p>
    DomainNotFoundFault(String),
    /// <p>Request would result in user exceeding the allowed number of DB instances.</p>
    InstanceQuotaExceededFault(String),
    /// <p>Specified DB instance class is not available in the specified Availability Zone.</p>
    InsufficientDBInstanceCapacityFault(String),
    /// <p>The DB cluster is not in a valid state.</p>
    InvalidDBClusterStateFault(String),
    /// <p>The requested subnet is invalid, or multiple subnets were requested that are not all in a common VPC.</p>
    InvalidSubnet(String),
    /// <p>The DB subnet group no longer covers all Availability Zones after it is created, because of changes made by the user.</p>
    InvalidVPCNetworkStateFault(String),
    /// <p>Error accessing KMS key.</p>
    KMSKeyNotAccessibleFault(String),
    OptionGroupNotFoundFault(String),
    /// <p>Provisioned IOPS not available in the specified Availability Zone.</p>
    ProvisionedIopsNotAvailableInAZFault(String),
    /// <p>Request would result in user exceeding the allowed amount of storage available across all DB instances.</p>
    StorageQuotaExceededFault(String),
    /// <p> <i>StorageType</i> specified cannot be associated with the DB Instance. </p>
    StorageTypeNotSupportedFault(String),
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl CreateDBInstanceError {
    pub fn from_body(body: &str) -> CreateDBInstanceError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..]
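            // Note the mixed naming of the code strings below: some carry the
            // "Fault" suffix exactly as the service reports them (for example
            // "DBClusterNotFoundFault"), while others omit it (for example
            // "DBInstanceAlreadyExists"). The strings are taken from the
            // service's error model and must match the reported <Code>
            // byte-for-byte, so they intentionally do not all mirror the
            // variant names.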
{ "AuthorizationNotFound" => CreateDBInstanceError::AuthorizationNotFoundFault( String::from(parsed_error.message), ), "DBClusterNotFoundFault" => CreateDBInstanceError::DBClusterNotFoundFault( String::from(parsed_error.message), ), "DBInstanceAlreadyExists" => CreateDBInstanceError::DBInstanceAlreadyExistsFault( String::from(parsed_error.message), ), "DBParameterGroupNotFound" => CreateDBInstanceError::DBParameterGroupNotFoundFault( String::from(parsed_error.message), ), "DBSecurityGroupNotFound" => CreateDBInstanceError::DBSecurityGroupNotFoundFault( String::from(parsed_error.message), ), "DBSubnetGroupDoesNotCoverEnoughAZs" => { CreateDBInstanceError::DBSubnetGroupDoesNotCoverEnoughAZs(String::from( parsed_error.message, )) } "DBSubnetGroupNotFoundFault" => CreateDBInstanceError::DBSubnetGroupNotFoundFault( String::from(parsed_error.message), ), "DomainNotFoundFault" => { CreateDBInstanceError::DomainNotFoundFault(String::from(parsed_error.message)) } "InstanceQuotaExceeded" => CreateDBInstanceError::InstanceQuotaExceededFault( String::from(parsed_error.message), ), "InsufficientDBInstanceCapacity" => { CreateDBInstanceError::InsufficientDBInstanceCapacityFault(String::from( parsed_error.message, )) } "InvalidDBClusterStateFault" => CreateDBInstanceError::InvalidDBClusterStateFault( String::from(parsed_error.message), ), "InvalidSubnet" => { CreateDBInstanceError::InvalidSubnet(String::from(parsed_error.message)) } "InvalidVPCNetworkStateFault" => { CreateDBInstanceError::InvalidVPCNetworkStateFault(String::from( parsed_error.message, )) } "KMSKeyNotAccessibleFault" => CreateDBInstanceError::KMSKeyNotAccessibleFault( String::from(parsed_error.message), ), "OptionGroupNotFoundFault" => CreateDBInstanceError::OptionGroupNotFoundFault( String::from(parsed_error.message), ), "ProvisionedIopsNotAvailableInAZFault" => { CreateDBInstanceError::ProvisionedIopsNotAvailableInAZFault(String::from( parsed_error.message, )) } "StorageQuotaExceeded" => CreateDBInstanceError::StorageQuotaExceededFault( String::from(parsed_error.message), ), "StorageTypeNotSupported" => CreateDBInstanceError::StorageTypeNotSupportedFault( String::from(parsed_error.message), ), _ => CreateDBInstanceError::Unknown(String::from(body)), }, Err(_) => CreateDBInstanceError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for CreateDBInstanceError { fn from(err: XmlParseError) -> CreateDBInstanceError { let XmlParseError(message) = err; CreateDBInstanceError::Unknown(message.to_string()) } } impl From<CredentialsError> for CreateDBInstanceError { fn from(err: CredentialsError) -> CreateDBInstanceError { CreateDBInstanceError::Credentials(err) } } impl From<HttpDispatchError> for CreateDBInstanceError { fn from(err: HttpDispatchError) -> CreateDBInstanceError { CreateDBInstanceError::HttpDispatch(err) } } impl From<io::Error> for CreateDBInstanceError { fn from(err: io::Error) -> CreateDBInstanceError { CreateDBInstanceError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for CreateDBInstanceError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for CreateDBInstanceError { fn description(&self) -> &str { match *self { CreateDBInstanceError::AuthorizationNotFoundFault(ref cause) => cause, CreateDBInstanceError::DBClusterNotFoundFault(ref cause) => cause, 
            CreateDBInstanceError::DBInstanceAlreadyExistsFault(ref cause) => cause,
            CreateDBInstanceError::DBParameterGroupNotFoundFault(ref cause) => cause,
            CreateDBInstanceError::DBSecurityGroupNotFoundFault(ref cause) => cause,
            CreateDBInstanceError::DBSubnetGroupDoesNotCoverEnoughAZs(ref cause) => cause,
            CreateDBInstanceError::DBSubnetGroupNotFoundFault(ref cause) => cause,
            CreateDBInstanceError::DomainNotFoundFault(ref cause) => cause,
            CreateDBInstanceError::InstanceQuotaExceededFault(ref cause) => cause,
            CreateDBInstanceError::InsufficientDBInstanceCapacityFault(ref cause) => cause,
            CreateDBInstanceError::InvalidDBClusterStateFault(ref cause) => cause,
            CreateDBInstanceError::InvalidSubnet(ref cause) => cause,
            CreateDBInstanceError::InvalidVPCNetworkStateFault(ref cause) => cause,
            CreateDBInstanceError::KMSKeyNotAccessibleFault(ref cause) => cause,
            CreateDBInstanceError::OptionGroupNotFoundFault(ref cause) => cause,
            CreateDBInstanceError::ProvisionedIopsNotAvailableInAZFault(ref cause) => cause,
            CreateDBInstanceError::StorageQuotaExceededFault(ref cause) => cause,
            CreateDBInstanceError::StorageTypeNotSupportedFault(ref cause) => cause,
            CreateDBInstanceError::Validation(ref cause) => cause,
            CreateDBInstanceError::Credentials(ref err) => err.description(),
            CreateDBInstanceError::HttpDispatch(ref dispatch_error) => dispatch_error.description(),
            CreateDBInstanceError::Unknown(ref cause) => cause,
        }
    }
}
/// Errors returned by CreateDBParameterGroup
#[derive(Debug, PartialEq)]
pub enum CreateDBParameterGroupError {
    /// <p>A DB parameter group with the same name exists.</p>
    DBParameterGroupAlreadyExistsFault(String),
    /// <p>Request would result in user exceeding the allowed number of DB parameter groups.</p>
    DBParameterGroupQuotaExceededFault(String),
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl CreateDBParameterGroupError {
    pub fn from_body(body: &str) -> CreateDBParameterGroupError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..]
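            // Illustrative caller-side handling (a sketch, not part of this
            // module): treating "the group already exists" as an idempotent
            // success while surfacing everything else. The client call and its
            // signature are assumed here for the example.
            //
            //   match client.create_db_parameter_group(&request) {
            //       Ok(_) => {} // created
            //       Err(CreateDBParameterGroupError::DBParameterGroupAlreadyExistsFault(_)) => {
            //           // already present; treat as success
            //       }
            //       Err(other) => return Err(other.into()),
            //   }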
{ "DBParameterGroupAlreadyExists" => { CreateDBParameterGroupError::DBParameterGroupAlreadyExistsFault(String::from( parsed_error.message, )) } "DBParameterGroupQuotaExceeded" => { CreateDBParameterGroupError::DBParameterGroupQuotaExceededFault(String::from( parsed_error.message, )) } _ => CreateDBParameterGroupError::Unknown(String::from(body)), }, Err(_) => CreateDBParameterGroupError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for CreateDBParameterGroupError { fn from(err: XmlParseError) -> CreateDBParameterGroupError { let XmlParseError(message) = err; CreateDBParameterGroupError::Unknown(message.to_string()) } } impl From<CredentialsError> for CreateDBParameterGroupError { fn from(err: CredentialsError) -> CreateDBParameterGroupError { CreateDBParameterGroupError::Credentials(err) } } impl From<HttpDispatchError> for CreateDBParameterGroupError { fn from(err: HttpDispatchError) -> CreateDBParameterGroupError { CreateDBParameterGroupError::HttpDispatch(err) } } impl From<io::Error> for CreateDBParameterGroupError { fn from(err: io::Error) -> CreateDBParameterGroupError { CreateDBParameterGroupError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for CreateDBParameterGroupError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for CreateDBParameterGroupError { fn description(&self) -> &str { match *self { CreateDBParameterGroupError::DBParameterGroupAlreadyExistsFault(ref cause) => cause, CreateDBParameterGroupError::DBParameterGroupQuotaExceededFault(ref cause) => cause, CreateDBParameterGroupError::Validation(ref cause) => cause, CreateDBParameterGroupError::Credentials(ref err) => err.description(), CreateDBParameterGroupError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } CreateDBParameterGroupError::Unknown(ref cause) => cause, } } } /// Errors returned by CreateDBSubnetGroup #[derive(Debug, PartialEq)] pub enum CreateDBSubnetGroupError { /// <p> <i>DBSubnetGroupName</i> is already used by an existing DB subnet group. </p> DBSubnetGroupAlreadyExistsFault(String), /// <p>Subnets in the DB subnet group should cover at least two Availability Zones unless there is only one Availability Zone.</p> DBSubnetGroupDoesNotCoverEnoughAZs(String), /// <p>Request would result in user exceeding the allowed number of DB subnet groups.</p> DBSubnetGroupQuotaExceededFault(String), /// <p>Request would result in user exceeding the allowed number of subnets in a DB subnet groups.</p> DBSubnetQuotaExceededFault(String), /// <p>The requested subnet is invalid, or multiple subnets were requested that are not all in a common VPC.</p> InvalidSubnet(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl CreateDBSubnetGroupError { pub fn from_body(body: &str) -> CreateDBSubnetGroupError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ "DBSubnetGroupAlreadyExists" => { CreateDBSubnetGroupError::DBSubnetGroupAlreadyExistsFault(String::from( parsed_error.message, )) } "DBSubnetGroupDoesNotCoverEnoughAZs" => { CreateDBSubnetGroupError::DBSubnetGroupDoesNotCoverEnoughAZs(String::from( parsed_error.message, )) } "DBSubnetGroupQuotaExceeded" => { CreateDBSubnetGroupError::DBSubnetGroupQuotaExceededFault(String::from( parsed_error.message, )) } "DBSubnetQuotaExceededFault" => { CreateDBSubnetGroupError::DBSubnetQuotaExceededFault(String::from( parsed_error.message, )) } "InvalidSubnet" => { CreateDBSubnetGroupError::InvalidSubnet(String::from(parsed_error.message)) } _ => CreateDBSubnetGroupError::Unknown(String::from(body)), }, Err(_) => CreateDBSubnetGroupError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for CreateDBSubnetGroupError { fn from(err: XmlParseError) -> CreateDBSubnetGroupError { let XmlParseError(message) = err; CreateDBSubnetGroupError::Unknown(message.to_string()) } } impl From<CredentialsError> for CreateDBSubnetGroupError { fn from(err: CredentialsError) -> CreateDBSubnetGroupError { CreateDBSubnetGroupError::Credentials(err) } } impl From<HttpDispatchError> for CreateDBSubnetGroupError { fn from(err: HttpDispatchError) -> CreateDBSubnetGroupError { CreateDBSubnetGroupError::HttpDispatch(err) } } impl From<io::Error> for CreateDBSubnetGroupError { fn from(err: io::Error) -> CreateDBSubnetGroupError { CreateDBSubnetGroupError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for CreateDBSubnetGroupError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for CreateDBSubnetGroupError { fn description(&self) -> &str { match *self { CreateDBSubnetGroupError::DBSubnetGroupAlreadyExistsFault(ref cause) => cause, CreateDBSubnetGroupError::DBSubnetGroupDoesNotCoverEnoughAZs(ref cause) => cause, CreateDBSubnetGroupError::DBSubnetGroupQuotaExceededFault(ref cause) => cause, CreateDBSubnetGroupError::DBSubnetQuotaExceededFault(ref cause) => cause, CreateDBSubnetGroupError::InvalidSubnet(ref cause) => cause, CreateDBSubnetGroupError::Validation(ref cause) => cause, CreateDBSubnetGroupError::Credentials(ref err) => err.description(), CreateDBSubnetGroupError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } CreateDBSubnetGroupError::Unknown(ref cause) => cause, } } } /// Errors returned by CreateEventSubscription #[derive(Debug, PartialEq)] pub enum CreateEventSubscriptionError { EventSubscriptionQuotaExceededFault(String), SNSInvalidTopicFault(String), SNSNoAuthorizationFault(String), SNSTopicArnNotFoundFault(String), SourceNotFoundFault(String), SubscriptionAlreadyExistFault(String), SubscriptionCategoryNotFoundFault(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
    Unknown(String),
}

impl CreateEventSubscriptionError {
    pub fn from_body(body: &str) -> CreateEventSubscriptionError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                "EventSubscriptionQuotaExceeded" => {
                    CreateEventSubscriptionError::EventSubscriptionQuotaExceededFault(String::from(
                        parsed_error.message,
                    ))
                }
                "SNSInvalidTopic" => CreateEventSubscriptionError::SNSInvalidTopicFault(
                    String::from(parsed_error.message),
                ),
                "SNSNoAuthorization" => CreateEventSubscriptionError::SNSNoAuthorizationFault(
                    String::from(parsed_error.message),
                ),
                "SNSTopicArnNotFound" => CreateEventSubscriptionError::SNSTopicArnNotFoundFault(
                    String::from(parsed_error.message),
                ),
                "SourceNotFound" => CreateEventSubscriptionError::SourceNotFoundFault(
                    String::from(parsed_error.message),
                ),
                "SubscriptionAlreadyExist" => {
                    CreateEventSubscriptionError::SubscriptionAlreadyExistFault(String::from(
                        parsed_error.message,
                    ))
                }
                "SubscriptionCategoryNotFound" => {
                    CreateEventSubscriptionError::SubscriptionCategoryNotFoundFault(String::from(
                        parsed_error.message,
                    ))
                }
                _ => CreateEventSubscriptionError::Unknown(String::from(body)),
            },
            Err(_) => CreateEventSubscriptionError::Unknown(body.to_string()),
        }
    }

    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        start_element("ErrorResponse", stack)?;
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}
impl From<XmlParseError> for CreateEventSubscriptionError {
    fn from(err: XmlParseError) -> CreateEventSubscriptionError {
        let XmlParseError(message) = err;
        CreateEventSubscriptionError::Unknown(message.to_string())
    }
}
impl From<CredentialsError> for CreateEventSubscriptionError {
    fn from(err: CredentialsError) -> CreateEventSubscriptionError {
        CreateEventSubscriptionError::Credentials(err)
    }
}
impl From<HttpDispatchError> for CreateEventSubscriptionError {
    fn from(err: HttpDispatchError) -> CreateEventSubscriptionError {
        CreateEventSubscriptionError::HttpDispatch(err)
    }
}
impl From<io::Error> for CreateEventSubscriptionError {
    fn from(err: io::Error) -> CreateEventSubscriptionError {
        CreateEventSubscriptionError::HttpDispatch(HttpDispatchError::from(err))
    }
}
impl fmt::Display for CreateEventSubscriptionError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}
impl Error for CreateEventSubscriptionError {
    fn description(&self) -> &str {
        match *self {
            CreateEventSubscriptionError::EventSubscriptionQuotaExceededFault(ref cause) => cause,
            CreateEventSubscriptionError::SNSInvalidTopicFault(ref cause) => cause,
            CreateEventSubscriptionError::SNSNoAuthorizationFault(ref cause) => cause,
            CreateEventSubscriptionError::SNSTopicArnNotFoundFault(ref cause) => cause,
            CreateEventSubscriptionError::SourceNotFoundFault(ref cause) => cause,
            CreateEventSubscriptionError::SubscriptionAlreadyExistFault(ref cause) => cause,
            CreateEventSubscriptionError::SubscriptionCategoryNotFoundFault(ref cause) => cause,
            CreateEventSubscriptionError::Validation(ref cause) => cause,
            CreateEventSubscriptionError::Credentials(ref err) => err.description(),
            CreateEventSubscriptionError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            CreateEventSubscriptionError::Unknown(ref cause) => cause,
        }
    }
}
/// Errors returned by DeleteDBCluster
#[derive(Debug, PartialEq)]
pub enum DeleteDBClusterError {
    /// <p> <i>DBClusterIdentifier</i> does not refer to an existing DB cluster. </p>
    DBClusterNotFoundFault(String),
    /// <p>User already has a DB cluster snapshot with the given identifier.</p>
    DBClusterSnapshotAlreadyExistsFault(String),
    /// <p>The supplied value is not a valid DB cluster snapshot state.</p>
    InvalidDBClusterSnapshotStateFault(String),
    /// <p>The DB cluster is not in a valid state.</p>
    InvalidDBClusterStateFault(String),
    /// <p>Request would result in user exceeding the allowed number of DB snapshots.</p>
    SnapshotQuotaExceededFault(String),
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl DeleteDBClusterError {
    pub fn from_body(body: &str) -> DeleteDBClusterError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                "DBClusterNotFoundFault" => {
                    DeleteDBClusterError::DBClusterNotFoundFault(String::from(parsed_error.message))
                }
                "DBClusterSnapshotAlreadyExistsFault" => {
                    DeleteDBClusterError::DBClusterSnapshotAlreadyExistsFault(String::from(
                        parsed_error.message,
                    ))
                }
                "InvalidDBClusterSnapshotStateFault" => {
                    DeleteDBClusterError::InvalidDBClusterSnapshotStateFault(String::from(
                        parsed_error.message,
                    ))
                }
                "InvalidDBClusterStateFault" => DeleteDBClusterError::InvalidDBClusterStateFault(
                    String::from(parsed_error.message),
                ),
                "SnapshotQuotaExceeded" => DeleteDBClusterError::SnapshotQuotaExceededFault(
                    String::from(parsed_error.message),
                ),
                _ => DeleteDBClusterError::Unknown(String::from(body)),
            },
            Err(_) => DeleteDBClusterError::Unknown(body.to_string()),
        }
    }

    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        start_element("ErrorResponse", stack)?;
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}
impl From<XmlParseError> for DeleteDBClusterError {
    fn from(err: XmlParseError) -> DeleteDBClusterError {
        let XmlParseError(message) = err;
        DeleteDBClusterError::Unknown(message.to_string())
    }
}
impl From<CredentialsError> for DeleteDBClusterError {
    fn from(err: CredentialsError) -> DeleteDBClusterError {
        DeleteDBClusterError::Credentials(err)
    }
}
impl From<HttpDispatchError> for DeleteDBClusterError {
    fn from(err: HttpDispatchError) -> DeleteDBClusterError {
        DeleteDBClusterError::HttpDispatch(err)
    }
}
impl From<io::Error> for DeleteDBClusterError {
    fn from(err: io::Error) -> DeleteDBClusterError {
        DeleteDBClusterError::HttpDispatch(HttpDispatchError::from(err))
    }
}
impl fmt::Display for DeleteDBClusterError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}
impl Error for DeleteDBClusterError {
    fn description(&self) -> &str {
        match *self {
            DeleteDBClusterError::DBClusterNotFoundFault(ref cause) => cause,
            DeleteDBClusterError::DBClusterSnapshotAlreadyExistsFault(ref cause) => cause,
            DeleteDBClusterError::InvalidDBClusterSnapshotStateFault(ref cause) => cause,
            DeleteDBClusterError::InvalidDBClusterStateFault(ref cause) => cause,
            DeleteDBClusterError::SnapshotQuotaExceededFault(ref cause) => cause,
            DeleteDBClusterError::Validation(ref cause) => cause,
            DeleteDBClusterError::Credentials(ref err) => err.description(),
            DeleteDBClusterError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            DeleteDBClusterError::Unknown(ref cause) => cause,
        }
    }
}
/// Errors returned by DeleteDBClusterParameterGroup
#[derive(Debug, PartialEq)]
pub enum DeleteDBClusterParameterGroupError {
    /// <p> <i>DBParameterGroupName</i> does not refer to an existing DB parameter group. </p>
    DBParameterGroupNotFoundFault(String),
    /// <p>The DB parameter group is in use or is in an invalid state. If you are attempting to delete the parameter group, you cannot delete it when the parameter group is in this state.</p>
    InvalidDBParameterGroupStateFault(String),
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl DeleteDBClusterParameterGroupError {
    pub fn from_body(body: &str) -> DeleteDBClusterParameterGroupError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                "DBParameterGroupNotFound" => {
                    DeleteDBClusterParameterGroupError::DBParameterGroupNotFoundFault(String::from(
                        parsed_error.message,
                    ))
                }
                "InvalidDBParameterGroupState" => {
                    DeleteDBClusterParameterGroupError::InvalidDBParameterGroupStateFault(
                        String::from(parsed_error.message),
                    )
                }
                _ => DeleteDBClusterParameterGroupError::Unknown(String::from(body)),
            },
            Err(_) => DeleteDBClusterParameterGroupError::Unknown(body.to_string()),
        }
    }

    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        start_element("ErrorResponse", stack)?;
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}
impl From<XmlParseError> for DeleteDBClusterParameterGroupError {
    fn from(err: XmlParseError) -> DeleteDBClusterParameterGroupError {
        let XmlParseError(message) = err;
        DeleteDBClusterParameterGroupError::Unknown(message.to_string())
    }
}
impl From<CredentialsError> for DeleteDBClusterParameterGroupError {
    fn from(err: CredentialsError) -> DeleteDBClusterParameterGroupError {
        DeleteDBClusterParameterGroupError::Credentials(err)
    }
}
impl From<HttpDispatchError> for DeleteDBClusterParameterGroupError {
    fn from(err: HttpDispatchError) -> DeleteDBClusterParameterGroupError {
        DeleteDBClusterParameterGroupError::HttpDispatch(err)
    }
}
impl From<io::Error> for DeleteDBClusterParameterGroupError {
    fn from(err: io::Error) -> DeleteDBClusterParameterGroupError {
        DeleteDBClusterParameterGroupError::HttpDispatch(HttpDispatchError::from(err))
    }
}
impl fmt::Display for DeleteDBClusterParameterGroupError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}
impl Error for DeleteDBClusterParameterGroupError {
    fn description(&self) -> &str {
        match *self {
            DeleteDBClusterParameterGroupError::DBParameterGroupNotFoundFault(ref cause) => cause,
            DeleteDBClusterParameterGroupError::InvalidDBParameterGroupStateFault(ref cause) => {
                cause
            }
            DeleteDBClusterParameterGroupError::Validation(ref cause) => cause,
            DeleteDBClusterParameterGroupError::Credentials(ref err) => err.description(),
            DeleteDBClusterParameterGroupError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            DeleteDBClusterParameterGroupError::Unknown(ref cause) => cause,
        }
    }
}
/// Errors returned by DeleteDBClusterSnapshot
#[derive(Debug, PartialEq)]
pub
enum DeleteDBClusterSnapshotError {
    /// <p> <i>DBClusterSnapshotIdentifier</i> does not refer to an existing DB cluster snapshot. </p>
    DBClusterSnapshotNotFoundFault(String),
    /// <p>The supplied value is not a valid DB cluster snapshot state.</p>
    InvalidDBClusterSnapshotStateFault(String),
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl DeleteDBClusterSnapshotError {
    pub fn from_body(body: &str) -> DeleteDBClusterSnapshotError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                "DBClusterSnapshotNotFoundFault" => {
                    DeleteDBClusterSnapshotError::DBClusterSnapshotNotFoundFault(String::from(
                        parsed_error.message,
                    ))
                }
                "InvalidDBClusterSnapshotStateFault" => {
                    DeleteDBClusterSnapshotError::InvalidDBClusterSnapshotStateFault(String::from(
                        parsed_error.message,
                    ))
                }
                _ => DeleteDBClusterSnapshotError::Unknown(String::from(body)),
            },
            Err(_) => DeleteDBClusterSnapshotError::Unknown(body.to_string()),
        }
    }

    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        start_element("ErrorResponse", stack)?;
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}
impl From<XmlParseError> for DeleteDBClusterSnapshotError {
    fn from(err: XmlParseError) -> DeleteDBClusterSnapshotError {
        let XmlParseError(message) = err;
        DeleteDBClusterSnapshotError::Unknown(message.to_string())
    }
}
impl From<CredentialsError> for DeleteDBClusterSnapshotError {
    fn from(err: CredentialsError) -> DeleteDBClusterSnapshotError {
        DeleteDBClusterSnapshotError::Credentials(err)
    }
}
impl From<HttpDispatchError> for DeleteDBClusterSnapshotError {
    fn from(err: HttpDispatchError) -> DeleteDBClusterSnapshotError {
        DeleteDBClusterSnapshotError::HttpDispatch(err)
    }
}
impl From<io::Error> for DeleteDBClusterSnapshotError {
    fn from(err: io::Error) -> DeleteDBClusterSnapshotError {
        DeleteDBClusterSnapshotError::HttpDispatch(HttpDispatchError::from(err))
    }
}
impl fmt::Display for DeleteDBClusterSnapshotError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}
impl Error for DeleteDBClusterSnapshotError {
    fn description(&self) -> &str {
        match *self {
            DeleteDBClusterSnapshotError::DBClusterSnapshotNotFoundFault(ref cause) => cause,
            DeleteDBClusterSnapshotError::InvalidDBClusterSnapshotStateFault(ref cause) => cause,
            DeleteDBClusterSnapshotError::Validation(ref cause) => cause,
            DeleteDBClusterSnapshotError::Credentials(ref err) => err.description(),
            DeleteDBClusterSnapshotError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            DeleteDBClusterSnapshotError::Unknown(ref cause) => cause,
        }
    }
}
/// Errors returned by DeleteDBInstance
#[derive(Debug, PartialEq)]
pub enum DeleteDBInstanceError {
    /// <p> <i>DBInstanceIdentifier</i> does not refer to an existing DB instance. </p>
    DBInstanceNotFoundFault(String),
    /// <p> <i>DBSnapshotIdentifier</i> is already used by an existing snapshot. </p>
    DBSnapshotAlreadyExistsFault(String),
    /// <p>The DB cluster is not in a valid state.</p>
    InvalidDBClusterStateFault(String),
    /// <p> The specified DB instance is not in the <i>available</i> state. </p>
    InvalidDBInstanceStateFault(String),
    /// <p>Request would result in user exceeding the allowed number of DB snapshots.</p>
    SnapshotQuotaExceededFault(String),
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl DeleteDBInstanceError {
    pub fn from_body(body: &str) -> DeleteDBInstanceError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                "DBInstanceNotFound" => DeleteDBInstanceError::DBInstanceNotFoundFault(
                    String::from(parsed_error.message),
                ),
                "DBSnapshotAlreadyExists" => DeleteDBInstanceError::DBSnapshotAlreadyExistsFault(
                    String::from(parsed_error.message),
                ),
                "InvalidDBClusterStateFault" => DeleteDBInstanceError::InvalidDBClusterStateFault(
                    String::from(parsed_error.message),
                ),
                "InvalidDBInstanceState" => DeleteDBInstanceError::InvalidDBInstanceStateFault(
                    String::from(parsed_error.message),
                ),
                "SnapshotQuotaExceeded" => DeleteDBInstanceError::SnapshotQuotaExceededFault(
                    String::from(parsed_error.message),
                ),
                _ => DeleteDBInstanceError::Unknown(String::from(body)),
            },
            Err(_) => DeleteDBInstanceError::Unknown(body.to_string()),
        }
    }

    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        start_element("ErrorResponse", stack)?;
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}
impl From<XmlParseError> for DeleteDBInstanceError {
    fn from(err: XmlParseError) -> DeleteDBInstanceError {
        let XmlParseError(message) = err;
        DeleteDBInstanceError::Unknown(message.to_string())
    }
}
impl From<CredentialsError> for DeleteDBInstanceError {
    fn from(err: CredentialsError) -> DeleteDBInstanceError {
        DeleteDBInstanceError::Credentials(err)
    }
}
impl From<HttpDispatchError> for DeleteDBInstanceError {
    fn from(err: HttpDispatchError) -> DeleteDBInstanceError {
        DeleteDBInstanceError::HttpDispatch(err)
    }
}
impl From<io::Error> for DeleteDBInstanceError {
    fn from(err: io::Error) -> DeleteDBInstanceError {
        DeleteDBInstanceError::HttpDispatch(HttpDispatchError::from(err))
    }
}
impl fmt::Display for DeleteDBInstanceError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}
impl Error for DeleteDBInstanceError {
    fn description(&self) -> &str {
        match *self {
            DeleteDBInstanceError::DBInstanceNotFoundFault(ref cause) => cause,
            DeleteDBInstanceError::DBSnapshotAlreadyExistsFault(ref cause) => cause,
            DeleteDBInstanceError::InvalidDBClusterStateFault(ref cause) => cause,
            DeleteDBInstanceError::InvalidDBInstanceStateFault(ref cause) => cause,
            DeleteDBInstanceError::SnapshotQuotaExceededFault(ref cause) => cause,
            DeleteDBInstanceError::Validation(ref cause) => cause,
            DeleteDBInstanceError::Credentials(ref err) => err.description(),
            DeleteDBInstanceError::HttpDispatch(ref dispatch_error) => dispatch_error.description(),
            DeleteDBInstanceError::Unknown(ref cause) => cause,
        }
    }
}
/// Errors returned by DeleteDBParameterGroup
#[derive(Debug, PartialEq)]
pub enum DeleteDBParameterGroupError {
    /// <p> <i>DBParameterGroupName</i> does not refer to an existing DB parameter group. </p>
    DBParameterGroupNotFoundFault(String),
    /// <p>The DB parameter group is in use or is in an invalid state. If you are attempting to delete the parameter group, you cannot delete it when the parameter group is in this state.</p>
    InvalidDBParameterGroupStateFault(String),
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl DeleteDBParameterGroupError {
    pub fn from_body(body: &str) -> DeleteDBParameterGroupError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                "DBParameterGroupNotFound" => {
                    DeleteDBParameterGroupError::DBParameterGroupNotFoundFault(String::from(
                        parsed_error.message,
                    ))
                }
                "InvalidDBParameterGroupState" => {
                    DeleteDBParameterGroupError::InvalidDBParameterGroupStateFault(String::from(
                        parsed_error.message,
                    ))
                }
                _ => DeleteDBParameterGroupError::Unknown(String::from(body)),
            },
            Err(_) => DeleteDBParameterGroupError::Unknown(body.to_string()),
        }
    }

    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        start_element("ErrorResponse", stack)?;
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}
impl From<XmlParseError> for DeleteDBParameterGroupError {
    fn from(err: XmlParseError) -> DeleteDBParameterGroupError {
        let XmlParseError(message) = err;
        DeleteDBParameterGroupError::Unknown(message.to_string())
    }
}
impl From<CredentialsError> for DeleteDBParameterGroupError {
    fn from(err: CredentialsError) -> DeleteDBParameterGroupError {
        DeleteDBParameterGroupError::Credentials(err)
    }
}
impl From<HttpDispatchError> for DeleteDBParameterGroupError {
    fn from(err: HttpDispatchError) -> DeleteDBParameterGroupError {
        DeleteDBParameterGroupError::HttpDispatch(err)
    }
}
impl From<io::Error> for DeleteDBParameterGroupError {
    fn from(err: io::Error) -> DeleteDBParameterGroupError {
        DeleteDBParameterGroupError::HttpDispatch(HttpDispatchError::from(err))
    }
}
impl fmt::Display for DeleteDBParameterGroupError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}
impl Error for DeleteDBParameterGroupError {
    fn description(&self) -> &str {
        match *self {
            DeleteDBParameterGroupError::DBParameterGroupNotFoundFault(ref cause) => cause,
            DeleteDBParameterGroupError::InvalidDBParameterGroupStateFault(ref cause) => cause,
            DeleteDBParameterGroupError::Validation(ref cause) => cause,
            DeleteDBParameterGroupError::Credentials(ref err) => err.description(),
            DeleteDBParameterGroupError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            DeleteDBParameterGroupError::Unknown(ref cause) => cause,
        }
    }
}
/// Errors returned by DeleteDBSubnetGroup
#[derive(Debug, PartialEq)]
pub enum DeleteDBSubnetGroupError {
    /// <p> <i>DBSubnetGroupName</i> does not refer to an existing DB subnet group. </p>
    DBSubnetGroupNotFoundFault(String),
    /// <p>The DB subnet group cannot be deleted because it is in use.</p>
    InvalidDBSubnetGroupStateFault(String),
    /// <p> The DB subnet is not in the <i>available</i> state. </p>
    InvalidDBSubnetStateFault(String),
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl DeleteDBSubnetGroupError {
    pub fn from_body(body: &str) -> DeleteDBSubnetGroupError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                "DBSubnetGroupNotFoundFault" => {
                    DeleteDBSubnetGroupError::DBSubnetGroupNotFoundFault(String::from(
                        parsed_error.message,
                    ))
                }
                "InvalidDBSubnetGroupStateFault" => {
                    DeleteDBSubnetGroupError::InvalidDBSubnetGroupStateFault(String::from(
                        parsed_error.message,
                    ))
                }
                "InvalidDBSubnetStateFault" => DeleteDBSubnetGroupError::InvalidDBSubnetStateFault(
                    String::from(parsed_error.message),
                ),
                _ => DeleteDBSubnetGroupError::Unknown(String::from(body)),
            },
            Err(_) => DeleteDBSubnetGroupError::Unknown(body.to_string()),
        }
    }

    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        start_element("ErrorResponse", stack)?;
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}
impl From<XmlParseError> for DeleteDBSubnetGroupError {
    fn from(err: XmlParseError) -> DeleteDBSubnetGroupError {
        let XmlParseError(message) = err;
        DeleteDBSubnetGroupError::Unknown(message.to_string())
    }
}
impl From<CredentialsError> for DeleteDBSubnetGroupError {
    fn from(err: CredentialsError) -> DeleteDBSubnetGroupError {
        DeleteDBSubnetGroupError::Credentials(err)
    }
}
impl From<HttpDispatchError> for DeleteDBSubnetGroupError {
    fn from(err: HttpDispatchError) -> DeleteDBSubnetGroupError {
        DeleteDBSubnetGroupError::HttpDispatch(err)
    }
}
impl From<io::Error> for DeleteDBSubnetGroupError {
    fn from(err: io::Error) -> DeleteDBSubnetGroupError {
        DeleteDBSubnetGroupError::HttpDispatch(HttpDispatchError::from(err))
    }
}
impl fmt::Display for DeleteDBSubnetGroupError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}
impl Error for DeleteDBSubnetGroupError {
    fn description(&self) -> &str {
        match *self {
            DeleteDBSubnetGroupError::DBSubnetGroupNotFoundFault(ref cause) => cause,
            DeleteDBSubnetGroupError::InvalidDBSubnetGroupStateFault(ref cause) => cause,
            DeleteDBSubnetGroupError::InvalidDBSubnetStateFault(ref cause) => cause,
            DeleteDBSubnetGroupError::Validation(ref cause) => cause,
            DeleteDBSubnetGroupError::Credentials(ref err) => err.description(),
            DeleteDBSubnetGroupError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            DeleteDBSubnetGroupError::Unknown(ref cause) => cause,
        }
    }
}
/// Errors returned by DeleteEventSubscription
#[derive(Debug, PartialEq)]
pub enum DeleteEventSubscriptionError {
    InvalidEventSubscriptionStateFault(String),
    SubscriptionNotFoundFault(String),
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl DeleteEventSubscriptionError {
    pub fn from_body(body: &str) -> DeleteEventSubscriptionError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                "InvalidEventSubscriptionState" => {
                    DeleteEventSubscriptionError::InvalidEventSubscriptionStateFault(String::from(
                        parsed_error.message,
                    ))
                }
                "SubscriptionNotFound" => DeleteEventSubscriptionError::SubscriptionNotFoundFault(
                    String::from(parsed_error.message),
                ),
                _ => DeleteEventSubscriptionError::Unknown(String::from(body)),
            },
            Err(_) => DeleteEventSubscriptionError::Unknown(body.to_string()),
        }
    }

    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        start_element("ErrorResponse", stack)?;
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}
impl From<XmlParseError> for DeleteEventSubscriptionError {
    fn from(err: XmlParseError) -> DeleteEventSubscriptionError {
        let XmlParseError(message) = err;
        DeleteEventSubscriptionError::Unknown(message.to_string())
    }
}
impl From<CredentialsError> for DeleteEventSubscriptionError {
    fn from(err: CredentialsError) -> DeleteEventSubscriptionError {
        DeleteEventSubscriptionError::Credentials(err)
    }
}
impl From<HttpDispatchError> for DeleteEventSubscriptionError {
    fn from(err: HttpDispatchError) -> DeleteEventSubscriptionError {
        DeleteEventSubscriptionError::HttpDispatch(err)
    }
}
impl From<io::Error> for DeleteEventSubscriptionError {
    fn from(err: io::Error) -> DeleteEventSubscriptionError {
        DeleteEventSubscriptionError::HttpDispatch(HttpDispatchError::from(err))
    }
}
impl fmt::Display for DeleteEventSubscriptionError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}
impl Error for DeleteEventSubscriptionError {
    fn description(&self) -> &str {
        match *self {
            DeleteEventSubscriptionError::InvalidEventSubscriptionStateFault(ref cause) => cause,
            DeleteEventSubscriptionError::SubscriptionNotFoundFault(ref cause) => cause,
            DeleteEventSubscriptionError::Validation(ref cause) => cause,
            DeleteEventSubscriptionError::Credentials(ref err) => err.description(),
            DeleteEventSubscriptionError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            DeleteEventSubscriptionError::Unknown(ref cause) => cause,
        }
    }
}
/// Errors returned by DescribeDBClusterParameterGroups
#[derive(Debug, PartialEq)]
pub enum DescribeDBClusterParameterGroupsError {
    /// <p> <i>DBParameterGroupName</i> does not refer to an existing DB parameter group. </p>
    DBParameterGroupNotFoundFault(String),
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl DescribeDBClusterParameterGroupsError {
    pub fn from_body(body: &str) -> DescribeDBClusterParameterGroupsError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..]
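            // A sketch of how callers commonly classify these variants for
            // retries (illustrative; this helper is not part of the crate).
            // Dispatch failures are usually transient network trouble, while
            // the modeled faults and validation errors are permanent:
            //
            //   fn is_retriable(err: &DescribeDBClusterParameterGroupsError) -> bool {
            //       match *err {
            //           DescribeDBClusterParameterGroupsError::HttpDispatch(_) => true,
            //           _ => false,
            //       }
            //   }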
{ "DBParameterGroupNotFound" => { DescribeDBClusterParameterGroupsError::DBParameterGroupNotFoundFault( String::from(parsed_error.message), ) } _ => DescribeDBClusterParameterGroupsError::Unknown(String::from(body)), }, Err(_) => DescribeDBClusterParameterGroupsError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for DescribeDBClusterParameterGroupsError { fn from(err: XmlParseError) -> DescribeDBClusterParameterGroupsError { let XmlParseError(message) = err; DescribeDBClusterParameterGroupsError::Unknown(message.to_string()) } } impl From<CredentialsError> for DescribeDBClusterParameterGroupsError { fn from(err: CredentialsError) -> DescribeDBClusterParameterGroupsError { DescribeDBClusterParameterGroupsError::Credentials(err) } } impl From<HttpDispatchError> for DescribeDBClusterParameterGroupsError { fn from(err: HttpDispatchError) -> DescribeDBClusterParameterGroupsError { DescribeDBClusterParameterGroupsError::HttpDispatch(err) } } impl From<io::Error> for DescribeDBClusterParameterGroupsError { fn from(err: io::Error) -> DescribeDBClusterParameterGroupsError { DescribeDBClusterParameterGroupsError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for DescribeDBClusterParameterGroupsError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DescribeDBClusterParameterGroupsError { fn description(&self) -> &str { match *self { DescribeDBClusterParameterGroupsError::DBParameterGroupNotFoundFault(ref cause) => { cause } DescribeDBClusterParameterGroupsError::Validation(ref cause) => cause, DescribeDBClusterParameterGroupsError::Credentials(ref err) => err.description(), DescribeDBClusterParameterGroupsError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } DescribeDBClusterParameterGroupsError::Unknown(ref cause) => cause, } } } /// Errors returned by DescribeDBClusterParameters #[derive(Debug, PartialEq)] pub enum DescribeDBClusterParametersError { /// <p> <i>DBParameterGroupName</i> does not refer to an existing DB parameter group. </p> DBParameterGroupNotFoundFault(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl DescribeDBClusterParametersError { pub fn from_body(body: &str) -> DescribeDBClusterParametersError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ "DBParameterGroupNotFound" => { DescribeDBClusterParametersError::DBParameterGroupNotFoundFault(String::from( parsed_error.message, )) } _ => DescribeDBClusterParametersError::Unknown(String::from(body)), }, Err(_) => DescribeDBClusterParametersError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for DescribeDBClusterParametersError { fn from(err: XmlParseError) -> DescribeDBClusterParametersError { let XmlParseError(message) = err; DescribeDBClusterParametersError::Unknown(message.to_string()) } } impl From<CredentialsError> for DescribeDBClusterParametersError { fn from(err: CredentialsError) -> DescribeDBClusterParametersError { DescribeDBClusterParametersError::Credentials(err) } } impl From<HttpDispatchError> for DescribeDBClusterParametersError { fn from(err: HttpDispatchError) -> DescribeDBClusterParametersError { DescribeDBClusterParametersError::HttpDispatch(err) } } impl From<io::Error> for DescribeDBClusterParametersError { fn from(err: io::Error) -> DescribeDBClusterParametersError { DescribeDBClusterParametersError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for DescribeDBClusterParametersError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DescribeDBClusterParametersError { fn description(&self) -> &str { match *self { DescribeDBClusterParametersError::DBParameterGroupNotFoundFault(ref cause) => cause, DescribeDBClusterParametersError::Validation(ref cause) => cause, DescribeDBClusterParametersError::Credentials(ref err) => err.description(), DescribeDBClusterParametersError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } DescribeDBClusterParametersError::Unknown(ref cause) => cause, } } } /// Errors returned by DescribeDBClusterSnapshotAttributes #[derive(Debug, PartialEq)] pub enum DescribeDBClusterSnapshotAttributesError { /// <p> <i>DBClusterSnapshotIdentifier</i> does not refer to an existing DB cluster snapshot. </p> DBClusterSnapshotNotFoundFault(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl DescribeDBClusterSnapshotAttributesError { pub fn from_body(body: &str) -> DescribeDBClusterSnapshotAttributesError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ "DBClusterSnapshotNotFoundFault" => { DescribeDBClusterSnapshotAttributesError::DBClusterSnapshotNotFoundFault( String::from(parsed_error.message), ) } _ => DescribeDBClusterSnapshotAttributesError::Unknown(String::from(body)), }, Err(_) => DescribeDBClusterSnapshotAttributesError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for DescribeDBClusterSnapshotAttributesError { fn from(err: XmlParseError) -> DescribeDBClusterSnapshotAttributesError { let XmlParseError(message) = err; DescribeDBClusterSnapshotAttributesError::Unknown(message.to_string()) } } impl From<CredentialsError> for DescribeDBClusterSnapshotAttributesError { fn from(err: CredentialsError) -> DescribeDBClusterSnapshotAttributesError { DescribeDBClusterSnapshotAttributesError::Credentials(err) } } impl From<HttpDispatchError> for DescribeDBClusterSnapshotAttributesError { fn from(err: HttpDispatchError) -> DescribeDBClusterSnapshotAttributesError { DescribeDBClusterSnapshotAttributesError::HttpDispatch(err) } } impl From<io::Error> for DescribeDBClusterSnapshotAttributesError { fn from(err: io::Error) -> DescribeDBClusterSnapshotAttributesError { DescribeDBClusterSnapshotAttributesError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for DescribeDBClusterSnapshotAttributesError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DescribeDBClusterSnapshotAttributesError { fn description(&self) -> &str { match *self { DescribeDBClusterSnapshotAttributesError::DBClusterSnapshotNotFoundFault(ref cause) => { cause } DescribeDBClusterSnapshotAttributesError::Validation(ref cause) => cause, DescribeDBClusterSnapshotAttributesError::Credentials(ref err) => err.description(), DescribeDBClusterSnapshotAttributesError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } DescribeDBClusterSnapshotAttributesError::Unknown(ref cause) => cause, } } } /// Errors returned by DescribeDBClusterSnapshots #[derive(Debug, PartialEq)] pub enum DescribeDBClusterSnapshotsError { /// <p> <i>DBClusterSnapshotIdentifier</i> does not refer to an existing DB cluster snapshot. </p> DBClusterSnapshotNotFoundFault(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl DescribeDBClusterSnapshotsError { pub fn from_body(body: &str) -> DescribeDBClusterSnapshotsError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ "DBClusterSnapshotNotFoundFault" => { DescribeDBClusterSnapshotsError::DBClusterSnapshotNotFoundFault(String::from( parsed_error.message, )) } _ => DescribeDBClusterSnapshotsError::Unknown(String::from(body)), }, Err(_) => DescribeDBClusterSnapshotsError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for DescribeDBClusterSnapshotsError { fn from(err: XmlParseError) -> DescribeDBClusterSnapshotsError { let XmlParseError(message) = err; DescribeDBClusterSnapshotsError::Unknown(message.to_string()) } } impl From<CredentialsError> for DescribeDBClusterSnapshotsError { fn from(err: CredentialsError) -> DescribeDBClusterSnapshotsError { DescribeDBClusterSnapshotsError::Credentials(err) } } impl From<HttpDispatchError> for DescribeDBClusterSnapshotsError { fn from(err: HttpDispatchError) -> DescribeDBClusterSnapshotsError { DescribeDBClusterSnapshotsError::HttpDispatch(err) } } impl From<io::Error> for DescribeDBClusterSnapshotsError { fn from(err: io::Error) -> DescribeDBClusterSnapshotsError { DescribeDBClusterSnapshotsError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for DescribeDBClusterSnapshotsError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DescribeDBClusterSnapshotsError { fn description(&self) -> &str { match *self { DescribeDBClusterSnapshotsError::DBClusterSnapshotNotFoundFault(ref cause) => cause, DescribeDBClusterSnapshotsError::Validation(ref cause) => cause, DescribeDBClusterSnapshotsError::Credentials(ref err) => err.description(), DescribeDBClusterSnapshotsError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } DescribeDBClusterSnapshotsError::Unknown(ref cause) => cause, } } } /// Errors returned by DescribeDBClusters #[derive(Debug, PartialEq)] pub enum DescribeDBClustersError { /// <p> <i>DBClusterIdentifier</i> does not refer to an existing DB cluster. </p> DBClusterNotFoundFault(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl DescribeDBClustersError { pub fn from_body(body: &str) -> DescribeDBClustersError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ "DBClusterNotFoundFault" => DescribeDBClustersError::DBClusterNotFoundFault( String::from(parsed_error.message), ), _ => DescribeDBClustersError::Unknown(String::from(body)), }, Err(_) => DescribeDBClustersError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for DescribeDBClustersError { fn from(err: XmlParseError) -> DescribeDBClustersError { let XmlParseError(message) = err; DescribeDBClustersError::Unknown(message.to_string()) } } impl From<CredentialsError> for DescribeDBClustersError { fn from(err: CredentialsError) -> DescribeDBClustersError { DescribeDBClustersError::Credentials(err) } } impl From<HttpDispatchError> for DescribeDBClustersError { fn from(err: HttpDispatchError) -> DescribeDBClustersError { DescribeDBClustersError::HttpDispatch(err) } } impl From<io::Error> for DescribeDBClustersError { fn from(err: io::Error) -> DescribeDBClustersError { DescribeDBClustersError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for DescribeDBClustersError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DescribeDBClustersError { fn description(&self) -> &str { match *self { DescribeDBClustersError::DBClusterNotFoundFault(ref cause) => cause, DescribeDBClustersError::Validation(ref cause) => cause, DescribeDBClustersError::Credentials(ref err) => err.description(), DescribeDBClustersError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } DescribeDBClustersError::Unknown(ref cause) => cause, } } } /// Errors returned by DescribeDBEngineVersions #[derive(Debug, PartialEq)] pub enum DescribeDBEngineVersionsError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl DescribeDBEngineVersionsError { pub fn from_body(body: &str) -> DescribeDBEngineVersionsError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ _ => DescribeDBEngineVersionsError::Unknown(String::from(body)), }, Err(_) => DescribeDBEngineVersionsError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for DescribeDBEngineVersionsError { fn from(err: XmlParseError) -> DescribeDBEngineVersionsError { let XmlParseError(message) = err; DescribeDBEngineVersionsError::Unknown(message.to_string()) } } impl From<CredentialsError> for DescribeDBEngineVersionsError { fn from(err: CredentialsError) -> DescribeDBEngineVersionsError { DescribeDBEngineVersionsError::Credentials(err) } } impl From<HttpDispatchError> for DescribeDBEngineVersionsError { fn from(err: HttpDispatchError) -> DescribeDBEngineVersionsError { DescribeDBEngineVersionsError::HttpDispatch(err) } } impl From<io::Error> for DescribeDBEngineVersionsError { fn from(err: io::Error) -> DescribeDBEngineVersionsError { DescribeDBEngineVersionsError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for DescribeDBEngineVersionsError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DescribeDBEngineVersionsError { fn description(&self) -> &str { match *self { DescribeDBEngineVersionsError::Validation(ref cause) => cause, DescribeDBEngineVersionsError::Credentials(ref err) => err.description(), DescribeDBEngineVersionsError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } DescribeDBEngineVersionsError::Unknown(ref cause) => cause, } } } /// Errors returned by DescribeDBInstances #[derive(Debug, PartialEq)] pub enum DescribeDBInstancesError { /// <p> <i>DBInstanceIdentifier</i> does not refer to an existing DB instance. </p> DBInstanceNotFoundFault(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl DescribeDBInstancesError { pub fn from_body(body: &str) -> DescribeDBInstancesError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ "DBInstanceNotFound" => DescribeDBInstancesError::DBInstanceNotFoundFault( String::from(parsed_error.message), ), _ => DescribeDBInstancesError::Unknown(String::from(body)), }, Err(_) => DescribeDBInstancesError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for DescribeDBInstancesError { fn from(err: XmlParseError) -> DescribeDBInstancesError { let XmlParseError(message) = err; DescribeDBInstancesError::Unknown(message.to_string()) } } impl From<CredentialsError> for DescribeDBInstancesError { fn from(err: CredentialsError) -> DescribeDBInstancesError { DescribeDBInstancesError::Credentials(err) } } impl From<HttpDispatchError> for DescribeDBInstancesError { fn from(err: HttpDispatchError) -> DescribeDBInstancesError { DescribeDBInstancesError::HttpDispatch(err) } } impl From<io::Error> for DescribeDBInstancesError { fn from(err: io::Error) -> DescribeDBInstancesError { DescribeDBInstancesError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for DescribeDBInstancesError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DescribeDBInstancesError { fn description(&self) -> &str { match *self { DescribeDBInstancesError::DBInstanceNotFoundFault(ref cause) => cause, DescribeDBInstancesError::Validation(ref cause) => cause, DescribeDBInstancesError::Credentials(ref err) => err.description(), DescribeDBInstancesError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } DescribeDBInstancesError::Unknown(ref cause) => cause, } } } /// Errors returned by DescribeDBParameterGroups #[derive(Debug, PartialEq)] pub enum DescribeDBParameterGroupsError { /// <p> <i>DBParameterGroupName</i> does not refer to an existing DB parameter group. </p> DBParameterGroupNotFoundFault(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl DescribeDBParameterGroupsError { pub fn from_body(body: &str) -> DescribeDBParameterGroupsError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ "DBParameterGroupNotFound" => { DescribeDBParameterGroupsError::DBParameterGroupNotFoundFault(String::from( parsed_error.message, )) } _ => DescribeDBParameterGroupsError::Unknown(String::from(body)), }, Err(_) => DescribeDBParameterGroupsError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for DescribeDBParameterGroupsError { fn from(err: XmlParseError) -> DescribeDBParameterGroupsError { let XmlParseError(message) = err; DescribeDBParameterGroupsError::Unknown(message.to_string()) } } impl From<CredentialsError> for DescribeDBParameterGroupsError { fn from(err: CredentialsError) -> DescribeDBParameterGroupsError { DescribeDBParameterGroupsError::Credentials(err) } } impl From<HttpDispatchError> for DescribeDBParameterGroupsError { fn from(err: HttpDispatchError) -> DescribeDBParameterGroupsError { DescribeDBParameterGroupsError::HttpDispatch(err) } } impl From<io::Error> for DescribeDBParameterGroupsError { fn from(err: io::Error) -> DescribeDBParameterGroupsError { DescribeDBParameterGroupsError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for DescribeDBParameterGroupsError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DescribeDBParameterGroupsError { fn description(&self) -> &str { match *self { DescribeDBParameterGroupsError::DBParameterGroupNotFoundFault(ref cause) => cause, DescribeDBParameterGroupsError::Validation(ref cause) => cause, DescribeDBParameterGroupsError::Credentials(ref err) => err.description(), DescribeDBParameterGroupsError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } DescribeDBParameterGroupsError::Unknown(ref cause) => cause, } } } /// Errors returned by DescribeDBParameters #[derive(Debug, PartialEq)] pub enum DescribeDBParametersError { /// <p> <i>DBParameterGroupName</i> does not refer to an existing DB parameter group. </p> DBParameterGroupNotFoundFault(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl DescribeDBParametersError { pub fn from_body(body: &str) -> DescribeDBParametersError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ "DBParameterGroupNotFound" => { DescribeDBParametersError::DBParameterGroupNotFoundFault(String::from( parsed_error.message, )) } _ => DescribeDBParametersError::Unknown(String::from(body)), }, Err(_) => DescribeDBParametersError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for DescribeDBParametersError { fn from(err: XmlParseError) -> DescribeDBParametersError { let XmlParseError(message) = err; DescribeDBParametersError::Unknown(message.to_string()) } } impl From<CredentialsError> for DescribeDBParametersError { fn from(err: CredentialsError) -> DescribeDBParametersError { DescribeDBParametersError::Credentials(err) } } impl From<HttpDispatchError> for DescribeDBParametersError { fn from(err: HttpDispatchError) -> DescribeDBParametersError { DescribeDBParametersError::HttpDispatch(err) } } impl From<io::Error> for DescribeDBParametersError { fn from(err: io::Error) -> DescribeDBParametersError { DescribeDBParametersError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for DescribeDBParametersError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DescribeDBParametersError { fn description(&self) -> &str { match *self { DescribeDBParametersError::DBParameterGroupNotFoundFault(ref cause) => cause, DescribeDBParametersError::Validation(ref cause) => cause, DescribeDBParametersError::Credentials(ref err) => err.description(), DescribeDBParametersError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } DescribeDBParametersError::Unknown(ref cause) => cause, } } } /// Errors returned by DescribeDBSubnetGroups #[derive(Debug, PartialEq)] pub enum DescribeDBSubnetGroupsError { /// <p> <i>DBSubnetGroupName</i> does not refer to an existing DB subnet group. </p> DBSubnetGroupNotFoundFault(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl DescribeDBSubnetGroupsError { pub fn from_body(body: &str) -> DescribeDBSubnetGroupsError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ "DBSubnetGroupNotFoundFault" => { DescribeDBSubnetGroupsError::DBSubnetGroupNotFoundFault(String::from( parsed_error.message, )) } _ => DescribeDBSubnetGroupsError::Unknown(String::from(body)), }, Err(_) => DescribeDBSubnetGroupsError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for DescribeDBSubnetGroupsError { fn from(err: XmlParseError) -> DescribeDBSubnetGroupsError { let XmlParseError(message) = err; DescribeDBSubnetGroupsError::Unknown(message.to_string()) } } impl From<CredentialsError> for DescribeDBSubnetGroupsError { fn from(err: CredentialsError) -> DescribeDBSubnetGroupsError { DescribeDBSubnetGroupsError::Credentials(err) } } impl From<HttpDispatchError> for DescribeDBSubnetGroupsError { fn from(err: HttpDispatchError) -> DescribeDBSubnetGroupsError { DescribeDBSubnetGroupsError::HttpDispatch(err) } } impl From<io::Error> for DescribeDBSubnetGroupsError { fn from(err: io::Error) -> DescribeDBSubnetGroupsError { DescribeDBSubnetGroupsError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for DescribeDBSubnetGroupsError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DescribeDBSubnetGroupsError { fn description(&self) -> &str { match *self { DescribeDBSubnetGroupsError::DBSubnetGroupNotFoundFault(ref cause) => cause, DescribeDBSubnetGroupsError::Validation(ref cause) => cause, DescribeDBSubnetGroupsError::Credentials(ref err) => err.description(), DescribeDBSubnetGroupsError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } DescribeDBSubnetGroupsError::Unknown(ref cause) => cause, } } } /// Errors returned by DescribeEngineDefaultClusterParameters #[derive(Debug, PartialEq)] pub enum DescribeEngineDefaultClusterParametersError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl DescribeEngineDefaultClusterParametersError { pub fn from_body(body: &str) -> DescribeEngineDefaultClusterParametersError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
                _ => DescribeEngineDefaultClusterParametersError::Unknown(String::from(body)),
            },
            Err(_) => DescribeEngineDefaultClusterParametersError::Unknown(body.to_string()),
        }
    }

    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        start_element("ErrorResponse", stack)?;
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}

impl From<XmlParseError> for DescribeEngineDefaultClusterParametersError {
    fn from(err: XmlParseError) -> DescribeEngineDefaultClusterParametersError {
        let XmlParseError(message) = err;
        DescribeEngineDefaultClusterParametersError::Unknown(message.to_string())
    }
}

impl From<CredentialsError> for DescribeEngineDefaultClusterParametersError {
    fn from(err: CredentialsError) -> DescribeEngineDefaultClusterParametersError {
        DescribeEngineDefaultClusterParametersError::Credentials(err)
    }
}

impl From<HttpDispatchError> for DescribeEngineDefaultClusterParametersError {
    fn from(err: HttpDispatchError) -> DescribeEngineDefaultClusterParametersError {
        DescribeEngineDefaultClusterParametersError::HttpDispatch(err)
    }
}

impl From<io::Error> for DescribeEngineDefaultClusterParametersError {
    fn from(err: io::Error) -> DescribeEngineDefaultClusterParametersError {
        DescribeEngineDefaultClusterParametersError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for DescribeEngineDefaultClusterParametersError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for DescribeEngineDefaultClusterParametersError {
    fn description(&self) -> &str {
        match *self {
            DescribeEngineDefaultClusterParametersError::Validation(ref cause) => cause,
            DescribeEngineDefaultClusterParametersError::Credentials(ref err) => err.description(),
            DescribeEngineDefaultClusterParametersError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            DescribeEngineDefaultClusterParametersError::Unknown(ref cause) => cause,
        }
    }
}

/// Errors returned by DescribeEngineDefaultParameters
#[derive(Debug, PartialEq)]
pub enum DescribeEngineDefaultParametersError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl DescribeEngineDefaultParametersError {
    pub fn from_body(body: &str) -> DescribeEngineDefaultParametersError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                _ => DescribeEngineDefaultParametersError::Unknown(String::from(body)),
            },
            Err(_) => DescribeEngineDefaultParametersError::Unknown(body.to_string()),
        }
    }

    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        start_element("ErrorResponse", stack)?;
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}

impl From<XmlParseError> for DescribeEngineDefaultParametersError {
    fn from(err: XmlParseError) -> DescribeEngineDefaultParametersError {
        let XmlParseError(message) = err;
        DescribeEngineDefaultParametersError::Unknown(message.to_string())
    }
}

impl From<CredentialsError> for DescribeEngineDefaultParametersError {
    fn from(err: CredentialsError) -> DescribeEngineDefaultParametersError {
        DescribeEngineDefaultParametersError::Credentials(err)
    }
}

impl From<HttpDispatchError> for DescribeEngineDefaultParametersError {
    fn from(err: HttpDispatchError) -> DescribeEngineDefaultParametersError {
        DescribeEngineDefaultParametersError::HttpDispatch(err)
    }
}

impl From<io::Error> for DescribeEngineDefaultParametersError {
    fn from(err: io::Error) -> DescribeEngineDefaultParametersError {
        DescribeEngineDefaultParametersError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for DescribeEngineDefaultParametersError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for DescribeEngineDefaultParametersError {
    fn description(&self) -> &str {
        match *self {
            DescribeEngineDefaultParametersError::Validation(ref cause) => cause,
            DescribeEngineDefaultParametersError::Credentials(ref err) => err.description(),
            DescribeEngineDefaultParametersError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            DescribeEngineDefaultParametersError::Unknown(ref cause) => cause,
        }
    }
}

/// Errors returned by DescribeEventCategories
#[derive(Debug, PartialEq)]
pub enum DescribeEventCategoriesError {
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl DescribeEventCategoriesError {
    pub fn from_body(body: &str) -> DescribeEventCategoriesError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                _ => DescribeEventCategoriesError::Unknown(String::from(body)),
            },
            Err(_) => DescribeEventCategoriesError::Unknown(body.to_string()),
        }
    }

    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        start_element("ErrorResponse", stack)?;
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}

impl From<XmlParseError> for DescribeEventCategoriesError {
    fn from(err: XmlParseError) -> DescribeEventCategoriesError {
        let XmlParseError(message) = err;
        DescribeEventCategoriesError::Unknown(message.to_string())
    }
}

impl From<CredentialsError> for DescribeEventCategoriesError {
    fn from(err: CredentialsError) -> DescribeEventCategoriesError {
        DescribeEventCategoriesError::Credentials(err)
    }
}

impl From<HttpDispatchError> for DescribeEventCategoriesError {
    fn from(err: HttpDispatchError) -> DescribeEventCategoriesError {
        DescribeEventCategoriesError::HttpDispatch(err)
    }
}

impl From<io::Error> for DescribeEventCategoriesError {
    fn from(err: io::Error) -> DescribeEventCategoriesError {
        DescribeEventCategoriesError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for DescribeEventCategoriesError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for DescribeEventCategoriesError {
    fn description(&self) -> &str {
        match *self {
            DescribeEventCategoriesError::Validation(ref cause) => cause,
            DescribeEventCategoriesError::Credentials(ref err) => err.description(),
            DescribeEventCategoriesError::HttpDispatch(ref dispatch_error) => {
                dispatch_error.description()
            }
            DescribeEventCategoriesError::Unknown(ref cause) => cause,
        }
    }
}

/// Errors returned by DescribeEventSubscriptions
#[derive(Debug, PartialEq)]
pub enum DescribeEventSubscriptionsError {
    SubscriptionNotFoundFault(String),
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl DescribeEventSubscriptionsError {
    pub fn from_body(body: &str) -> DescribeEventSubscriptionsError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
{ "SubscriptionNotFound" => { DescribeEventSubscriptionsError::SubscriptionNotFoundFault(String::from( parsed_error.message, )) } _ => DescribeEventSubscriptionsError::Unknown(String::from(body)), }, Err(_) => DescribeEventSubscriptionsError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for DescribeEventSubscriptionsError { fn from(err: XmlParseError) -> DescribeEventSubscriptionsError { let XmlParseError(message) = err; DescribeEventSubscriptionsError::Unknown(message.to_string()) } } impl From<CredentialsError> for DescribeEventSubscriptionsError { fn from(err: CredentialsError) -> DescribeEventSubscriptionsError { DescribeEventSubscriptionsError::Credentials(err) } } impl From<HttpDispatchError> for DescribeEventSubscriptionsError { fn from(err: HttpDispatchError) -> DescribeEventSubscriptionsError { DescribeEventSubscriptionsError::HttpDispatch(err) } } impl From<io::Error> for DescribeEventSubscriptionsError { fn from(err: io::Error) -> DescribeEventSubscriptionsError { DescribeEventSubscriptionsError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for DescribeEventSubscriptionsError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DescribeEventSubscriptionsError { fn description(&self) -> &str { match *self { DescribeEventSubscriptionsError::SubscriptionNotFoundFault(ref cause) => cause, DescribeEventSubscriptionsError::Validation(ref cause) => cause, DescribeEventSubscriptionsError::Credentials(ref err) => err.description(), DescribeEventSubscriptionsError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } DescribeEventSubscriptionsError::Unknown(ref cause) => cause, } } } /// Errors returned by DescribeEvents #[derive(Debug, PartialEq)] pub enum DescribeEventsError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl DescribeEventsError { pub fn from_body(body: &str) -> DescribeEventsError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ _ => DescribeEventsError::Unknown(String::from(body)), }, Err(_) => DescribeEventsError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for DescribeEventsError { fn from(err: XmlParseError) -> DescribeEventsError { let XmlParseError(message) = err; DescribeEventsError::Unknown(message.to_string()) } } impl From<CredentialsError> for DescribeEventsError { fn from(err: CredentialsError) -> DescribeEventsError { DescribeEventsError::Credentials(err) } } impl From<HttpDispatchError> for DescribeEventsError { fn from(err: HttpDispatchError) -> DescribeEventsError { DescribeEventsError::HttpDispatch(err) } } impl From<io::Error> for DescribeEventsError { fn from(err: io::Error) -> DescribeEventsError { DescribeEventsError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for DescribeEventsError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DescribeEventsError { fn description(&self) -> &str { match *self { DescribeEventsError::Validation(ref cause) => cause, DescribeEventsError::Credentials(ref err) => err.description(), DescribeEventsError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), DescribeEventsError::Unknown(ref cause) => cause, } } } /// Errors returned by DescribeOrderableDBInstanceOptions #[derive(Debug, PartialEq)] pub enum DescribeOrderableDBInstanceOptionsError { /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl DescribeOrderableDBInstanceOptionsError { pub fn from_body(body: &str) -> DescribeOrderableDBInstanceOptionsError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ _ => DescribeOrderableDBInstanceOptionsError::Unknown(String::from(body)), }, Err(_) => DescribeOrderableDBInstanceOptionsError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for DescribeOrderableDBInstanceOptionsError { fn from(err: XmlParseError) -> DescribeOrderableDBInstanceOptionsError { let XmlParseError(message) = err; DescribeOrderableDBInstanceOptionsError::Unknown(message.to_string()) } } impl From<CredentialsError> for DescribeOrderableDBInstanceOptionsError { fn from(err: CredentialsError) -> DescribeOrderableDBInstanceOptionsError { DescribeOrderableDBInstanceOptionsError::Credentials(err) } } impl From<HttpDispatchError> for DescribeOrderableDBInstanceOptionsError { fn from(err: HttpDispatchError) -> DescribeOrderableDBInstanceOptionsError { DescribeOrderableDBInstanceOptionsError::HttpDispatch(err) } } impl From<io::Error> for DescribeOrderableDBInstanceOptionsError { fn from(err: io::Error) -> DescribeOrderableDBInstanceOptionsError { DescribeOrderableDBInstanceOptionsError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for DescribeOrderableDBInstanceOptionsError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DescribeOrderableDBInstanceOptionsError { fn description(&self) -> &str { match *self { DescribeOrderableDBInstanceOptionsError::Validation(ref cause) => cause, DescribeOrderableDBInstanceOptionsError::Credentials(ref err) => err.description(), DescribeOrderableDBInstanceOptionsError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } DescribeOrderableDBInstanceOptionsError::Unknown(ref cause) => cause, } } } /// Errors returned by DescribePendingMaintenanceActions #[derive(Debug, PartialEq)] pub enum DescribePendingMaintenanceActionsError { /// <p>The specified resource ID was not found.</p> ResourceNotFoundFault(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl DescribePendingMaintenanceActionsError { pub fn from_body(body: &str) -> DescribePendingMaintenanceActionsError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ "ResourceNotFoundFault" => { DescribePendingMaintenanceActionsError::ResourceNotFoundFault(String::from( parsed_error.message, )) } _ => DescribePendingMaintenanceActionsError::Unknown(String::from(body)), }, Err(_) => DescribePendingMaintenanceActionsError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for DescribePendingMaintenanceActionsError { fn from(err: XmlParseError) -> DescribePendingMaintenanceActionsError { let XmlParseError(message) = err; DescribePendingMaintenanceActionsError::Unknown(message.to_string()) } } impl From<CredentialsError> for DescribePendingMaintenanceActionsError { fn from(err: CredentialsError) -> DescribePendingMaintenanceActionsError { DescribePendingMaintenanceActionsError::Credentials(err) } } impl From<HttpDispatchError> for DescribePendingMaintenanceActionsError { fn from(err: HttpDispatchError) -> DescribePendingMaintenanceActionsError { DescribePendingMaintenanceActionsError::HttpDispatch(err) } } impl From<io::Error> for DescribePendingMaintenanceActionsError { fn from(err: io::Error) -> DescribePendingMaintenanceActionsError { DescribePendingMaintenanceActionsError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for DescribePendingMaintenanceActionsError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DescribePendingMaintenanceActionsError { fn description(&self) -> &str { match *self { DescribePendingMaintenanceActionsError::ResourceNotFoundFault(ref cause) => cause, DescribePendingMaintenanceActionsError::Validation(ref cause) => cause, DescribePendingMaintenanceActionsError::Credentials(ref err) => err.description(), DescribePendingMaintenanceActionsError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } DescribePendingMaintenanceActionsError::Unknown(ref cause) => cause, } } } /// Errors returned by DescribeValidDBInstanceModifications #[derive(Debug, PartialEq)] pub enum DescribeValidDBInstanceModificationsError { /// <p> <i>DBInstanceIdentifier</i> does not refer to an existing DB instance. </p> DBInstanceNotFoundFault(String), /// <p> The specified DB instance is not in the <i>available</i> state. </p> InvalidDBInstanceStateFault(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl DescribeValidDBInstanceModificationsError { pub fn from_body(body: &str) -> DescribeValidDBInstanceModificationsError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ "DBInstanceNotFound" => { DescribeValidDBInstanceModificationsError::DBInstanceNotFoundFault( String::from(parsed_error.message), ) } "InvalidDBInstanceState" => { DescribeValidDBInstanceModificationsError::InvalidDBInstanceStateFault( String::from(parsed_error.message), ) } _ => DescribeValidDBInstanceModificationsError::Unknown(String::from(body)), }, Err(_) => DescribeValidDBInstanceModificationsError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for DescribeValidDBInstanceModificationsError { fn from(err: XmlParseError) -> DescribeValidDBInstanceModificationsError { let XmlParseError(message) = err; DescribeValidDBInstanceModificationsError::Unknown(message.to_string()) } } impl From<CredentialsError> for DescribeValidDBInstanceModificationsError { fn from(err: CredentialsError) -> DescribeValidDBInstanceModificationsError { DescribeValidDBInstanceModificationsError::Credentials(err) } } impl From<HttpDispatchError> for DescribeValidDBInstanceModificationsError { fn from(err: HttpDispatchError) -> DescribeValidDBInstanceModificationsError { DescribeValidDBInstanceModificationsError::HttpDispatch(err) } } impl From<io::Error> for DescribeValidDBInstanceModificationsError { fn from(err: io::Error) -> DescribeValidDBInstanceModificationsError { DescribeValidDBInstanceModificationsError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for DescribeValidDBInstanceModificationsError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for DescribeValidDBInstanceModificationsError { fn description(&self) -> &str { match *self { DescribeValidDBInstanceModificationsError::DBInstanceNotFoundFault(ref cause) => cause, DescribeValidDBInstanceModificationsError::InvalidDBInstanceStateFault(ref cause) => { cause } DescribeValidDBInstanceModificationsError::Validation(ref cause) => cause, DescribeValidDBInstanceModificationsError::Credentials(ref err) => err.description(), DescribeValidDBInstanceModificationsError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } DescribeValidDBInstanceModificationsError::Unknown(ref cause) => cause, } } } /// Errors returned by FailoverDBCluster #[derive(Debug, PartialEq)] pub enum FailoverDBClusterError { /// <p> <i>DBClusterIdentifier</i> does not refer to an existing DB cluster. </p> DBClusterNotFoundFault(String), /// <p>The DB cluster is not in a valid state.</p> InvalidDBClusterStateFault(String), /// <p> The specified DB instance is not in the <i>available</i> state. </p> InvalidDBInstanceStateFault(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl FailoverDBClusterError { pub fn from_body(body: &str) -> FailoverDBClusterError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ "DBClusterNotFoundFault" => FailoverDBClusterError::DBClusterNotFoundFault( String::from(parsed_error.message), ), "InvalidDBClusterStateFault" => FailoverDBClusterError::InvalidDBClusterStateFault( String::from(parsed_error.message), ), "InvalidDBInstanceState" => FailoverDBClusterError::InvalidDBInstanceStateFault( String::from(parsed_error.message), ), _ => FailoverDBClusterError::Unknown(String::from(body)), }, Err(_) => FailoverDBClusterError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for FailoverDBClusterError { fn from(err: XmlParseError) -> FailoverDBClusterError { let XmlParseError(message) = err; FailoverDBClusterError::Unknown(message.to_string()) } } impl From<CredentialsError> for FailoverDBClusterError { fn from(err: CredentialsError) -> FailoverDBClusterError { FailoverDBClusterError::Credentials(err) } } impl From<HttpDispatchError> for FailoverDBClusterError { fn from(err: HttpDispatchError) -> FailoverDBClusterError { FailoverDBClusterError::HttpDispatch(err) } } impl From<io::Error> for FailoverDBClusterError { fn from(err: io::Error) -> FailoverDBClusterError { FailoverDBClusterError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for FailoverDBClusterError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for FailoverDBClusterError { fn description(&self) -> &str { match *self { FailoverDBClusterError::DBClusterNotFoundFault(ref cause) => cause, FailoverDBClusterError::InvalidDBClusterStateFault(ref cause) => cause, FailoverDBClusterError::InvalidDBInstanceStateFault(ref cause) => cause, FailoverDBClusterError::Validation(ref cause) => cause, FailoverDBClusterError::Credentials(ref err) => err.description(), FailoverDBClusterError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } FailoverDBClusterError::Unknown(ref cause) => cause, } } } /// Errors returned by ListTagsForResource #[derive(Debug, PartialEq)] pub enum ListTagsForResourceError { /// <p> <i>DBClusterIdentifier</i> does not refer to an existing DB cluster. </p> DBClusterNotFoundFault(String), /// <p> <i>DBInstanceIdentifier</i> does not refer to an existing DB instance. </p> DBInstanceNotFoundFault(String), /// <p> <i>DBSnapshotIdentifier</i> does not refer to an existing DB snapshot. </p> DBSnapshotNotFoundFault(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl ListTagsForResourceError { pub fn from_body(body: &str) -> ListTagsForResourceError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ "DBClusterNotFoundFault" => ListTagsForResourceError::DBClusterNotFoundFault( String::from(parsed_error.message), ), "DBInstanceNotFound" => ListTagsForResourceError::DBInstanceNotFoundFault( String::from(parsed_error.message), ), "DBSnapshotNotFound" => ListTagsForResourceError::DBSnapshotNotFoundFault( String::from(parsed_error.message), ), _ => ListTagsForResourceError::Unknown(String::from(body)), }, Err(_) => ListTagsForResourceError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for ListTagsForResourceError { fn from(err: XmlParseError) -> ListTagsForResourceError { let XmlParseError(message) = err; ListTagsForResourceError::Unknown(message.to_string()) } } impl From<CredentialsError> for ListTagsForResourceError { fn from(err: CredentialsError) -> ListTagsForResourceError { ListTagsForResourceError::Credentials(err) } } impl From<HttpDispatchError> for ListTagsForResourceError { fn from(err: HttpDispatchError) -> ListTagsForResourceError { ListTagsForResourceError::HttpDispatch(err) } } impl From<io::Error> for ListTagsForResourceError { fn from(err: io::Error) -> ListTagsForResourceError { ListTagsForResourceError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for ListTagsForResourceError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for ListTagsForResourceError { fn description(&self) -> &str { match *self { ListTagsForResourceError::DBClusterNotFoundFault(ref cause) => cause, ListTagsForResourceError::DBInstanceNotFoundFault(ref cause) => cause, ListTagsForResourceError::DBSnapshotNotFoundFault(ref cause) => cause, ListTagsForResourceError::Validation(ref cause) => cause, ListTagsForResourceError::Credentials(ref err) => err.description(), ListTagsForResourceError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } ListTagsForResourceError::Unknown(ref cause) => cause, } } } /// Errors returned by ModifyDBCluster #[derive(Debug, PartialEq)] pub enum ModifyDBClusterError { /// <p>User already has a DB cluster with the given identifier.</p> DBClusterAlreadyExistsFault(String), /// <p> <i>DBClusterIdentifier</i> does not refer to an existing DB cluster. </p> DBClusterNotFoundFault(String), /// <p> <i>DBClusterParameterGroupName</i> does not refer to an existing DB Cluster parameter group. </p> DBClusterParameterGroupNotFoundFault(String), /// <p> <i>DBSubnetGroupName</i> does not refer to an existing DB subnet group. </p> DBSubnetGroupNotFoundFault(String), /// <p>The DB cluster is not in a valid state.</p> InvalidDBClusterStateFault(String), /// <p> The specified DB instance is not in the <i>available</i> state. 
/// Errors returned by ModifyDBCluster
#[derive(Debug, PartialEq)]
pub enum ModifyDBClusterError {
    /// <p>User already has a DB cluster with the given identifier.</p>
    DBClusterAlreadyExistsFault(String),
    /// <p> <i>DBClusterIdentifier</i> does not refer to an existing DB cluster. </p>
    DBClusterNotFoundFault(String),
    /// <p> <i>DBClusterParameterGroupName</i> does not refer to an existing DB Cluster parameter group. </p>
    DBClusterParameterGroupNotFoundFault(String),
    /// <p> <i>DBSubnetGroupName</i> does not refer to an existing DB subnet group. </p>
    DBSubnetGroupNotFoundFault(String),
    /// <p>The DB cluster is not in a valid state.</p>
    InvalidDBClusterStateFault(String),
    /// <p> The specified DB instance is not in the <i>available</i> state. </p>
    InvalidDBInstanceStateFault(String),
    /// <p>The state of the DB security group does not allow deletion.</p>
    InvalidDBSecurityGroupStateFault(String),
    /// <p>The DB subnet group cannot be deleted because it is in use.</p>
    InvalidDBSubnetGroupStateFault(String),
    /// <p>The requested subnet is invalid, or multiple subnets were requested that are not all in a common VPC.</p>
    InvalidSubnet(String),
    /// <p>The DB subnet group does not cover all Availability Zones after it is created, because of changes that were made by the user.</p>
    InvalidVPCNetworkStateFault(String),
    /// <p>Request would result in user exceeding the allowed amount of storage available across all DB instances.</p>
    StorageQuotaExceededFault(String),
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl ModifyDBClusterError {
    pub fn from_body(body: &str) -> ModifyDBClusterError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
                "DBClusterAlreadyExistsFault" => ModifyDBClusterError::DBClusterAlreadyExistsFault(
                    String::from(parsed_error.message),
                ),
                "DBClusterNotFoundFault" => {
                    ModifyDBClusterError::DBClusterNotFoundFault(String::from(parsed_error.message))
                }
                "DBClusterParameterGroupNotFound" => {
                    ModifyDBClusterError::DBClusterParameterGroupNotFoundFault(String::from(
                        parsed_error.message,
                    ))
                }
                "DBSubnetGroupNotFoundFault" => ModifyDBClusterError::DBSubnetGroupNotFoundFault(
                    String::from(parsed_error.message),
                ),
                "InvalidDBClusterStateFault" => ModifyDBClusterError::InvalidDBClusterStateFault(
                    String::from(parsed_error.message),
                ),
                "InvalidDBInstanceState" => ModifyDBClusterError::InvalidDBInstanceStateFault(
                    String::from(parsed_error.message),
                ),
                "InvalidDBSecurityGroupState" => {
                    ModifyDBClusterError::InvalidDBSecurityGroupStateFault(String::from(
                        parsed_error.message,
                    ))
                }
                "InvalidDBSubnetGroupStateFault" => {
                    ModifyDBClusterError::InvalidDBSubnetGroupStateFault(String::from(
                        parsed_error.message,
                    ))
                }
                "InvalidSubnet" => {
                    ModifyDBClusterError::InvalidSubnet(String::from(parsed_error.message))
                }
                "InvalidVPCNetworkStateFault" => ModifyDBClusterError::InvalidVPCNetworkStateFault(
                    String::from(parsed_error.message),
                ),
                "StorageQuotaExceeded" => ModifyDBClusterError::StorageQuotaExceededFault(
                    String::from(parsed_error.message),
                ),
                _ => ModifyDBClusterError::Unknown(String::from(body)),
            },
            Err(_) => ModifyDBClusterError::Unknown(body.to_string()),
        }
    }

    fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError>
    where
        T: Peek + Next,
    {
        start_element("ErrorResponse", stack)?;
        XmlErrorDeserializer::deserialize("Error", stack)
    }
}

impl From<XmlParseError> for ModifyDBClusterError {
    fn from(err: XmlParseError) -> ModifyDBClusterError {
        let XmlParseError(message) = err;
        ModifyDBClusterError::Unknown(message.to_string())
    }
}

impl From<CredentialsError> for ModifyDBClusterError {
    fn from(err: CredentialsError) -> ModifyDBClusterError {
        ModifyDBClusterError::Credentials(err)
    }
}

impl From<HttpDispatchError> for ModifyDBClusterError {
    fn from(err: HttpDispatchError) -> ModifyDBClusterError {
        ModifyDBClusterError::HttpDispatch(err)
    }
}

impl From<io::Error> for ModifyDBClusterError {
    fn from(err: io::Error) -> ModifyDBClusterError {
        ModifyDBClusterError::HttpDispatch(HttpDispatchError::from(err))
    }
}

impl fmt::Display for ModifyDBClusterError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.description())
    }
}

impl Error for ModifyDBClusterError {
    fn description(&self) -> &str {
        match *self {
            ModifyDBClusterError::DBClusterAlreadyExistsFault(ref cause) => cause,
            ModifyDBClusterError::DBClusterNotFoundFault(ref cause) => cause,
            ModifyDBClusterError::DBClusterParameterGroupNotFoundFault(ref cause) => cause,
            ModifyDBClusterError::DBSubnetGroupNotFoundFault(ref cause) => cause,
            ModifyDBClusterError::InvalidDBClusterStateFault(ref cause) => cause,
            ModifyDBClusterError::InvalidDBInstanceStateFault(ref cause) => cause,
            ModifyDBClusterError::InvalidDBSecurityGroupStateFault(ref cause) => cause,
            ModifyDBClusterError::InvalidDBSubnetGroupStateFault(ref cause) => cause,
            ModifyDBClusterError::InvalidSubnet(ref cause) => cause,
            ModifyDBClusterError::InvalidVPCNetworkStateFault(ref cause) => cause,
            ModifyDBClusterError::StorageQuotaExceededFault(ref cause) => cause,
            ModifyDBClusterError::Validation(ref cause) => cause,
            ModifyDBClusterError::Credentials(ref err) => err.description(),
            ModifyDBClusterError::HttpDispatch(ref dispatch_error) => dispatch_error.description(),
            ModifyDBClusterError::Unknown(ref cause) => cause,
        }
    }
}

/// Errors returned by ModifyDBClusterParameterGroup
#[derive(Debug, PartialEq)]
pub enum ModifyDBClusterParameterGroupError {
    /// <p> <i>DBParameterGroupName</i> does not refer to an existing DB parameter group. </p>
    DBParameterGroupNotFoundFault(String),
    /// <p>The DB parameter group is in use or is in an invalid state. If you are attempting to delete the parameter group, you cannot delete it when the parameter group is in this state.</p>
    InvalidDBParameterGroupStateFault(String),
    /// An error occurred dispatching the HTTP request
    HttpDispatch(HttpDispatchError),
    /// An error was encountered with AWS credentials.
    Credentials(CredentialsError),
    /// A validation error occurred. Details from AWS are provided.
    Validation(String),
    /// An unknown error occurred. The raw HTTP response is provided.
    Unknown(String),
}

impl ModifyDBClusterParameterGroupError {
    pub fn from_body(body: &str) -> ModifyDBClusterParameterGroupError {
        let reader = EventReader::new(body.as_bytes());
        let mut stack = XmlResponse::new(reader.into_iter().peekable());
        find_start_element(&mut stack);
        match Self::deserialize(&mut stack) {
            Ok(parsed_error) => match &parsed_error.code[..] {
{ "DBParameterGroupNotFound" => { ModifyDBClusterParameterGroupError::DBParameterGroupNotFoundFault(String::from( parsed_error.message, )) } "InvalidDBParameterGroupState" => { ModifyDBClusterParameterGroupError::InvalidDBParameterGroupStateFault( String::from(parsed_error.message), ) } _ => ModifyDBClusterParameterGroupError::Unknown(String::from(body)), }, Err(_) => ModifyDBClusterParameterGroupError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for ModifyDBClusterParameterGroupError { fn from(err: XmlParseError) -> ModifyDBClusterParameterGroupError { let XmlParseError(message) = err; ModifyDBClusterParameterGroupError::Unknown(message.to_string()) } } impl From<CredentialsError> for ModifyDBClusterParameterGroupError { fn from(err: CredentialsError) -> ModifyDBClusterParameterGroupError { ModifyDBClusterParameterGroupError::Credentials(err) } } impl From<HttpDispatchError> for ModifyDBClusterParameterGroupError { fn from(err: HttpDispatchError) -> ModifyDBClusterParameterGroupError { ModifyDBClusterParameterGroupError::HttpDispatch(err) } } impl From<io::Error> for ModifyDBClusterParameterGroupError { fn from(err: io::Error) -> ModifyDBClusterParameterGroupError { ModifyDBClusterParameterGroupError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for ModifyDBClusterParameterGroupError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for ModifyDBClusterParameterGroupError { fn description(&self) -> &str { match *self { ModifyDBClusterParameterGroupError::DBParameterGroupNotFoundFault(ref cause) => cause, ModifyDBClusterParameterGroupError::InvalidDBParameterGroupStateFault(ref cause) => { cause } ModifyDBClusterParameterGroupError::Validation(ref cause) => cause, ModifyDBClusterParameterGroupError::Credentials(ref err) => err.description(), ModifyDBClusterParameterGroupError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } ModifyDBClusterParameterGroupError::Unknown(ref cause) => cause, } } } /// Errors returned by ModifyDBClusterSnapshotAttribute #[derive(Debug, PartialEq)] pub enum ModifyDBClusterSnapshotAttributeError { /// <p> <i>DBClusterSnapshotIdentifier</i> does not refer to an existing DB cluster snapshot. </p> DBClusterSnapshotNotFoundFault(String), /// <p>The supplied value is not a valid DB cluster snapshot state.</p> InvalidDBClusterSnapshotStateFault(String), /// <p>You have exceeded the maximum number of accounts that you can share a manual DB snapshot with.</p> SharedSnapshotQuotaExceededFault(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl ModifyDBClusterSnapshotAttributeError { pub fn from_body(body: &str) -> ModifyDBClusterSnapshotAttributeError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ "DBClusterSnapshotNotFoundFault" => { ModifyDBClusterSnapshotAttributeError::DBClusterSnapshotNotFoundFault( String::from(parsed_error.message), ) } "InvalidDBClusterSnapshotStateFault" => { ModifyDBClusterSnapshotAttributeError::InvalidDBClusterSnapshotStateFault( String::from(parsed_error.message), ) } "SharedSnapshotQuotaExceeded" => { ModifyDBClusterSnapshotAttributeError::SharedSnapshotQuotaExceededFault( String::from(parsed_error.message), ) } _ => ModifyDBClusterSnapshotAttributeError::Unknown(String::from(body)), }, Err(_) => ModifyDBClusterSnapshotAttributeError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for ModifyDBClusterSnapshotAttributeError { fn from(err: XmlParseError) -> ModifyDBClusterSnapshotAttributeError { let XmlParseError(message) = err; ModifyDBClusterSnapshotAttributeError::Unknown(message.to_string()) } } impl From<CredentialsError> for ModifyDBClusterSnapshotAttributeError { fn from(err: CredentialsError) -> ModifyDBClusterSnapshotAttributeError { ModifyDBClusterSnapshotAttributeError::Credentials(err) } } impl From<HttpDispatchError> for ModifyDBClusterSnapshotAttributeError { fn from(err: HttpDispatchError) -> ModifyDBClusterSnapshotAttributeError { ModifyDBClusterSnapshotAttributeError::HttpDispatch(err) } } impl From<io::Error> for ModifyDBClusterSnapshotAttributeError { fn from(err: io::Error) -> ModifyDBClusterSnapshotAttributeError { ModifyDBClusterSnapshotAttributeError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for ModifyDBClusterSnapshotAttributeError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for ModifyDBClusterSnapshotAttributeError { fn description(&self) -> &str { match *self { ModifyDBClusterSnapshotAttributeError::DBClusterSnapshotNotFoundFault(ref cause) => { cause } ModifyDBClusterSnapshotAttributeError::InvalidDBClusterSnapshotStateFault( ref cause, ) => cause, ModifyDBClusterSnapshotAttributeError::SharedSnapshotQuotaExceededFault(ref cause) => { cause } ModifyDBClusterSnapshotAttributeError::Validation(ref cause) => cause, ModifyDBClusterSnapshotAttributeError::Credentials(ref err) => err.description(), ModifyDBClusterSnapshotAttributeError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } ModifyDBClusterSnapshotAttributeError::Unknown(ref cause) => cause, } } } /// Errors returned by ModifyDBInstance #[derive(Debug, PartialEq)] pub enum ModifyDBInstanceError { /// <p>Specified CIDRIP or EC2 security group is not authorized for the specified DB security group.</p> <p>Neptune may not also be authorized via IAM to perform necessary actions on your behalf.</p> AuthorizationNotFoundFault(String), /// <p> <i>CertificateIdentifier</i> does not refer to an existing certificate. </p> CertificateNotFoundFault(String), /// <p>User already has a DB instance with the given identifier.</p> DBInstanceAlreadyExistsFault(String), /// <p> <i>DBInstanceIdentifier</i> does not refer to an existing DB instance. </p> DBInstanceNotFoundFault(String), /// <p> <i>DBParameterGroupName</i> does not refer to an existing DB parameter group. </p> DBParameterGroupNotFoundFault(String), /// <p> <i>DBSecurityGroupName</i> does not refer to an existing DB security group. 
</p> DBSecurityGroupNotFoundFault(String), /// <p>The DB upgrade failed because a resource the DB depends on could not be modified.</p> DBUpgradeDependencyFailureFault(String), /// <p> <i>Domain</i> does not refer to an existing Active Directory Domain. </p> DomainNotFoundFault(String), /// <p>Specified DB instance class is not available in the specified Availability Zone.</p> InsufficientDBInstanceCapacityFault(String), /// <p> The specified DB instance is not in the <i>available</i> state. </p> InvalidDBInstanceStateFault(String), /// <p>The state of the DB security group does not allow deletion.</p> InvalidDBSecurityGroupStateFault(String), /// <p>DB subnet group does not cover all Availability Zones after it is created because users' change.</p> InvalidVPCNetworkStateFault(String), OptionGroupNotFoundFault(String), /// <p>Provisioned IOPS not available in the specified Availability Zone.</p> ProvisionedIopsNotAvailableInAZFault(String), /// <p>Request would result in user exceeding the allowed amount of storage available across all DB instances.</p> StorageQuotaExceededFault(String), /// <p> <i>StorageType</i> specified cannot be associated with the DB Instance. </p> StorageTypeNotSupportedFault(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl ModifyDBInstanceError { pub fn from_body(body: &str) -> ModifyDBInstanceError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
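            // The code strings below are matched verbatim against the service
            // model, so the `Fault` suffix is inconsistent on the wire: some
            // codes carry it ("InvalidVPCNetworkStateFault",
            // "OptionGroupNotFoundFault") and some do not
            // ("StorageQuotaExceeded", "DBInstanceNotFound"), even though every
            // corresponding variant name ends in `Fault`.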
{ "AuthorizationNotFound" => ModifyDBInstanceError::AuthorizationNotFoundFault( String::from(parsed_error.message), ), "CertificateNotFound" => ModifyDBInstanceError::CertificateNotFoundFault( String::from(parsed_error.message), ), "DBInstanceAlreadyExists" => ModifyDBInstanceError::DBInstanceAlreadyExistsFault( String::from(parsed_error.message), ), "DBInstanceNotFound" => ModifyDBInstanceError::DBInstanceNotFoundFault( String::from(parsed_error.message), ), "DBParameterGroupNotFound" => ModifyDBInstanceError::DBParameterGroupNotFoundFault( String::from(parsed_error.message), ), "DBSecurityGroupNotFound" => ModifyDBInstanceError::DBSecurityGroupNotFoundFault( String::from(parsed_error.message), ), "DBUpgradeDependencyFailure" => { ModifyDBInstanceError::DBUpgradeDependencyFailureFault(String::from( parsed_error.message, )) } "DomainNotFoundFault" => { ModifyDBInstanceError::DomainNotFoundFault(String::from(parsed_error.message)) } "InsufficientDBInstanceCapacity" => { ModifyDBInstanceError::InsufficientDBInstanceCapacityFault(String::from( parsed_error.message, )) } "InvalidDBInstanceState" => ModifyDBInstanceError::InvalidDBInstanceStateFault( String::from(parsed_error.message), ), "InvalidDBSecurityGroupState" => { ModifyDBInstanceError::InvalidDBSecurityGroupStateFault(String::from( parsed_error.message, )) } "InvalidVPCNetworkStateFault" => { ModifyDBInstanceError::InvalidVPCNetworkStateFault(String::from( parsed_error.message, )) } "OptionGroupNotFoundFault" => ModifyDBInstanceError::OptionGroupNotFoundFault( String::from(parsed_error.message), ), "ProvisionedIopsNotAvailableInAZFault" => { ModifyDBInstanceError::ProvisionedIopsNotAvailableInAZFault(String::from( parsed_error.message, )) } "StorageQuotaExceeded" => ModifyDBInstanceError::StorageQuotaExceededFault( String::from(parsed_error.message), ), "StorageTypeNotSupported" => ModifyDBInstanceError::StorageTypeNotSupportedFault( String::from(parsed_error.message), ), _ => ModifyDBInstanceError::Unknown(String::from(body)), }, Err(_) => ModifyDBInstanceError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for ModifyDBInstanceError { fn from(err: XmlParseError) -> ModifyDBInstanceError { let XmlParseError(message) = err; ModifyDBInstanceError::Unknown(message.to_string()) } } impl From<CredentialsError> for ModifyDBInstanceError { fn from(err: CredentialsError) -> ModifyDBInstanceError { ModifyDBInstanceError::Credentials(err) } } impl From<HttpDispatchError> for ModifyDBInstanceError { fn from(err: HttpDispatchError) -> ModifyDBInstanceError { ModifyDBInstanceError::HttpDispatch(err) } } impl From<io::Error> for ModifyDBInstanceError { fn from(err: io::Error) -> ModifyDBInstanceError { ModifyDBInstanceError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for ModifyDBInstanceError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for ModifyDBInstanceError { fn description(&self) -> &str { match *self { ModifyDBInstanceError::AuthorizationNotFoundFault(ref cause) => cause, ModifyDBInstanceError::CertificateNotFoundFault(ref cause) => cause, ModifyDBInstanceError::DBInstanceAlreadyExistsFault(ref cause) => cause, ModifyDBInstanceError::DBInstanceNotFoundFault(ref cause) => cause, ModifyDBInstanceError::DBParameterGroupNotFoundFault(ref cause) => cause, 
ModifyDBInstanceError::DBSecurityGroupNotFoundFault(ref cause) => cause, ModifyDBInstanceError::DBUpgradeDependencyFailureFault(ref cause) => cause, ModifyDBInstanceError::DomainNotFoundFault(ref cause) => cause, ModifyDBInstanceError::InsufficientDBInstanceCapacityFault(ref cause) => cause, ModifyDBInstanceError::InvalidDBInstanceStateFault(ref cause) => cause, ModifyDBInstanceError::InvalidDBSecurityGroupStateFault(ref cause) => cause, ModifyDBInstanceError::InvalidVPCNetworkStateFault(ref cause) => cause, ModifyDBInstanceError::OptionGroupNotFoundFault(ref cause) => cause, ModifyDBInstanceError::ProvisionedIopsNotAvailableInAZFault(ref cause) => cause, ModifyDBInstanceError::StorageQuotaExceededFault(ref cause) => cause, ModifyDBInstanceError::StorageTypeNotSupportedFault(ref cause) => cause, ModifyDBInstanceError::Validation(ref cause) => cause, ModifyDBInstanceError::Credentials(ref err) => err.description(), ModifyDBInstanceError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), ModifyDBInstanceError::Unknown(ref cause) => cause, } } } /// Errors returned by ModifyDBParameterGroup #[derive(Debug, PartialEq)] pub enum ModifyDBParameterGroupError { /// <p> <i>DBParameterGroupName</i> does not refer to an existing DB parameter group. </p> DBParameterGroupNotFoundFault(String), /// <p>The DB parameter group is in use or is in an invalid state. If you are attempting to delete the parameter group, you cannot delete it when the parameter group is in this state.</p> InvalidDBParameterGroupStateFault(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl ModifyDBParameterGroupError { pub fn from_body(body: &str) -> ModifyDBParameterGroupError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
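            // Two failure paths collapse into `Unknown`: an error code that
            // matches none of the arms below, and a body that fails to parse as
            // an `ErrorResponse` document at all (the `Err(_)` arm). Both keep
            // the raw response body so callers can inspect what AWS returned.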
{ "DBParameterGroupNotFound" => { ModifyDBParameterGroupError::DBParameterGroupNotFoundFault(String::from( parsed_error.message, )) } "InvalidDBParameterGroupState" => { ModifyDBParameterGroupError::InvalidDBParameterGroupStateFault(String::from( parsed_error.message, )) } _ => ModifyDBParameterGroupError::Unknown(String::from(body)), }, Err(_) => ModifyDBParameterGroupError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for ModifyDBParameterGroupError { fn from(err: XmlParseError) -> ModifyDBParameterGroupError { let XmlParseError(message) = err; ModifyDBParameterGroupError::Unknown(message.to_string()) } } impl From<CredentialsError> for ModifyDBParameterGroupError { fn from(err: CredentialsError) -> ModifyDBParameterGroupError { ModifyDBParameterGroupError::Credentials(err) } } impl From<HttpDispatchError> for ModifyDBParameterGroupError { fn from(err: HttpDispatchError) -> ModifyDBParameterGroupError { ModifyDBParameterGroupError::HttpDispatch(err) } } impl From<io::Error> for ModifyDBParameterGroupError { fn from(err: io::Error) -> ModifyDBParameterGroupError { ModifyDBParameterGroupError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for ModifyDBParameterGroupError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for ModifyDBParameterGroupError { fn description(&self) -> &str { match *self { ModifyDBParameterGroupError::DBParameterGroupNotFoundFault(ref cause) => cause, ModifyDBParameterGroupError::InvalidDBParameterGroupStateFault(ref cause) => cause, ModifyDBParameterGroupError::Validation(ref cause) => cause, ModifyDBParameterGroupError::Credentials(ref err) => err.description(), ModifyDBParameterGroupError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } ModifyDBParameterGroupError::Unknown(ref cause) => cause, } } } /// Errors returned by ModifyDBSubnetGroup #[derive(Debug, PartialEq)] pub enum ModifyDBSubnetGroupError { /// <p>Subnets in the DB subnet group should cover at least two Availability Zones unless there is only one Availability Zone.</p> DBSubnetGroupDoesNotCoverEnoughAZs(String), /// <p> <i>DBSubnetGroupName</i> does not refer to an existing DB subnet group. </p> DBSubnetGroupNotFoundFault(String), /// <p>Request would result in user exceeding the allowed number of subnets in a DB subnet groups.</p> DBSubnetQuotaExceededFault(String), /// <p>The requested subnet is invalid, or multiple subnets were requested that are not all in a common VPC.</p> InvalidSubnet(String), /// <p>The DB subnet is already in use in the Availability Zone.</p> SubnetAlreadyInUse(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl ModifyDBSubnetGroupError { pub fn from_body(body: &str) -> ModifyDBSubnetGroupError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ "DBSubnetGroupDoesNotCoverEnoughAZs" => { ModifyDBSubnetGroupError::DBSubnetGroupDoesNotCoverEnoughAZs(String::from( parsed_error.message, )) } "DBSubnetGroupNotFoundFault" => { ModifyDBSubnetGroupError::DBSubnetGroupNotFoundFault(String::from( parsed_error.message, )) } "DBSubnetQuotaExceededFault" => { ModifyDBSubnetGroupError::DBSubnetQuotaExceededFault(String::from( parsed_error.message, )) } "InvalidSubnet" => { ModifyDBSubnetGroupError::InvalidSubnet(String::from(parsed_error.message)) } "SubnetAlreadyInUse" => { ModifyDBSubnetGroupError::SubnetAlreadyInUse(String::from(parsed_error.message)) } _ => ModifyDBSubnetGroupError::Unknown(String::from(body)), }, Err(_) => ModifyDBSubnetGroupError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for ModifyDBSubnetGroupError { fn from(err: XmlParseError) -> ModifyDBSubnetGroupError { let XmlParseError(message) = err; ModifyDBSubnetGroupError::Unknown(message.to_string()) } } impl From<CredentialsError> for ModifyDBSubnetGroupError { fn from(err: CredentialsError) -> ModifyDBSubnetGroupError { ModifyDBSubnetGroupError::Credentials(err) } } impl From<HttpDispatchError> for ModifyDBSubnetGroupError { fn from(err: HttpDispatchError) -> ModifyDBSubnetGroupError { ModifyDBSubnetGroupError::HttpDispatch(err) } } impl From<io::Error> for ModifyDBSubnetGroupError { fn from(err: io::Error) -> ModifyDBSubnetGroupError { ModifyDBSubnetGroupError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for ModifyDBSubnetGroupError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for ModifyDBSubnetGroupError { fn description(&self) -> &str { match *self { ModifyDBSubnetGroupError::DBSubnetGroupDoesNotCoverEnoughAZs(ref cause) => cause, ModifyDBSubnetGroupError::DBSubnetGroupNotFoundFault(ref cause) => cause, ModifyDBSubnetGroupError::DBSubnetQuotaExceededFault(ref cause) => cause, ModifyDBSubnetGroupError::InvalidSubnet(ref cause) => cause, ModifyDBSubnetGroupError::SubnetAlreadyInUse(ref cause) => cause, ModifyDBSubnetGroupError::Validation(ref cause) => cause, ModifyDBSubnetGroupError::Credentials(ref err) => err.description(), ModifyDBSubnetGroupError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } ModifyDBSubnetGroupError::Unknown(ref cause) => cause, } } } /// Errors returned by ModifyEventSubscription #[derive(Debug, PartialEq)] pub enum ModifyEventSubscriptionError { EventSubscriptionQuotaExceededFault(String), SNSInvalidTopicFault(String), SNSNoAuthorizationFault(String), SNSTopicArnNotFoundFault(String), SubscriptionCategoryNotFoundFault(String), SubscriptionNotFoundFault(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl ModifyEventSubscriptionError { pub fn from_body(body: &str) -> ModifyEventSubscriptionError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ "EventSubscriptionQuotaExceeded" => { ModifyEventSubscriptionError::EventSubscriptionQuotaExceededFault(String::from( parsed_error.message, )) } "SNSInvalidTopic" => ModifyEventSubscriptionError::SNSInvalidTopicFault( String::from(parsed_error.message), ), "SNSNoAuthorization" => ModifyEventSubscriptionError::SNSNoAuthorizationFault( String::from(parsed_error.message), ), "SNSTopicArnNotFound" => ModifyEventSubscriptionError::SNSTopicArnNotFoundFault( String::from(parsed_error.message), ), "SubscriptionCategoryNotFound" => { ModifyEventSubscriptionError::SubscriptionCategoryNotFoundFault(String::from( parsed_error.message, )) } "SubscriptionNotFound" => ModifyEventSubscriptionError::SubscriptionNotFoundFault( String::from(parsed_error.message), ), _ => ModifyEventSubscriptionError::Unknown(String::from(body)), }, Err(_) => ModifyEventSubscriptionError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for ModifyEventSubscriptionError { fn from(err: XmlParseError) -> ModifyEventSubscriptionError { let XmlParseError(message) = err; ModifyEventSubscriptionError::Unknown(message.to_string()) } } impl From<CredentialsError> for ModifyEventSubscriptionError { fn from(err: CredentialsError) -> ModifyEventSubscriptionError { ModifyEventSubscriptionError::Credentials(err) } } impl From<HttpDispatchError> for ModifyEventSubscriptionError { fn from(err: HttpDispatchError) -> ModifyEventSubscriptionError { ModifyEventSubscriptionError::HttpDispatch(err) } } impl From<io::Error> for ModifyEventSubscriptionError { fn from(err: io::Error) -> ModifyEventSubscriptionError { ModifyEventSubscriptionError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for ModifyEventSubscriptionError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for ModifyEventSubscriptionError { fn description(&self) -> &str { match *self { ModifyEventSubscriptionError::EventSubscriptionQuotaExceededFault(ref cause) => cause, ModifyEventSubscriptionError::SNSInvalidTopicFault(ref cause) => cause, ModifyEventSubscriptionError::SNSNoAuthorizationFault(ref cause) => cause, ModifyEventSubscriptionError::SNSTopicArnNotFoundFault(ref cause) => cause, ModifyEventSubscriptionError::SubscriptionCategoryNotFoundFault(ref cause) => cause, ModifyEventSubscriptionError::SubscriptionNotFoundFault(ref cause) => cause, ModifyEventSubscriptionError::Validation(ref cause) => cause, ModifyEventSubscriptionError::Credentials(ref err) => err.description(), ModifyEventSubscriptionError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } ModifyEventSubscriptionError::Unknown(ref cause) => cause, } } } /// Errors returned by PromoteReadReplicaDBCluster #[derive(Debug, PartialEq)] pub enum PromoteReadReplicaDBClusterError { /// <p> <i>DBClusterIdentifier</i> does not refer to an existing DB cluster. </p> DBClusterNotFoundFault(String), /// <p>The DB cluster is not in a valid state.</p> InvalidDBClusterStateFault(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. 
Unknown(String), } impl PromoteReadReplicaDBClusterError { pub fn from_body(body: &str) -> PromoteReadReplicaDBClusterError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { "DBClusterNotFoundFault" => { PromoteReadReplicaDBClusterError::DBClusterNotFoundFault(String::from( parsed_error.message, )) } "InvalidDBClusterStateFault" => { PromoteReadReplicaDBClusterError::InvalidDBClusterStateFault(String::from( parsed_error.message, )) } _ => PromoteReadReplicaDBClusterError::Unknown(String::from(body)), }, Err(_) => PromoteReadReplicaDBClusterError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for PromoteReadReplicaDBClusterError { fn from(err: XmlParseError) -> PromoteReadReplicaDBClusterError { let XmlParseError(message) = err; PromoteReadReplicaDBClusterError::Unknown(message.to_string()) } } impl From<CredentialsError> for PromoteReadReplicaDBClusterError { fn from(err: CredentialsError) -> PromoteReadReplicaDBClusterError { PromoteReadReplicaDBClusterError::Credentials(err) } } impl From<HttpDispatchError> for PromoteReadReplicaDBClusterError { fn from(err: HttpDispatchError) -> PromoteReadReplicaDBClusterError { PromoteReadReplicaDBClusterError::HttpDispatch(err) } } impl From<io::Error> for PromoteReadReplicaDBClusterError { fn from(err: io::Error) -> PromoteReadReplicaDBClusterError { PromoteReadReplicaDBClusterError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for PromoteReadReplicaDBClusterError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for PromoteReadReplicaDBClusterError { fn description(&self) -> &str { match *self { PromoteReadReplicaDBClusterError::DBClusterNotFoundFault(ref cause) => cause, PromoteReadReplicaDBClusterError::InvalidDBClusterStateFault(ref cause) => cause, PromoteReadReplicaDBClusterError::Validation(ref cause) => cause, PromoteReadReplicaDBClusterError::Credentials(ref err) => err.description(), PromoteReadReplicaDBClusterError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } PromoteReadReplicaDBClusterError::Unknown(ref cause) => cause, } } } /// Errors returned by RebootDBInstance #[derive(Debug, PartialEq)] pub enum RebootDBInstanceError { /// <p> <i>DBInstanceIdentifier</i> does not refer to an existing DB instance. </p> DBInstanceNotFoundFault(String), /// <p> The specified DB instance is not in the <i>available</i> state. </p> InvalidDBInstanceStateFault(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl RebootDBInstanceError { pub fn from_body(body: &str) -> RebootDBInstanceError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
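            // Caller-side handling is a plain match on the variants produced
            // here. A minimal sketch, assuming a `client` implementing the
            // `Neptune` trait, a populated `RebootDBInstanceMessage` named
            // `input`, and that this rusoto version exposes
            // `RusotoFuture::sync()` (all assumptions, not defined in this file):
            //
            //   match client.reboot_db_instance(input).sync() {
            //       Ok(result) => println!("reboot started: {:?}", result),
            //       Err(RebootDBInstanceError::DBInstanceNotFoundFault(msg)) => {
            //           eprintln!("no such instance: {}", msg)
            //       }
            //       Err(RebootDBInstanceError::InvalidDBInstanceStateFault(msg)) => {
            //           eprintln!("instance not in the available state: {}", msg)
            //       }
            //       Err(other) => eprintln!("unexpected error: {:?}", other),
            //   }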
{ "DBInstanceNotFound" => RebootDBInstanceError::DBInstanceNotFoundFault( String::from(parsed_error.message), ), "InvalidDBInstanceState" => RebootDBInstanceError::InvalidDBInstanceStateFault( String::from(parsed_error.message), ), _ => RebootDBInstanceError::Unknown(String::from(body)), }, Err(_) => RebootDBInstanceError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for RebootDBInstanceError { fn from(err: XmlParseError) -> RebootDBInstanceError { let XmlParseError(message) = err; RebootDBInstanceError::Unknown(message.to_string()) } } impl From<CredentialsError> for RebootDBInstanceError { fn from(err: CredentialsError) -> RebootDBInstanceError { RebootDBInstanceError::Credentials(err) } } impl From<HttpDispatchError> for RebootDBInstanceError { fn from(err: HttpDispatchError) -> RebootDBInstanceError { RebootDBInstanceError::HttpDispatch(err) } } impl From<io::Error> for RebootDBInstanceError { fn from(err: io::Error) -> RebootDBInstanceError { RebootDBInstanceError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for RebootDBInstanceError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for RebootDBInstanceError { fn description(&self) -> &str { match *self { RebootDBInstanceError::DBInstanceNotFoundFault(ref cause) => cause, RebootDBInstanceError::InvalidDBInstanceStateFault(ref cause) => cause, RebootDBInstanceError::Validation(ref cause) => cause, RebootDBInstanceError::Credentials(ref err) => err.description(), RebootDBInstanceError::HttpDispatch(ref dispatch_error) => dispatch_error.description(), RebootDBInstanceError::Unknown(ref cause) => cause, } } } /// Errors returned by RemoveRoleFromDBCluster #[derive(Debug, PartialEq)] pub enum RemoveRoleFromDBClusterError { /// <p> <i>DBClusterIdentifier</i> does not refer to an existing DB cluster. </p> DBClusterNotFoundFault(String), /// <p>The specified IAM role Amazon Resource Name (ARN) is not associated with the specified DB cluster.</p> DBClusterRoleNotFoundFault(String), /// <p>The DB cluster is not in a valid state.</p> InvalidDBClusterStateFault(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl RemoveRoleFromDBClusterError { pub fn from_body(body: &str) -> RemoveRoleFromDBClusterError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ "DBClusterNotFoundFault" => RemoveRoleFromDBClusterError::DBClusterNotFoundFault( String::from(parsed_error.message), ), "DBClusterRoleNotFound" => { RemoveRoleFromDBClusterError::DBClusterRoleNotFoundFault(String::from( parsed_error.message, )) } "InvalidDBClusterStateFault" => { RemoveRoleFromDBClusterError::InvalidDBClusterStateFault(String::from( parsed_error.message, )) } _ => RemoveRoleFromDBClusterError::Unknown(String::from(body)), }, Err(_) => RemoveRoleFromDBClusterError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for RemoveRoleFromDBClusterError { fn from(err: XmlParseError) -> RemoveRoleFromDBClusterError { let XmlParseError(message) = err; RemoveRoleFromDBClusterError::Unknown(message.to_string()) } } impl From<CredentialsError> for RemoveRoleFromDBClusterError { fn from(err: CredentialsError) -> RemoveRoleFromDBClusterError { RemoveRoleFromDBClusterError::Credentials(err) } } impl From<HttpDispatchError> for RemoveRoleFromDBClusterError { fn from(err: HttpDispatchError) -> RemoveRoleFromDBClusterError { RemoveRoleFromDBClusterError::HttpDispatch(err) } } impl From<io::Error> for RemoveRoleFromDBClusterError { fn from(err: io::Error) -> RemoveRoleFromDBClusterError { RemoveRoleFromDBClusterError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for RemoveRoleFromDBClusterError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for RemoveRoleFromDBClusterError { fn description(&self) -> &str { match *self { RemoveRoleFromDBClusterError::DBClusterNotFoundFault(ref cause) => cause, RemoveRoleFromDBClusterError::DBClusterRoleNotFoundFault(ref cause) => cause, RemoveRoleFromDBClusterError::InvalidDBClusterStateFault(ref cause) => cause, RemoveRoleFromDBClusterError::Validation(ref cause) => cause, RemoveRoleFromDBClusterError::Credentials(ref err) => err.description(), RemoveRoleFromDBClusterError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } RemoveRoleFromDBClusterError::Unknown(ref cause) => cause, } } } /// Errors returned by RemoveSourceIdentifierFromSubscription #[derive(Debug, PartialEq)] pub enum RemoveSourceIdentifierFromSubscriptionError { SourceNotFoundFault(String), SubscriptionNotFoundFault(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl RemoveSourceIdentifierFromSubscriptionError { pub fn from_body(body: &str) -> RemoveSourceIdentifierFromSubscriptionError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ "SourceNotFound" => { RemoveSourceIdentifierFromSubscriptionError::SourceNotFoundFault(String::from( parsed_error.message, )) } "SubscriptionNotFound" => { RemoveSourceIdentifierFromSubscriptionError::SubscriptionNotFoundFault( String::from(parsed_error.message), ) } _ => RemoveSourceIdentifierFromSubscriptionError::Unknown(String::from(body)), }, Err(_) => RemoveSourceIdentifierFromSubscriptionError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for RemoveSourceIdentifierFromSubscriptionError { fn from(err: XmlParseError) -> RemoveSourceIdentifierFromSubscriptionError { let XmlParseError(message) = err; RemoveSourceIdentifierFromSubscriptionError::Unknown(message.to_string()) } } impl From<CredentialsError> for RemoveSourceIdentifierFromSubscriptionError { fn from(err: CredentialsError) -> RemoveSourceIdentifierFromSubscriptionError { RemoveSourceIdentifierFromSubscriptionError::Credentials(err) } } impl From<HttpDispatchError> for RemoveSourceIdentifierFromSubscriptionError { fn from(err: HttpDispatchError) -> RemoveSourceIdentifierFromSubscriptionError { RemoveSourceIdentifierFromSubscriptionError::HttpDispatch(err) } } impl From<io::Error> for RemoveSourceIdentifierFromSubscriptionError { fn from(err: io::Error) -> RemoveSourceIdentifierFromSubscriptionError { RemoveSourceIdentifierFromSubscriptionError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for RemoveSourceIdentifierFromSubscriptionError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for RemoveSourceIdentifierFromSubscriptionError { fn description(&self) -> &str { match *self { RemoveSourceIdentifierFromSubscriptionError::SourceNotFoundFault(ref cause) => cause, RemoveSourceIdentifierFromSubscriptionError::SubscriptionNotFoundFault(ref cause) => { cause } RemoveSourceIdentifierFromSubscriptionError::Validation(ref cause) => cause, RemoveSourceIdentifierFromSubscriptionError::Credentials(ref err) => err.description(), RemoveSourceIdentifierFromSubscriptionError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } RemoveSourceIdentifierFromSubscriptionError::Unknown(ref cause) => cause, } } } /// Errors returned by RemoveTagsFromResource #[derive(Debug, PartialEq)] pub enum RemoveTagsFromResourceError { /// <p> <i>DBClusterIdentifier</i> does not refer to an existing DB cluster. </p> DBClusterNotFoundFault(String), /// <p> <i>DBInstanceIdentifier</i> does not refer to an existing DB instance. </p> DBInstanceNotFoundFault(String), /// <p> <i>DBSnapshotIdentifier</i> does not refer to an existing DB snapshot. </p> DBSnapshotNotFoundFault(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl RemoveTagsFromResourceError { pub fn from_body(body: &str) -> RemoveTagsFromResourceError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ "DBClusterNotFoundFault" => RemoveTagsFromResourceError::DBClusterNotFoundFault( String::from(parsed_error.message), ), "DBInstanceNotFound" => RemoveTagsFromResourceError::DBInstanceNotFoundFault( String::from(parsed_error.message), ), "DBSnapshotNotFound" => RemoveTagsFromResourceError::DBSnapshotNotFoundFault( String::from(parsed_error.message), ), _ => RemoveTagsFromResourceError::Unknown(String::from(body)), }, Err(_) => RemoveTagsFromResourceError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for RemoveTagsFromResourceError { fn from(err: XmlParseError) -> RemoveTagsFromResourceError { let XmlParseError(message) = err; RemoveTagsFromResourceError::Unknown(message.to_string()) } } impl From<CredentialsError> for RemoveTagsFromResourceError { fn from(err: CredentialsError) -> RemoveTagsFromResourceError { RemoveTagsFromResourceError::Credentials(err) } } impl From<HttpDispatchError> for RemoveTagsFromResourceError { fn from(err: HttpDispatchError) -> RemoveTagsFromResourceError { RemoveTagsFromResourceError::HttpDispatch(err) } } impl From<io::Error> for RemoveTagsFromResourceError { fn from(err: io::Error) -> RemoveTagsFromResourceError { RemoveTagsFromResourceError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for RemoveTagsFromResourceError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for RemoveTagsFromResourceError { fn description(&self) -> &str { match *self { RemoveTagsFromResourceError::DBClusterNotFoundFault(ref cause) => cause, RemoveTagsFromResourceError::DBInstanceNotFoundFault(ref cause) => cause, RemoveTagsFromResourceError::DBSnapshotNotFoundFault(ref cause) => cause, RemoveTagsFromResourceError::Validation(ref cause) => cause, RemoveTagsFromResourceError::Credentials(ref err) => err.description(), RemoveTagsFromResourceError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } RemoveTagsFromResourceError::Unknown(ref cause) => cause, } } } /// Errors returned by ResetDBClusterParameterGroup #[derive(Debug, PartialEq)] pub enum ResetDBClusterParameterGroupError { /// <p> <i>DBParameterGroupName</i> does not refer to an existing DB parameter group. </p> DBParameterGroupNotFoundFault(String), /// <p>The DB parameter group is in use or is in an invalid state. If you are attempting to delete the parameter group, you cannot delete it when the parameter group is in this state.</p> InvalidDBParameterGroupStateFault(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl ResetDBClusterParameterGroupError { pub fn from_body(body: &str) -> ResetDBClusterParameterGroupError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ "DBParameterGroupNotFound" => { ResetDBClusterParameterGroupError::DBParameterGroupNotFoundFault(String::from( parsed_error.message, )) } "InvalidDBParameterGroupState" => { ResetDBClusterParameterGroupError::InvalidDBParameterGroupStateFault( String::from(parsed_error.message), ) } _ => ResetDBClusterParameterGroupError::Unknown(String::from(body)), }, Err(_) => ResetDBClusterParameterGroupError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for ResetDBClusterParameterGroupError { fn from(err: XmlParseError) -> ResetDBClusterParameterGroupError { let XmlParseError(message) = err; ResetDBClusterParameterGroupError::Unknown(message.to_string()) } } impl From<CredentialsError> for ResetDBClusterParameterGroupError { fn from(err: CredentialsError) -> ResetDBClusterParameterGroupError { ResetDBClusterParameterGroupError::Credentials(err) } } impl From<HttpDispatchError> for ResetDBClusterParameterGroupError { fn from(err: HttpDispatchError) -> ResetDBClusterParameterGroupError { ResetDBClusterParameterGroupError::HttpDispatch(err) } } impl From<io::Error> for ResetDBClusterParameterGroupError { fn from(err: io::Error) -> ResetDBClusterParameterGroupError { ResetDBClusterParameterGroupError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for ResetDBClusterParameterGroupError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for ResetDBClusterParameterGroupError { fn description(&self) -> &str { match *self { ResetDBClusterParameterGroupError::DBParameterGroupNotFoundFault(ref cause) => cause, ResetDBClusterParameterGroupError::InvalidDBParameterGroupStateFault(ref cause) => { cause } ResetDBClusterParameterGroupError::Validation(ref cause) => cause, ResetDBClusterParameterGroupError::Credentials(ref err) => err.description(), ResetDBClusterParameterGroupError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } ResetDBClusterParameterGroupError::Unknown(ref cause) => cause, } } } /// Errors returned by ResetDBParameterGroup #[derive(Debug, PartialEq)] pub enum ResetDBParameterGroupError { /// <p> <i>DBParameterGroupName</i> does not refer to an existing DB parameter group. </p> DBParameterGroupNotFoundFault(String), /// <p>The DB parameter group is in use or is in an invalid state. If you are attempting to delete the parameter group, you cannot delete it when the parameter group is in this state.</p> InvalidDBParameterGroupStateFault(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl ResetDBParameterGroupError { pub fn from_body(body: &str) -> ResetDBParameterGroupError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
{ "DBParameterGroupNotFound" => { ResetDBParameterGroupError::DBParameterGroupNotFoundFault(String::from( parsed_error.message, )) } "InvalidDBParameterGroupState" => { ResetDBParameterGroupError::InvalidDBParameterGroupStateFault(String::from( parsed_error.message, )) } _ => ResetDBParameterGroupError::Unknown(String::from(body)), }, Err(_) => ResetDBParameterGroupError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for ResetDBParameterGroupError { fn from(err: XmlParseError) -> ResetDBParameterGroupError { let XmlParseError(message) = err; ResetDBParameterGroupError::Unknown(message.to_string()) } } impl From<CredentialsError> for ResetDBParameterGroupError { fn from(err: CredentialsError) -> ResetDBParameterGroupError { ResetDBParameterGroupError::Credentials(err) } } impl From<HttpDispatchError> for ResetDBParameterGroupError { fn from(err: HttpDispatchError) -> ResetDBParameterGroupError { ResetDBParameterGroupError::HttpDispatch(err) } } impl From<io::Error> for ResetDBParameterGroupError { fn from(err: io::Error) -> ResetDBParameterGroupError { ResetDBParameterGroupError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for ResetDBParameterGroupError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for ResetDBParameterGroupError { fn description(&self) -> &str { match *self { ResetDBParameterGroupError::DBParameterGroupNotFoundFault(ref cause) => cause, ResetDBParameterGroupError::InvalidDBParameterGroupStateFault(ref cause) => cause, ResetDBParameterGroupError::Validation(ref cause) => cause, ResetDBParameterGroupError::Credentials(ref err) => err.description(), ResetDBParameterGroupError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } ResetDBParameterGroupError::Unknown(ref cause) => cause, } } } /// Errors returned by RestoreDBClusterFromSnapshot #[derive(Debug, PartialEq)] pub enum RestoreDBClusterFromSnapshotError { /// <p>User already has a DB cluster with the given identifier.</p> DBClusterAlreadyExistsFault(String), /// <p>User attempted to create a new DB cluster and the user has already reached the maximum allowed DB cluster quota.</p> DBClusterQuotaExceededFault(String), /// <p> <i>DBClusterSnapshotIdentifier</i> does not refer to an existing DB cluster snapshot. </p> DBClusterSnapshotNotFoundFault(String), /// <p> <i>DBSnapshotIdentifier</i> does not refer to an existing DB snapshot. </p> DBSnapshotNotFoundFault(String), /// <p> <i>DBSubnetGroupName</i> does not refer to an existing DB subnet group. </p> DBSubnetGroupNotFoundFault(String), /// <p>The DB cluster does not have enough capacity for the current operation.</p> InsufficientDBClusterCapacityFault(String), /// <p>There is insufficient storage available for the current action. 
You may be able to resolve this error by updating your subnet group to use different Availability Zones that have more storage available.</p> InsufficientStorageClusterCapacityFault(String), /// <p>The supplied value is not a valid DB cluster snapshot state.</p> InvalidDBClusterSnapshotStateFault(String), /// <p>The state of the DB snapshot does not allow deletion.</p> InvalidDBSnapshotStateFault(String), /// <p>Cannot restore from vpc backup to non-vpc DB instance.</p> InvalidRestoreFault(String), /// <p>The requested subnet is invalid, or multiple subnets were requested that are not all in a common VPC.</p> InvalidSubnet(String), /// <p>DB subnet group does not cover all Availability Zones after it is created because users' change.</p> InvalidVPCNetworkStateFault(String), /// <p>Error accessing KMS key.</p> KMSKeyNotAccessibleFault(String), OptionGroupNotFoundFault(String), /// <p>Request would result in user exceeding the allowed amount of storage available across all DB instances.</p> StorageQuotaExceededFault(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl RestoreDBClusterFromSnapshotError { pub fn from_body(body: &str) -> RestoreDBClusterFromSnapshotError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] { "DBClusterAlreadyExistsFault" => { RestoreDBClusterFromSnapshotError::DBClusterAlreadyExistsFault(String::from( parsed_error.message, )) } "DBClusterQuotaExceededFault" => { RestoreDBClusterFromSnapshotError::DBClusterQuotaExceededFault(String::from( parsed_error.message, )) } "DBClusterSnapshotNotFoundFault" => { RestoreDBClusterFromSnapshotError::DBClusterSnapshotNotFoundFault(String::from( parsed_error.message, )) } "DBSnapshotNotFound" => RestoreDBClusterFromSnapshotError::DBSnapshotNotFoundFault( String::from(parsed_error.message), ), "DBSubnetGroupNotFoundFault" => { RestoreDBClusterFromSnapshotError::DBSubnetGroupNotFoundFault(String::from( parsed_error.message, )) } "InsufficientDBClusterCapacityFault" => { RestoreDBClusterFromSnapshotError::InsufficientDBClusterCapacityFault( String::from(parsed_error.message), ) } "InsufficientStorageClusterCapacity" => { RestoreDBClusterFromSnapshotError::InsufficientStorageClusterCapacityFault( String::from(parsed_error.message), ) } "InvalidDBClusterSnapshotStateFault" => { RestoreDBClusterFromSnapshotError::InvalidDBClusterSnapshotStateFault( String::from(parsed_error.message), ) } "InvalidDBSnapshotState" => { RestoreDBClusterFromSnapshotError::InvalidDBSnapshotStateFault(String::from( parsed_error.message, )) } "InvalidRestoreFault" => RestoreDBClusterFromSnapshotError::InvalidRestoreFault( String::from(parsed_error.message), ), "InvalidSubnet" => RestoreDBClusterFromSnapshotError::InvalidSubnet(String::from( parsed_error.message, )), "InvalidVPCNetworkStateFault" => { RestoreDBClusterFromSnapshotError::InvalidVPCNetworkStateFault(String::from( parsed_error.message, )) } "KMSKeyNotAccessibleFault" => { RestoreDBClusterFromSnapshotError::KMSKeyNotAccessibleFault(String::from( parsed_error.message, )) } "OptionGroupNotFoundFault" => { 
RestoreDBClusterFromSnapshotError::OptionGroupNotFoundFault(String::from( parsed_error.message, )) } "StorageQuotaExceeded" => { RestoreDBClusterFromSnapshotError::StorageQuotaExceededFault(String::from( parsed_error.message, )) } _ => RestoreDBClusterFromSnapshotError::Unknown(String::from(body)), }, Err(_) => RestoreDBClusterFromSnapshotError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for RestoreDBClusterFromSnapshotError { fn from(err: XmlParseError) -> RestoreDBClusterFromSnapshotError { let XmlParseError(message) = err; RestoreDBClusterFromSnapshotError::Unknown(message.to_string()) } } impl From<CredentialsError> for RestoreDBClusterFromSnapshotError { fn from(err: CredentialsError) -> RestoreDBClusterFromSnapshotError { RestoreDBClusterFromSnapshotError::Credentials(err) } } impl From<HttpDispatchError> for RestoreDBClusterFromSnapshotError { fn from(err: HttpDispatchError) -> RestoreDBClusterFromSnapshotError { RestoreDBClusterFromSnapshotError::HttpDispatch(err) } } impl From<io::Error> for RestoreDBClusterFromSnapshotError { fn from(err: io::Error) -> RestoreDBClusterFromSnapshotError { RestoreDBClusterFromSnapshotError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for RestoreDBClusterFromSnapshotError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for RestoreDBClusterFromSnapshotError { fn description(&self) -> &str { match *self { RestoreDBClusterFromSnapshotError::DBClusterAlreadyExistsFault(ref cause) => cause, RestoreDBClusterFromSnapshotError::DBClusterQuotaExceededFault(ref cause) => cause, RestoreDBClusterFromSnapshotError::DBClusterSnapshotNotFoundFault(ref cause) => cause, RestoreDBClusterFromSnapshotError::DBSnapshotNotFoundFault(ref cause) => cause, RestoreDBClusterFromSnapshotError::DBSubnetGroupNotFoundFault(ref cause) => cause, RestoreDBClusterFromSnapshotError::InsufficientDBClusterCapacityFault(ref cause) => { cause } RestoreDBClusterFromSnapshotError::InsufficientStorageClusterCapacityFault( ref cause, ) => cause, RestoreDBClusterFromSnapshotError::InvalidDBClusterSnapshotStateFault(ref cause) => { cause } RestoreDBClusterFromSnapshotError::InvalidDBSnapshotStateFault(ref cause) => cause, RestoreDBClusterFromSnapshotError::InvalidRestoreFault(ref cause) => cause, RestoreDBClusterFromSnapshotError::InvalidSubnet(ref cause) => cause, RestoreDBClusterFromSnapshotError::InvalidVPCNetworkStateFault(ref cause) => cause, RestoreDBClusterFromSnapshotError::KMSKeyNotAccessibleFault(ref cause) => cause, RestoreDBClusterFromSnapshotError::OptionGroupNotFoundFault(ref cause) => cause, RestoreDBClusterFromSnapshotError::StorageQuotaExceededFault(ref cause) => cause, RestoreDBClusterFromSnapshotError::Validation(ref cause) => cause, RestoreDBClusterFromSnapshotError::Credentials(ref err) => err.description(), RestoreDBClusterFromSnapshotError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } RestoreDBClusterFromSnapshotError::Unknown(ref cause) => cause, } } } /// Errors returned by RestoreDBClusterToPointInTime #[derive(Debug, PartialEq)] pub enum RestoreDBClusterToPointInTimeError { /// <p>User already has a DB cluster with the given identifier.</p> DBClusterAlreadyExistsFault(String), /// <p> <i>DBClusterIdentifier</i> does not refer to an existing DB cluster. 
</p> DBClusterNotFoundFault(String), /// <p>User attempted to create a new DB cluster and the user has already reached the maximum allowed DB cluster quota.</p> DBClusterQuotaExceededFault(String), /// <p> <i>DBClusterSnapshotIdentifier</i> does not refer to an existing DB cluster snapshot. </p> DBClusterSnapshotNotFoundFault(String), /// <p> <i>DBSubnetGroupName</i> does not refer to an existing DB subnet group. </p> DBSubnetGroupNotFoundFault(String), /// <p>The DB cluster does not have enough capacity for the current operation.</p> InsufficientDBClusterCapacityFault(String), /// <p>There is insufficient storage available for the current action. You may be able to resolve this error by updating your subnet group to use different Availability Zones that have more storage available.</p> InsufficientStorageClusterCapacityFault(String), /// <p>The supplied value is not a valid DB cluster snapshot state.</p> InvalidDBClusterSnapshotStateFault(String), /// <p>The DB cluster is not in a valid state.</p> InvalidDBClusterStateFault(String), /// <p>The state of the DB snapshot does not allow deletion.</p> InvalidDBSnapshotStateFault(String), /// <p>Cannot restore from vpc backup to non-vpc DB instance.</p> InvalidRestoreFault(String), /// <p>The requested subnet is invalid, or multiple subnets were requested that are not all in a common VPC.</p> InvalidSubnet(String), /// <p>DB subnet group does not cover all Availability Zones after it is created because users' change.</p> InvalidVPCNetworkStateFault(String), /// <p>Error accessing KMS key.</p> KMSKeyNotAccessibleFault(String), OptionGroupNotFoundFault(String), /// <p>Request would result in user exceeding the allowed amount of storage available across all DB instances.</p> StorageQuotaExceededFault(String), /// An error occurred dispatching the HTTP request HttpDispatch(HttpDispatchError), /// An error was encountered with AWS credentials. Credentials(CredentialsError), /// A validation error occurred. Details from AWS are provided. Validation(String), /// An unknown error occurred. The raw HTTP response is provided. Unknown(String), } impl RestoreDBClusterToPointInTimeError { pub fn from_body(body: &str) -> RestoreDBClusterToPointInTimeError { let reader = EventReader::new(body.as_bytes()); let mut stack = XmlResponse::new(reader.into_iter().peekable()); find_start_element(&mut stack); match Self::deserialize(&mut stack) { Ok(parsed_error) => match &parsed_error.code[..] 
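            // `from_body` only ever produces the fault variants or `Unknown`;
            // `Credentials` and `HttpDispatch` are injected via the `From`
            // conversions below, and `Validation` is presumably filled in by
            // the request-dispatching layer rather than parsed from a response
            // body.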
{ "DBClusterAlreadyExistsFault" => { RestoreDBClusterToPointInTimeError::DBClusterAlreadyExistsFault(String::from( parsed_error.message, )) } "DBClusterNotFoundFault" => { RestoreDBClusterToPointInTimeError::DBClusterNotFoundFault(String::from( parsed_error.message, )) } "DBClusterQuotaExceededFault" => { RestoreDBClusterToPointInTimeError::DBClusterQuotaExceededFault(String::from( parsed_error.message, )) } "DBClusterSnapshotNotFoundFault" => { RestoreDBClusterToPointInTimeError::DBClusterSnapshotNotFoundFault( String::from(parsed_error.message), ) } "DBSubnetGroupNotFoundFault" => { RestoreDBClusterToPointInTimeError::DBSubnetGroupNotFoundFault(String::from( parsed_error.message, )) } "InsufficientDBClusterCapacityFault" => { RestoreDBClusterToPointInTimeError::InsufficientDBClusterCapacityFault( String::from(parsed_error.message), ) } "InsufficientStorageClusterCapacity" => { RestoreDBClusterToPointInTimeError::InsufficientStorageClusterCapacityFault( String::from(parsed_error.message), ) } "InvalidDBClusterSnapshotStateFault" => { RestoreDBClusterToPointInTimeError::InvalidDBClusterSnapshotStateFault( String::from(parsed_error.message), ) } "InvalidDBClusterStateFault" => { RestoreDBClusterToPointInTimeError::InvalidDBClusterStateFault(String::from( parsed_error.message, )) } "InvalidDBSnapshotState" => { RestoreDBClusterToPointInTimeError::InvalidDBSnapshotStateFault(String::from( parsed_error.message, )) } "InvalidRestoreFault" => RestoreDBClusterToPointInTimeError::InvalidRestoreFault( String::from(parsed_error.message), ), "InvalidSubnet" => RestoreDBClusterToPointInTimeError::InvalidSubnet(String::from( parsed_error.message, )), "InvalidVPCNetworkStateFault" => { RestoreDBClusterToPointInTimeError::InvalidVPCNetworkStateFault(String::from( parsed_error.message, )) } "KMSKeyNotAccessibleFault" => { RestoreDBClusterToPointInTimeError::KMSKeyNotAccessibleFault(String::from( parsed_error.message, )) } "OptionGroupNotFoundFault" => { RestoreDBClusterToPointInTimeError::OptionGroupNotFoundFault(String::from( parsed_error.message, )) } "StorageQuotaExceeded" => { RestoreDBClusterToPointInTimeError::StorageQuotaExceededFault(String::from( parsed_error.message, )) } _ => RestoreDBClusterToPointInTimeError::Unknown(String::from(body)), }, Err(_) => RestoreDBClusterToPointInTimeError::Unknown(body.to_string()), } } fn deserialize<T>(stack: &mut T) -> Result<XmlError, XmlParseError> where T: Peek + Next, { start_element("ErrorResponse", stack)?; XmlErrorDeserializer::deserialize("Error", stack) } } impl From<XmlParseError> for RestoreDBClusterToPointInTimeError { fn from(err: XmlParseError) -> RestoreDBClusterToPointInTimeError { let XmlParseError(message) = err; RestoreDBClusterToPointInTimeError::Unknown(message.to_string()) } } impl From<CredentialsError> for RestoreDBClusterToPointInTimeError { fn from(err: CredentialsError) -> RestoreDBClusterToPointInTimeError { RestoreDBClusterToPointInTimeError::Credentials(err) } } impl From<HttpDispatchError> for RestoreDBClusterToPointInTimeError { fn from(err: HttpDispatchError) -> RestoreDBClusterToPointInTimeError { RestoreDBClusterToPointInTimeError::HttpDispatch(err) } } impl From<io::Error> for RestoreDBClusterToPointInTimeError { fn from(err: io::Error) -> RestoreDBClusterToPointInTimeError { RestoreDBClusterToPointInTimeError::HttpDispatch(HttpDispatchError::from(err)) } } impl fmt::Display for RestoreDBClusterToPointInTimeError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.description()) } } impl Error for 
RestoreDBClusterToPointInTimeError { fn description(&self) -> &str { match *self { RestoreDBClusterToPointInTimeError::DBClusterAlreadyExistsFault(ref cause) => cause, RestoreDBClusterToPointInTimeError::DBClusterNotFoundFault(ref cause) => cause, RestoreDBClusterToPointInTimeError::DBClusterQuotaExceededFault(ref cause) => cause, RestoreDBClusterToPointInTimeError::DBClusterSnapshotNotFoundFault(ref cause) => cause, RestoreDBClusterToPointInTimeError::DBSubnetGroupNotFoundFault(ref cause) => cause, RestoreDBClusterToPointInTimeError::InsufficientDBClusterCapacityFault(ref cause) => { cause } RestoreDBClusterToPointInTimeError::InsufficientStorageClusterCapacityFault( ref cause, ) => cause, RestoreDBClusterToPointInTimeError::InvalidDBClusterSnapshotStateFault(ref cause) => { cause } RestoreDBClusterToPointInTimeError::InvalidDBClusterStateFault(ref cause) => cause, RestoreDBClusterToPointInTimeError::InvalidDBSnapshotStateFault(ref cause) => cause, RestoreDBClusterToPointInTimeError::InvalidRestoreFault(ref cause) => cause, RestoreDBClusterToPointInTimeError::InvalidSubnet(ref cause) => cause, RestoreDBClusterToPointInTimeError::InvalidVPCNetworkStateFault(ref cause) => cause, RestoreDBClusterToPointInTimeError::KMSKeyNotAccessibleFault(ref cause) => cause, RestoreDBClusterToPointInTimeError::OptionGroupNotFoundFault(ref cause) => cause, RestoreDBClusterToPointInTimeError::StorageQuotaExceededFault(ref cause) => cause, RestoreDBClusterToPointInTimeError::Validation(ref cause) => cause, RestoreDBClusterToPointInTimeError::Credentials(ref err) => err.description(), RestoreDBClusterToPointInTimeError::HttpDispatch(ref dispatch_error) => { dispatch_error.description() } RestoreDBClusterToPointInTimeError::Unknown(ref cause) => cause, } } } /// Trait representing the capabilities of the Amazon Neptune API. Amazon Neptune clients implement this trait. pub trait Neptune { /// <p>Associates an Identity and Access Management (IAM) role from an Neptune DB cluster. </p> fn add_role_to_db_cluster( &self, input: AddRoleToDBClusterMessage, ) -> RusotoFuture<(), AddRoleToDBClusterError>; /// <p>Adds a source identifier to an existing event notification subscription.</p> fn add_source_identifier_to_subscription( &self, input: AddSourceIdentifierToSubscriptionMessage, ) -> RusotoFuture<AddSourceIdentifierToSubscriptionResult, AddSourceIdentifierToSubscriptionError>; /// <p>Adds metadata tags to an Amazon Neptune resource. 
These tags can also be used with cost allocation reporting to track cost associated with Amazon Neptune resources, or used in a Condition statement in an IAM policy for Amazon Neptune.</p> fn add_tags_to_resource( &self, input: AddTagsToResourceMessage, ) -> RusotoFuture<(), AddTagsToResourceError>; /// <p>Applies a pending maintenance action to a resource (for example, to a DB instance).</p> fn apply_pending_maintenance_action( &self, input: ApplyPendingMaintenanceActionMessage, ) -> RusotoFuture<ApplyPendingMaintenanceActionResult, ApplyPendingMaintenanceActionError>; /// <p>Copies the specified DB cluster parameter group.</p> fn copy_db_cluster_parameter_group( &self, input: CopyDBClusterParameterGroupMessage, ) -> RusotoFuture<CopyDBClusterParameterGroupResult, CopyDBClusterParameterGroupError>; /// <p>Copies a snapshot of a DB cluster.</p> <p>To copy a DB cluster snapshot from a shared manual DB cluster snapshot, <code>SourceDBClusterSnapshotIdentifier</code> must be the Amazon Resource Name (ARN) of the shared DB cluster snapshot.</p> <p>You can copy an encrypted DB cluster snapshot from another AWS Region. In that case, the AWS Region where you call the <code>CopyDBClusterSnapshot</code> action is the destination AWS Region for the encrypted DB cluster snapshot to be copied to. To copy an encrypted DB cluster snapshot from another AWS Region, you must provide the following values:</p> <ul> <li> <p> <code>KmsKeyId</code> - The AWS Key Management System (AWS KMS) key identifier for the key to use to encrypt the copy of the DB cluster snapshot in the destination AWS Region.</p> </li> <li> <p> <code>PreSignedUrl</code> - A URL that contains a Signature Version 4 signed request for the <code>CopyDBClusterSnapshot</code> action to be called in the source AWS Region where the DB cluster snapshot is copied from. The pre-signed URL must be a valid request for the <code>CopyDBClusterSnapshot</code> API action that can be executed in the source AWS Region that contains the encrypted DB cluster snapshot to be copied.</p> <p>The pre-signed URL request must contain the following parameter values:</p> <ul> <li> <p> <code>KmsKeyId</code> - The KMS key identifier for the key to use to encrypt the copy of the DB cluster snapshot in the destination AWS Region. This is the same identifier for both the <code>CopyDBClusterSnapshot</code> action that is called in the destination AWS Region, and the action contained in the pre-signed URL.</p> </li> <li> <p> <code>DestinationRegion</code> - The name of the AWS Region that the DB cluster snapshot will be created in.</p> </li> <li> <p> <code>SourceDBClusterSnapshotIdentifier</code> - The DB cluster snapshot identifier for the encrypted DB cluster snapshot to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source AWS Region. 
For example, if you are copying an encrypted DB cluster snapshot from the us-west-2 AWS Region, then your <code>SourceDBClusterSnapshotIdentifier</code> looks like the following example: <code>arn:aws:rds:us-west-2:123456789012:cluster-snapshot:neptune-cluster1-snapshot-20161115</code>.</p> </li> </ul> <p>To learn how to generate a Signature Version 4 signed request, see <a href="http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html"> Authenticating Requests: Using Query Parameters (AWS Signature Version 4)</a> and <a href="http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html"> Signature Version 4 Signing Process</a>.</p> </li> <li> <p> <code>TargetDBClusterSnapshotIdentifier</code> - The identifier for the new copy of the DB cluster snapshot in the destination AWS Region.</p> </li> <li> <p> <code>SourceDBClusterSnapshotIdentifier</code> - The DB cluster snapshot identifier for the encrypted DB cluster snapshot to be copied. This identifier must be in the ARN format for the source AWS Region and is the same value as the <code>SourceDBClusterSnapshotIdentifier</code> in the pre-signed URL. </p> </li> </ul> <p>To cancel the copy operation once it is in progress, delete the target DB cluster snapshot identified by <code>TargetDBClusterSnapshotIdentifier</code> while that DB cluster snapshot is in "copying" status.</p> fn copy_db_cluster_snapshot( &self, input: CopyDBClusterSnapshotMessage, ) -> RusotoFuture<CopyDBClusterSnapshotResult, CopyDBClusterSnapshotError>; /// <p>Copies the specified DB parameter group.</p> fn copy_db_parameter_group( &self, input: CopyDBParameterGroupMessage, ) -> RusotoFuture<CopyDBParameterGroupResult, CopyDBParameterGroupError>; /// <p>Creates a new Amazon Neptune DB cluster.</p> <p>You can use the <code>ReplicationSourceIdentifier</code> parameter to create the DB cluster as a Read Replica of another DB cluster or Amazon Neptune DB instance. For cross-region replication where the DB cluster identified by <code>ReplicationSourceIdentifier</code> is encrypted, you must also specify the <code>PreSignedUrl</code> parameter.</p> fn create_db_cluster( &self, input: CreateDBClusterMessage, ) -> RusotoFuture<CreateDBClusterResult, CreateDBClusterError>; /// <p><p>Creates a new DB cluster parameter group.</p> <p>Parameters in a DB cluster parameter group apply to all of the instances in a DB cluster.</p> <p> A DB cluster parameter group is initially created with the default parameters for the database engine used by instances in the DB cluster. To provide custom values for any of the parameters, you must modify the group after creating it using <a>ModifyDBClusterParameterGroup</a>. Once you&#39;ve created a DB cluster parameter group, you need to associate it with your DB cluster using <a>ModifyDBCluster</a>. When you associate a new DB cluster parameter group with a running DB cluster, you need to reboot the DB instances in the DB cluster without failover for the new DB cluster parameter group and associated settings to take effect. </p> <important> <p>After you create a DB cluster parameter group, you should wait at least 5 minutes before creating your first DB cluster that uses that DB cluster parameter group as the default parameter group. This allows Amazon Neptune to fully complete the create action before the DB cluster parameter group is used as the default for a new DB cluster. 
This is especially important for parameters that are critical when creating the default database for a DB cluster, such as the character set for the default database defined by the <code>character_set_database</code> parameter. You can use the <i>Parameter Groups</i> option of the <a href="https://console.aws.amazon.com/rds/">Amazon Neptune console</a> or the <a>DescribeDBClusterParameters</a> command to verify that your DB cluster parameter group has been created or modified.</p> </important></p> fn create_db_cluster_parameter_group( &self, input: CreateDBClusterParameterGroupMessage, ) -> RusotoFuture<CreateDBClusterParameterGroupResult, CreateDBClusterParameterGroupError>; /// <p>Creates a snapshot of a DB cluster. </p> fn create_db_cluster_snapshot( &self, input: CreateDBClusterSnapshotMessage, ) -> RusotoFuture<CreateDBClusterSnapshotResult, CreateDBClusterSnapshotError>; /// <p>Creates a new DB instance.</p> fn create_db_instance( &self, input: CreateDBInstanceMessage, ) -> RusotoFuture<CreateDBInstanceResult, CreateDBInstanceError>; /// <p><p>Creates a new DB parameter group.</p> <p> A DB parameter group is initially created with the default parameters for the database engine used by the DB instance. To provide custom values for any of the parameters, you must modify the group after creating it using <i>ModifyDBParameterGroup</i>. Once you&#39;ve created a DB parameter group, you need to associate it with your DB instance using <i>ModifyDBInstance</i>. When you associate a new DB parameter group with a running DB instance, you need to reboot the DB instance without failover for the new DB parameter group and associated settings to take effect. </p> <important> <p>After you create a DB parameter group, you should wait at least 5 minutes before creating your first DB instance that uses that DB parameter group as the default parameter group. This allows Amazon Neptune to fully complete the create action before the parameter group is used as the default for a new DB instance. This is especially important for parameters that are critical when creating the default database for a DB instance, such as the character set for the default database defined by the <code>character_set_database</code> parameter. You can use the <i>Parameter Groups</i> option of the Amazon Neptune console or the <i>DescribeDBParameters</i> command to verify that your DB parameter group has been created or modified.</p> </important></p> fn create_db_parameter_group( &self, input: CreateDBParameterGroupMessage, ) -> RusotoFuture<CreateDBParameterGroupResult, CreateDBParameterGroupError>; /// <p>Creates a new DB subnet group. DB subnet groups must contain at least one subnet in at least two AZs in the AWS Region.</p> fn create_db_subnet_group( &self, input: CreateDBSubnetGroupMessage, ) -> RusotoFuture<CreateDBSubnetGroupResult, CreateDBSubnetGroupError>; /// <p>Creates an event notification subscription. This action requires a topic ARN (Amazon Resource Name) created by either the Neptune console, the SNS console, or the SNS API. To obtain an ARN with SNS, you must create a topic in Amazon SNS and subscribe to the topic. The ARN is displayed in the SNS console.</p> <p>You can specify the type of source (SourceType) you want to be notified of, provide a list of Neptune sources (SourceIds) that triggers the events, and provide a list of event categories (EventCategories) for events you want to be notified of.
For example, you can specify SourceType = db-instance, SourceIds = mydbinstance1, mydbinstance2 and EventCategories = Availability, Backup.</p> <p>If you specify both the SourceType and SourceIds, such as SourceType = db-instance and SourceIdentifier = myDBInstance1, you are notified of all the db-instance events for the specified source. If you specify a SourceType but do not specify a SourceIdentifier, you receive notice of the events for that source type for all your Neptune sources. If you do not specify either the SourceType or the SourceIdentifier, you are notified of events generated from all Neptune sources belonging to your customer account.</p> fn create_event_subscription( &self, input: CreateEventSubscriptionMessage, ) -> RusotoFuture<CreateEventSubscriptionResult, CreateEventSubscriptionError>; /// <p><p>The DeleteDBCluster action deletes a previously provisioned DB cluster. When you delete a DB cluster, all automated backups for that DB cluster are deleted and can&#39;t be recovered. Manual DB cluster snapshots of the specified DB cluster are not deleted.</p> <p/></p> fn delete_db_cluster( &self, input: DeleteDBClusterMessage, ) -> RusotoFuture<DeleteDBClusterResult, DeleteDBClusterError>; /// <p>Deletes a specified DB cluster parameter group. The DB cluster parameter group to be deleted can't be associated with any DB clusters.</p> fn delete_db_cluster_parameter_group( &self, input: DeleteDBClusterParameterGroupMessage, ) -> RusotoFuture<(), DeleteDBClusterParameterGroupError>; /// <p><p>Deletes a DB cluster snapshot. If the snapshot is being copied, the copy operation is terminated.</p> <note> <p>The DB cluster snapshot must be in the <code>available</code> state to be deleted.</p> </note></p> fn delete_db_cluster_snapshot( &self, input: DeleteDBClusterSnapshotMessage, ) -> RusotoFuture<DeleteDBClusterSnapshotResult, DeleteDBClusterSnapshotError>; /// <p>The DeleteDBInstance action deletes a previously provisioned DB instance. When you delete a DB instance, all automated backups for that instance are deleted and can't be recovered. Manual DB snapshots of the DB instance to be deleted by <code>DeleteDBInstance</code> are not deleted.</p> <p> If you request a final DB snapshot the status of the Amazon Neptune DB instance is <code>deleting</code> until the DB snapshot is created. The API action <code>DescribeDBInstance</code> is used to monitor the status of this operation. The action can't be canceled or reverted once submitted. </p> <p>Note that when a DB instance is in a failure state and has a status of <code>failed</code>, <code>incompatible-restore</code>, or <code>incompatible-network</code>, you can only delete it when the <code>SkipFinalSnapshot</code> parameter is set to <code>true</code>.</p> <p>If the specified DB instance is part of a DB cluster, you can't delete the DB instance if both of the following conditions are true:</p> <ul> <li> <p>The DB cluster is a Read Replica of another DB cluster.</p> </li> <li> <p>The DB instance is the only instance in the DB cluster.</p> </li> </ul> <p>To delete a DB instance in this case, first call the <a>PromoteReadReplicaDBCluster</a> API action to promote the DB cluster so it's no longer a Read Replica. After the promotion completes, then call the <code>DeleteDBInstance</code> API action to delete the final instance in the DB cluster.</p> fn delete_db_instance( &self, input: DeleteDBInstanceMessage, ) -> RusotoFuture<DeleteDBInstanceResult, DeleteDBInstanceError>; /// <p>Deletes a specified DBParameterGroup.
The DBParameterGroup to be deleted can't be associated with any DB instances.</p> fn delete_db_parameter_group( &self, input: DeleteDBParameterGroupMessage, ) -> RusotoFuture<(), DeleteDBParameterGroupError>; /// <p><p>Deletes a DB subnet group.</p> <note> <p>The specified database subnet group must not be associated with any DB instances.</p> </note></p> fn delete_db_subnet_group( &self, input: DeleteDBSubnetGroupMessage, ) -> RusotoFuture<(), DeleteDBSubnetGroupError>; /// <p>Deletes an event notification subscription.</p> fn delete_event_subscription( &self, input: DeleteEventSubscriptionMessage, ) -> RusotoFuture<DeleteEventSubscriptionResult, DeleteEventSubscriptionError>; /// <p> Returns a list of <code>DBClusterParameterGroup</code> descriptions. If a <code>DBClusterParameterGroupName</code> parameter is specified, the list will contain only the description of the specified DB cluster parameter group. </p> fn describe_db_cluster_parameter_groups( &self, input: DescribeDBClusterParameterGroupsMessage, ) -> RusotoFuture<DBClusterParameterGroupsMessage, DescribeDBClusterParameterGroupsError>; /// <p>Returns the detailed parameter list for a particular DB cluster parameter group.</p> fn describe_db_cluster_parameters( &self, input: DescribeDBClusterParametersMessage, ) -> RusotoFuture<DBClusterParameterGroupDetails, DescribeDBClusterParametersError>; /// <p>Returns a list of DB cluster snapshot attribute names and values for a manual DB cluster snapshot.</p> <p>When sharing snapshots with other AWS accounts, <code>DescribeDBClusterSnapshotAttributes</code> returns the <code>restore</code> attribute and a list of IDs for the AWS accounts that are authorized to copy or restore the manual DB cluster snapshot. If <code>all</code> is included in the list of values for the <code>restore</code> attribute, then the manual DB cluster snapshot is public and can be copied or restored by all AWS accounts.</p> <p>To add or remove access for an AWS account to copy or restore a manual DB cluster snapshot, or to make the manual DB cluster snapshot public or private, use the <a>ModifyDBClusterSnapshotAttribute</a> API action.</p> fn describe_db_cluster_snapshot_attributes( &self, input: DescribeDBClusterSnapshotAttributesMessage, ) -> RusotoFuture< DescribeDBClusterSnapshotAttributesResult, DescribeDBClusterSnapshotAttributesError, >; /// <p>Returns information about DB cluster snapshots. This API action supports pagination.</p> fn describe_db_cluster_snapshots( &self, input: DescribeDBClusterSnapshotsMessage, ) -> RusotoFuture<DBClusterSnapshotMessage, DescribeDBClusterSnapshotsError>; /// <p>Returns information about provisioned DB clusters. This API supports pagination.</p> fn describe_db_clusters( &self, input: DescribeDBClustersMessage, ) -> RusotoFuture<DBClusterMessage, DescribeDBClustersError>; /// <p>Returns a list of the available DB engines.</p> fn describe_db_engine_versions( &self, input: DescribeDBEngineVersionsMessage, ) -> RusotoFuture<DBEngineVersionMessage, DescribeDBEngineVersionsError>; /// <p>Returns information about provisioned instances. This API supports pagination.</p> fn describe_db_instances( &self, input: DescribeDBInstancesMessage, ) -> RusotoFuture<DBInstanceMessage, DescribeDBInstancesError>; /// <p> Returns a list of <code>DBParameterGroup</code> descriptions. If a <code>DBParameterGroupName</code> is specified, the list will contain only the description of the specified DB parameter group. 
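/// A minimal usage sketch (the region, group name, and blocking `.wait()` call are illustrative assumptions, not part of the generated API):
///
/// ```rust,ignore
/// use futures::Future; // `RusotoFuture` implements the futures 0.1 `Future` trait
/// use rusoto_core::Region;
/// use rusoto_neptune::{DescribeDBParameterGroupsMessage, Neptune, NeptuneClient};
///
/// let client = NeptuneClient::new(Region::UsEast1);
/// let request = DescribeDBParameterGroupsMessage {
///     // Narrow the listing to a single group; leave `None` to list all groups.
///     db_parameter_group_name: Some("my-neptune-params".to_owned()),
///     ..Default::default()
/// };
/// // `.wait()` blocks the current thread; production code would drive the
/// // future on an executor instead.
/// let groups = client
///     .describe_db_parameter_groups(request)
///     .wait()
///     .expect("describe_db_parameter_groups failed");
/// println!("{:?}", groups.db_parameter_groups);
/// ```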
</p> fn describe_db_parameter_groups( &self, input: DescribeDBParameterGroupsMessage, ) -> RusotoFuture<DBParameterGroupsMessage, DescribeDBParameterGroupsError>; /// <p>Returns the detailed parameter list for a particular DB parameter group.</p> fn describe_db_parameters( &self, input: DescribeDBParametersMessage, ) -> RusotoFuture<DBParameterGroupDetails, DescribeDBParametersError>; /// <p>Returns a list of DBSubnetGroup descriptions. If a DBSubnetGroupName is specified, the list will contain only the descriptions of the specified DBSubnetGroup.</p> <p>For an overview of CIDR ranges, go to the <a href="http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing">Wikipedia Tutorial</a>. </p> fn describe_db_subnet_groups( &self, input: DescribeDBSubnetGroupsMessage, ) -> RusotoFuture<DBSubnetGroupMessage, DescribeDBSubnetGroupsError>; /// <p>Returns the default engine and system parameter information for the cluster database engine.</p> fn describe_engine_default_cluster_parameters( &self, input: DescribeEngineDefaultClusterParametersMessage, ) -> RusotoFuture< DescribeEngineDefaultClusterParametersResult, DescribeEngineDefaultClusterParametersError, >; /// <p>Returns the default engine and system parameter information for the specified database engine.</p> fn describe_engine_default_parameters( &self, input: DescribeEngineDefaultParametersMessage, ) -> RusotoFuture<DescribeEngineDefaultParametersResult, DescribeEngineDefaultParametersError>; /// <p>Displays a list of categories for all event source types, or, if specified, for a specified source type. </p> fn describe_event_categories( &self, input: DescribeEventCategoriesMessage, ) -> RusotoFuture<EventCategoriesMessage, DescribeEventCategoriesError>; /// <p>Lists all the subscription descriptions for a customer account. The description for a subscription includes SubscriptionName, SNSTopicARN, CustomerID, SourceType, SourceID, CreationTime, and Status.</p> <p>If you specify a SubscriptionName, lists the description for that subscription.</p> fn describe_event_subscriptions( &self, input: DescribeEventSubscriptionsMessage, ) -> RusotoFuture<EventSubscriptionsMessage, DescribeEventSubscriptionsError>; /// <p>Returns events related to DB instances, DB security groups, DB snapshots, and DB parameter groups for the past 14 days. Events specific to a particular DB instance, DB security group, database snapshot, or DB parameter group can be obtained by providing the name as a parameter. By default, the past hour of events are returned.</p> fn describe_events( &self, input: DescribeEventsMessage, ) -> RusotoFuture<EventsMessage, DescribeEventsError>; /// <p>Returns a list of orderable DB instance options for the specified engine.</p> fn describe_orderable_db_instance_options( &self, input: DescribeOrderableDBInstanceOptionsMessage, ) -> RusotoFuture<OrderableDBInstanceOptionsMessage, DescribeOrderableDBInstanceOptionsError>; /// <p>Returns a list of resources (for example, DB instances) that have at least one pending maintenance action.</p> fn describe_pending_maintenance_actions( &self, input: DescribePendingMaintenanceActionsMessage, ) -> RusotoFuture<PendingMaintenanceActionsMessage, DescribePendingMaintenanceActionsError>; /// <p>You can call <a>DescribeValidDBInstanceModifications</a> to learn what modifications you can make to your DB instance. You can use this information when you call <a>ModifyDBInstance</a>. 
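/// A sketch of the intended two-step flow (the instance identifier and the blocking `.wait()` call are illustrative assumptions):
///
/// ```rust,ignore
/// use futures::Future;
/// use rusoto_core::Region;
/// use rusoto_neptune::{DescribeValidDBInstanceModificationsMessage, Neptune, NeptuneClient};
///
/// let client = NeptuneClient::new(Region::UsWest2);
/// // Step 1: ask what can be changed on this instance.
/// let valid = client
///     .describe_valid_db_instance_modifications(DescribeValidDBInstanceModificationsMessage {
///         db_instance_identifier: "my-neptune-instance".to_owned(),
///     })
///     .wait()
///     .expect("describe_valid_db_instance_modifications failed");
/// // Step 2: use the returned settings to build the corresponding
/// // `ModifyDBInstanceMessage` before calling `modify_db_instance`.
/// println!("{:?}", valid);
/// ```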
</p> fn describe_valid_db_instance_modifications( &self, input: DescribeValidDBInstanceModificationsMessage, ) -> RusotoFuture< DescribeValidDBInstanceModificationsResult, DescribeValidDBInstanceModificationsError, >; /// <p>Forces a failover for a DB cluster.</p> <p>A failover for a DB cluster promotes one of the Read Replicas (read-only instances) in the DB cluster to be the primary instance (the cluster writer).</p> <p>Amazon Neptune will automatically fail over to a Read Replica, if one exists, when the primary instance fails. You can force a failover when you want to simulate a failure of a primary instance for testing. Because each instance in a DB cluster has its own endpoint address, you will need to clean up and re-establish any existing connections that use those endpoint addresses when the failover is complete.</p> fn failover_db_cluster( &self, input: FailoverDBClusterMessage, ) -> RusotoFuture<FailoverDBClusterResult, FailoverDBClusterError>; /// <p>Lists all tags on an Amazon Neptune resource.</p> fn list_tags_for_resource( &self, input: ListTagsForResourceMessage, ) -> RusotoFuture<TagListMessage, ListTagsForResourceError>; /// <p>Modifies a setting for a DB cluster. You can change one or more database configuration parameters by specifying these parameters and the new values in the request. </p> fn modify_db_cluster( &self, input: ModifyDBClusterMessage, ) -> RusotoFuture<ModifyDBClusterResult, ModifyDBClusterError>; /// <p><p> Modifies the parameters of a DB cluster parameter group. To modify more than one parameter, submit a list of the following: <code>ParameterName</code>, <code>ParameterValue</code>, and <code>ApplyMethod</code>. A maximum of 20 parameters can be modified in a single request. </p> <note> <p>Changes to dynamic parameters are applied immediately. Changes to static parameters require a reboot without failover to the DB cluster associated with the parameter group before the change can take effect.</p> </note> <important> <p>After you create a DB cluster parameter group, you should wait at least 5 minutes before creating your first DB cluster that uses that DB cluster parameter group as the default parameter group. This allows Amazon Neptune to fully complete the create action before the parameter group is used as the default for a new DB cluster. This is especially important for parameters that are critical when creating the default database for a DB cluster, such as the character set for the default database defined by the <code>character_set_database</code> parameter. You can use the <i>Parameter Groups</i> option of the Amazon Neptune console or the <a>DescribeDBClusterParameters</a> command to verify that your DB cluster parameter group has been created or modified.</p> </important></p> fn modify_db_cluster_parameter_group( &self, input: ModifyDBClusterParameterGroupMessage, ) -> RusotoFuture<DBClusterParameterGroupNameMessage, ModifyDBClusterParameterGroupError>; /// <p>Adds an attribute and values to, or removes an attribute and values from, a manual DB cluster snapshot.</p> <p>To share a manual DB cluster snapshot with other AWS accounts, specify <code>restore</code> as the <code>AttributeName</code> and use the <code>ValuesToAdd</code> parameter to add a list of IDs of the AWS accounts that are authorized to restore the manual DB cluster snapshot. Use the value <code>all</code> to make the manual DB cluster snapshot public, which means that it can be copied or restored by all AWS accounts.
Do not add the <code>all</code> value for any manual DB cluster snapshots that contain private information that you don't want available to all AWS accounts. If a manual DB cluster snapshot is encrypted, it can be shared, but only by specifying a list of authorized AWS account IDs for the <code>ValuesToAdd</code> parameter. You can't use <code>all</code> as a value for that parameter in this case.</p> <p>To view which AWS accounts have access to copy or restore a manual DB cluster snapshot, or whether a manual DB cluster snapshot is public or private, use the <a>DescribeDBClusterSnapshotAttributes</a> API action.</p> fn modify_db_cluster_snapshot_attribute( &self, input: ModifyDBClusterSnapshotAttributeMessage, ) -> RusotoFuture<ModifyDBClusterSnapshotAttributeResult, ModifyDBClusterSnapshotAttributeError>; /// <p>Modifies settings for a DB instance. You can change one or more database configuration parameters by specifying these parameters and the new values in the request. To learn what modifications you can make to your DB instance, call <a>DescribeValidDBInstanceModifications</a> before you call <a>ModifyDBInstance</a>. </p> fn modify_db_instance( &self, input: ModifyDBInstanceMessage, ) -> RusotoFuture<ModifyDBInstanceResult, ModifyDBInstanceError>; /// <p><p> Modifies the parameters of a DB parameter group. To modify more than one parameter, submit a list of the following: <code>ParameterName</code>, <code>ParameterValue</code>, and <code>ApplyMethod</code>. A maximum of 20 parameters can be modified in a single request. </p> <note> <p>Changes to dynamic parameters are applied immediately. Changes to static parameters require a reboot without failover to the DB instance associated with the parameter group before the change can take effect.</p> </note> <important> <p>After you modify a DB parameter group, you should wait at least 5 minutes before creating your first DB instance that uses that DB parameter group as the default parameter group. This allows Amazon Neptune to fully complete the modify action before the parameter group is used as the default for a new DB instance. This is especially important for parameters that are critical when creating the default database for a DB instance, such as the character set for the default database defined by the <code>character_set_database</code> parameter. You can use the <i>Parameter Groups</i> option of the Amazon Neptune console or the <i>DescribeDBParameters</i> command to verify that your DB parameter group has been created or modified.</p> </important></p> fn modify_db_parameter_group( &self, input: ModifyDBParameterGroupMessage, ) -> RusotoFuture<DBParameterGroupNameMessage, ModifyDBParameterGroupError>; /// <p>Modifies an existing DB subnet group. DB subnet groups must contain at least one subnet in at least two AZs in the AWS Region.</p> fn modify_db_subnet_group( &self, input: ModifyDBSubnetGroupMessage, ) -> RusotoFuture<ModifyDBSubnetGroupResult, ModifyDBSubnetGroupError>; /// <p>Modifies an existing event notification subscription.
Note that you can't modify the source identifiers using this call; to change source identifiers for a subscription, use the <a>AddSourceIdentifierToSubscription</a> and <a>RemoveSourceIdentifierFromSubscription</a> calls.</p> <p>You can see a list of the event categories for a given SourceType by using the <b>DescribeEventCategories</b> action.</p> fn modify_event_subscription( &self, input: ModifyEventSubscriptionMessage, ) -> RusotoFuture<ModifyEventSubscriptionResult, ModifyEventSubscriptionError>; /// <p>Promotes a Read Replica DB cluster to a standalone DB cluster.</p> fn promote_read_replica_db_cluster( &self, input: PromoteReadReplicaDBClusterMessage, ) -> RusotoFuture<PromoteReadReplicaDBClusterResult, PromoteReadReplicaDBClusterError>; /// <p>You might need to reboot your DB instance, usually for maintenance reasons. For example, if you make certain modifications, or if you change the DB parameter group associated with the DB instance, you must reboot the instance for the changes to take effect. </p> <p>Rebooting a DB instance restarts the database engine service. Rebooting a DB instance results in a momentary outage, during which the DB instance status is set to rebooting. </p> fn reboot_db_instance( &self, input: RebootDBInstanceMessage, ) -> RusotoFuture<RebootDBInstanceResult, RebootDBInstanceError>; /// <p>Disassociates an Identity and Access Management (IAM) role from a DB cluster. </p> fn remove_role_from_db_cluster( &self, input: RemoveRoleFromDBClusterMessage, ) -> RusotoFuture<(), RemoveRoleFromDBClusterError>; /// <p>Removes a source identifier from an existing event notification subscription.</p> fn remove_source_identifier_from_subscription( &self, input: RemoveSourceIdentifierFromSubscriptionMessage, ) -> RusotoFuture< RemoveSourceIdentifierFromSubscriptionResult, RemoveSourceIdentifierFromSubscriptionError, >; /// <p>Removes metadata tags from an Amazon Neptune resource.</p> fn remove_tags_from_resource( &self, input: RemoveTagsFromResourceMessage, ) -> RusotoFuture<(), RemoveTagsFromResourceError>; /// <p> Modifies the parameters of a DB cluster parameter group to the default value. To reset specific parameters submit a list of the following: <code>ParameterName</code> and <code>ApplyMethod</code>. To reset the entire DB cluster parameter group, specify the <code>DBClusterParameterGroupName</code> and <code>ResetAllParameters</code> parameters. </p> <p> When resetting the entire group, dynamic parameters are updated immediately and static parameters are set to <code>pending-reboot</code> to take effect on the next DB instance restart or <a>RebootDBInstance</a> request. You must call <a>RebootDBInstance</a> for every DB instance in your DB cluster that you want the updated static parameter to apply to.</p> fn reset_db_cluster_parameter_group( &self, input: ResetDBClusterParameterGroupMessage, ) -> RusotoFuture<DBClusterParameterGroupNameMessage, ResetDBClusterParameterGroupError>; /// <p>Modifies the parameters of a DB parameter group to the engine/system default value. To reset specific parameters, provide a list of the following: <code>ParameterName</code> and <code>ApplyMethod</code>. To reset the entire DB parameter group, specify the <code>DBParameterGroup</code> name and <code>ResetAllParameters</code> parameters. When resetting the entire group, dynamic parameters are updated immediately and static parameters are set to <code>pending-reboot</code> to take effect on the next DB instance restart or <code>RebootDBInstance</code> request. 
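/// A sketch of resetting an entire group (the group name and the blocking `.wait()` call are illustrative assumptions):
///
/// ```rust,ignore
/// use futures::Future;
/// use rusoto_core::Region;
/// use rusoto_neptune::{Neptune, NeptuneClient, ResetDBParameterGroupMessage};
///
/// let client = NeptuneClient::new(Region::UsEast1);
/// let request = ResetDBParameterGroupMessage {
///     db_parameter_group_name: "my-neptune-params".to_owned(),
///     // Reset every parameter; to reset only some, supply `parameters`
///     // with ParameterName/ApplyMethod pairs instead of this flag.
///     reset_all_parameters: Some(true),
///     ..Default::default()
/// };
/// let reset = client
///     .reset_db_parameter_group(request)
///     .wait()
///     .expect("reset_db_parameter_group failed");
/// println!("{:?}", reset);
/// ```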
</p> fn reset_db_parameter_group( &self, input: ResetDBParameterGroupMessage, ) -> RusotoFuture<DBParameterGroupNameMessage, ResetDBParameterGroupError>; /// <p>Creates a new DB cluster from a DB snapshot or DB cluster snapshot.</p> <p>If a DB snapshot is specified, the target DB cluster is created from the source DB snapshot with a default configuration and default security group.</p> <p>If a DB cluster snapshot is specified, the target DB cluster is created from the source DB cluster restore point with the same configuration as the original source DB cluster, except that the new DB cluster is created with the default security group.</p> fn restore_db_cluster_from_snapshot( &self, input: RestoreDBClusterFromSnapshotMessage, ) -> RusotoFuture<RestoreDBClusterFromSnapshotResult, RestoreDBClusterFromSnapshotError>; /// <p><p>Restores a DB cluster to an arbitrary point in time. Users can restore to any point in time before <code>LatestRestorableTime</code> for up to <code>BackupRetentionPeriod</code> days. The target DB cluster is created from the source DB cluster with the same configuration as the original DB cluster, except that the new DB cluster is created with the default DB security group. </p> <note> <p>This action only restores the DB cluster, not the DB instances for that DB cluster. You must invoke the <a>CreateDBInstance</a> action to create DB instances for the restored DB cluster, specifying the identifier of the restored DB cluster in <code>DBClusterIdentifier</code>. You can create DB instances only after the <code>RestoreDBClusterToPointInTime</code> action has completed and the DB cluster is available.</p> </note></p> fn restore_db_cluster_to_point_in_time( &self, input: RestoreDBClusterToPointInTimeMessage, ) -> RusotoFuture<RestoreDBClusterToPointInTimeResult, RestoreDBClusterToPointInTimeError>; } /// A client for the Amazon Neptune API. pub struct NeptuneClient { client: Client, region: region::Region, } impl NeptuneClient { /// Creates a client backed by the default tokio event loop. /// /// The client will use the default credentials provider and tls client. pub fn new(region: region::Region) -> NeptuneClient { NeptuneClient { client: Client::shared(), region: region, } } pub fn new_with<P, D>( request_dispatcher: D, credentials_provider: P, region: region::Region, ) -> NeptuneClient where P: ProvideAwsCredentials + Send + Sync + 'static, P::Future: Send, D: DispatchSignedRequest + Send + Sync + 'static, D::Future: Send, { NeptuneClient { client: Client::new_with(credentials_provider, request_dispatcher), region: region, } } } impl Neptune for NeptuneClient { /// <p>Associates an Identity and Access Management (IAM) role with a Neptune DB cluster.
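/// A sketch of attaching a role (the identifiers, the role ARN, and the blocking `.wait()` call are illustrative assumptions):
///
/// ```rust,ignore
/// use futures::Future;
/// use rusoto_core::Region;
/// use rusoto_neptune::{AddRoleToDBClusterMessage, Neptune, NeptuneClient};
///
/// let client = NeptuneClient::new(Region::UsEast1);
/// client
///     .add_role_to_db_cluster(AddRoleToDBClusterMessage {
///         db_cluster_identifier: "my-neptune-cluster".to_owned(),
///         role_arn: "arn:aws:iam::123456789012:role/my-neptune-role".to_owned(),
///     })
///     .wait()
///     .expect("add_role_to_db_cluster failed");
/// ```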
</p> fn add_role_to_db_cluster( &self, input: AddRoleToDBClusterMessage, ) -> RusotoFuture<(), AddRoleToDBClusterError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "AddRoleToDBCluster"); params.put("Version", "2014-10-31"); AddRoleToDBClusterMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(AddRoleToDBClusterError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(future::ok(::std::mem::drop(response))) }) } /// <p>Adds a source identifier to an existing event notification subscription.</p> fn add_source_identifier_to_subscription( &self, input: AddSourceIdentifierToSubscriptionMessage, ) -> RusotoFuture<AddSourceIdentifierToSubscriptionResult, AddSourceIdentifierToSubscriptionError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "AddSourceIdentifierToSubscription"); params.put("Version", "2014-10-31"); AddSourceIdentifierToSubscriptionMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(AddSourceIdentifierToSubscriptionError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(response.buffer().from_err().and_then(move |response| { let result; if response.body.is_empty() { result = AddSourceIdentifierToSubscriptionResult::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); try!(start_element(&actual_tag_name, &mut stack)); result = try!( AddSourceIdentifierToSubscriptionResultDeserializer::deserialize( "AddSourceIdentifierToSubscriptionResult", &mut stack ) ); skip_tree(&mut stack); try!(end_element(&actual_tag_name, &mut stack)); } Ok(result) })) }) } /// <p>Adds metadata tags to an Amazon Neptune resource. 
These tags can also be used with cost allocation reporting to track cost associated with Amazon Neptune resources, or used in a Condition statement in an IAM policy for Amazon Neptune.</p> fn add_tags_to_resource( &self, input: AddTagsToResourceMessage, ) -> RusotoFuture<(), AddTagsToResourceError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "AddTagsToResource"); params.put("Version", "2014-10-31"); AddTagsToResourceMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(AddTagsToResourceError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(future::ok(::std::mem::drop(response))) }) } /// <p>Applies a pending maintenance action to a resource (for example, to a DB instance).</p> fn apply_pending_maintenance_action( &self, input: ApplyPendingMaintenanceActionMessage, ) -> RusotoFuture<ApplyPendingMaintenanceActionResult, ApplyPendingMaintenanceActionError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "ApplyPendingMaintenanceAction"); params.put("Version", "2014-10-31"); ApplyPendingMaintenanceActionMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(ApplyPendingMaintenanceActionError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(response.buffer().from_err().and_then(move |response| { let result; if response.body.is_empty() { result = ApplyPendingMaintenanceActionResult::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); try!(start_element(&actual_tag_name, &mut stack)); result = try!( ApplyPendingMaintenanceActionResultDeserializer::deserialize( "ApplyPendingMaintenanceActionResult", &mut stack ) ); skip_tree(&mut stack); try!(end_element(&actual_tag_name, &mut stack)); } Ok(result) })) }) } /// <p>Copies the specified DB cluster parameter group.</p> fn copy_db_cluster_parameter_group( &self, input: CopyDBClusterParameterGroupMessage, ) -> RusotoFuture<CopyDBClusterParameterGroupResult, CopyDBClusterParameterGroupError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "CopyDBClusterParameterGroup"); params.put("Version", "2014-10-31"); CopyDBClusterParameterGroupMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return 
Box::new(response.buffer().from_err().and_then(|response| { Err(CopyDBClusterParameterGroupError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(response.buffer().from_err().and_then(move |response| { let result; if response.body.is_empty() { result = CopyDBClusterParameterGroupResult::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); try!(start_element(&actual_tag_name, &mut stack)); result = try!(CopyDBClusterParameterGroupResultDeserializer::deserialize( "CopyDBClusterParameterGroupResult", &mut stack )); skip_tree(&mut stack); try!(end_element(&actual_tag_name, &mut stack)); } Ok(result) })) }) } /// <p>Copies a snapshot of a DB cluster.</p> <p>To copy a DB cluster snapshot from a shared manual DB cluster snapshot, <code>SourceDBClusterSnapshotIdentifier</code> must be the Amazon Resource Name (ARN) of the shared DB cluster snapshot.</p> <p>You can copy an encrypted DB cluster snapshot from another AWS Region. In that case, the AWS Region where you call the <code>CopyDBClusterSnapshot</code> action is the destination AWS Region for the encrypted DB cluster snapshot to be copied to. To copy an encrypted DB cluster snapshot from another AWS Region, you must provide the following values:</p> <ul> <li> <p> <code>KmsKeyId</code> - The AWS Key Management Service (AWS KMS) key identifier for the key to use to encrypt the copy of the DB cluster snapshot in the destination AWS Region.</p> </li> <li> <p> <code>PreSignedUrl</code> - A URL that contains a Signature Version 4 signed request for the <code>CopyDBClusterSnapshot</code> action to be called in the source AWS Region where the DB cluster snapshot is copied from. The pre-signed URL must be a valid request for the <code>CopyDBClusterSnapshot</code> API action that can be executed in the source AWS Region that contains the encrypted DB cluster snapshot to be copied.</p> <p>The pre-signed URL request must contain the following parameter values:</p> <ul> <li> <p> <code>KmsKeyId</code> - The KMS key identifier for the key to use to encrypt the copy of the DB cluster snapshot in the destination AWS Region. This is the same identifier for both the <code>CopyDBClusterSnapshot</code> action that is called in the destination AWS Region, and the action contained in the pre-signed URL.</p> </li> <li> <p> <code>DestinationRegion</code> - The name of the AWS Region that the DB cluster snapshot will be created in.</p> </li> <li> <p> <code>SourceDBClusterSnapshotIdentifier</code> - The DB cluster snapshot identifier for the encrypted DB cluster snapshot to be copied. This identifier must be in the Amazon Resource Name (ARN) format for the source AWS Region.
For example, if you are copying an encrypted DB cluster snapshot from the us-west-2 AWS Region, then your <code>SourceDBClusterSnapshotIdentifier</code> looks like the following example: <code>arn:aws:rds:us-west-2:123456789012:cluster-snapshot:neptune-cluster1-snapshot-20161115</code>.</p> </li> </ul> <p>To learn how to generate a Signature Version 4 signed request, see <a href="http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html"> Authenticating Requests: Using Query Parameters (AWS Signature Version 4)</a> and <a href="http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html"> Signature Version 4 Signing Process</a>.</p> </li> <li> <p> <code>TargetDBClusterSnapshotIdentifier</code> - The identifier for the new copy of the DB cluster snapshot in the destination AWS Region.</p> </li> <li> <p> <code>SourceDBClusterSnapshotIdentifier</code> - The DB cluster snapshot identifier for the encrypted DB cluster snapshot to be copied. This identifier must be in the ARN format for the source AWS Region and is the same value as the <code>SourceDBClusterSnapshotIdentifier</code> in the pre-signed URL. </p> </li> </ul> <p>To cancel the copy operation once it is in progress, delete the target DB cluster snapshot identified by <code>TargetDBClusterSnapshotIdentifier</code> while that DB cluster snapshot is in "copying" status.</p> fn copy_db_cluster_snapshot( &self, input: CopyDBClusterSnapshotMessage, ) -> RusotoFuture<CopyDBClusterSnapshotResult, CopyDBClusterSnapshotError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "CopyDBClusterSnapshot"); params.put("Version", "2014-10-31"); CopyDBClusterSnapshotMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(CopyDBClusterSnapshotError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(response.buffer().from_err().and_then(move |response| { let result; if response.body.is_empty() { result = CopyDBClusterSnapshotResult::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); try!(start_element(&actual_tag_name, &mut stack)); result = try!(CopyDBClusterSnapshotResultDeserializer::deserialize( "CopyDBClusterSnapshotResult", &mut stack )); skip_tree(&mut stack); try!(end_element(&actual_tag_name, &mut stack)); } Ok(result) })) }) } /// <p>Copies the specified DB parameter group.</p> fn copy_db_parameter_group( &self, input: CopyDBParameterGroupMessage, ) -> RusotoFuture<CopyDBParameterGroupResult, CopyDBParameterGroupError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "CopyDBParameterGroup"); params.put("Version", "2014-10-31"); CopyDBParameterGroupMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); 
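        // The signed request is dispatched below. A non-success HTTP status is
        // buffered and parsed into a typed `CopyDBParameterGroupError`; a
        // successful response is either defaulted (empty body) or deserialized
        // from the XML payload into a `CopyDBParameterGroupResult`.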
self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(CopyDBParameterGroupError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(response.buffer().from_err().and_then(move |response| { let result; if response.body.is_empty() { result = CopyDBParameterGroupResult::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); try!(start_element(&actual_tag_name, &mut stack)); result = try!(CopyDBParameterGroupResultDeserializer::deserialize( "CopyDBParameterGroupResult", &mut stack )); skip_tree(&mut stack); try!(end_element(&actual_tag_name, &mut stack)); } Ok(result) })) }) } /// <p>Creates a new Amazon Neptune DB cluster.</p> <p>You can use the <code>ReplicationSourceIdentifier</code> parameter to create the DB cluster as a Read Replica of another DB cluster or Amazon Neptune DB instance. For cross-region replication where the DB cluster identified by <code>ReplicationSourceIdentifier</code> is encrypted, you must also specify the <code>PreSignedUrl</code> parameter.</p> fn create_db_cluster( &self, input: CreateDBClusterMessage, ) -> RusotoFuture<CreateDBClusterResult, CreateDBClusterError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "CreateDBCluster"); params.put("Version", "2014-10-31"); CreateDBClusterMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(CreateDBClusterError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(response.buffer().from_err().and_then(move |response| { let result; if response.body.is_empty() { result = CreateDBClusterResult::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); try!(start_element(&actual_tag_name, &mut stack)); result = try!(CreateDBClusterResultDeserializer::deserialize( "CreateDBClusterResult", &mut stack )); skip_tree(&mut stack); try!(end_element(&actual_tag_name, &mut stack)); } Ok(result) })) }) } /// <p><p>Creates a new DB cluster parameter group.</p> <p>Parameters in a DB cluster parameter group apply to all of the instances in a DB cluster.</p> <p> A DB cluster parameter group is initially created with the default parameters for the database engine used by instances in the DB cluster. To provide custom values for any of the parameters, you must modify the group after creating it using <a>ModifyDBClusterParameterGroup</a>. Once you&#39;ve created a DB cluster parameter group, you need to associate it with your DB cluster using <a>ModifyDBCluster</a>. 
When you associate a new DB cluster parameter group with a running DB cluster, you need to reboot the DB instances in the DB cluster without failover for the new DB cluster parameter group and associated settings to take effect. </p> <important> <p>After you create a DB cluster parameter group, you should wait at least 5 minutes before creating your first DB cluster that uses that DB cluster parameter group as the default parameter group. This allows Amazon Neptune to fully complete the create action before the DB cluster parameter group is used as the default for a new DB cluster. This is especially important for parameters that are critical when creating the default database for a DB cluster, such as the character set for the default database defined by the <code>character_set_database</code> parameter. You can use the <i>Parameter Groups</i> option of the <a href="https://console.aws.amazon.com/rds/">Amazon Neptune console</a> or the <a>DescribeDBClusterParameters</a> command to verify that your DB cluster parameter group has been created or modified.</p> </important></p> fn create_db_cluster_parameter_group( &self, input: CreateDBClusterParameterGroupMessage, ) -> RusotoFuture<CreateDBClusterParameterGroupResult, CreateDBClusterParameterGroupError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "CreateDBClusterParameterGroup"); params.put("Version", "2014-10-31"); CreateDBClusterParameterGroupMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(CreateDBClusterParameterGroupError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(response.buffer().from_err().and_then(move |response| { let result; if response.body.is_empty() { result = CreateDBClusterParameterGroupResult::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); try!(start_element(&actual_tag_name, &mut stack)); result = try!( CreateDBClusterParameterGroupResultDeserializer::deserialize( "CreateDBClusterParameterGroupResult", &mut stack ) ); skip_tree(&mut stack); try!(end_element(&actual_tag_name, &mut stack)); } Ok(result) })) }) } /// <p>Creates a snapshot of a DB cluster.
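/// A sketch of taking a snapshot (the identifiers and the blocking `.wait()` call are illustrative assumptions):
///
/// ```rust,ignore
/// use futures::Future;
/// use rusoto_core::Region;
/// use rusoto_neptune::{CreateDBClusterSnapshotMessage, Neptune, NeptuneClient};
///
/// let client = NeptuneClient::new(Region::UsEast1);
/// let created = client
///     .create_db_cluster_snapshot(CreateDBClusterSnapshotMessage {
///         db_cluster_identifier: "my-neptune-cluster".to_owned(),
///         db_cluster_snapshot_identifier: "my-neptune-cluster-snap-1".to_owned(),
///         ..Default::default()
///     })
///     .wait()
///     .expect("create_db_cluster_snapshot failed");
/// println!("{:?}", created);
/// ```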
</p> fn create_db_cluster_snapshot( &self, input: CreateDBClusterSnapshotMessage, ) -> RusotoFuture<CreateDBClusterSnapshotResult, CreateDBClusterSnapshotError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "CreateDBClusterSnapshot"); params.put("Version", "2014-10-31"); CreateDBClusterSnapshotMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(CreateDBClusterSnapshotError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(response.buffer().from_err().and_then(move |response| { let result; if response.body.is_empty() { result = CreateDBClusterSnapshotResult::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); try!(start_element(&actual_tag_name, &mut stack)); result = try!(CreateDBClusterSnapshotResultDeserializer::deserialize( "CreateDBClusterSnapshotResult", &mut stack )); skip_tree(&mut stack); try!(end_element(&actual_tag_name, &mut stack)); } Ok(result) })) }) } /// <p>Creates a new DB instance.</p> fn create_db_instance( &self, input: CreateDBInstanceMessage, ) -> RusotoFuture<CreateDBInstanceResult, CreateDBInstanceError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "CreateDBInstance"); params.put("Version", "2014-10-31"); CreateDBInstanceMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(CreateDBInstanceError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(response.buffer().from_err().and_then(move |response| { let result; if response.body.is_empty() { result = CreateDBInstanceResult::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); try!(start_element(&actual_tag_name, &mut stack)); result = try!(CreateDBInstanceResultDeserializer::deserialize( "CreateDBInstanceResult", &mut stack )); skip_tree(&mut stack); try!(end_element(&actual_tag_name, &mut stack)); } Ok(result) })) }) } /// <p><p>Creates a new DB parameter group.</p> <p> A DB parameter group is initially created with the default parameters for the database engine used by the DB instance. To provide custom values for any of the parameters, you must modify the group after creating it using <i>ModifyDBParameterGroup</i>. Once you&#39;ve created a DB parameter group, you need to associate it with your DB instance using <i>ModifyDBInstance</i>. 
When you associate a new DB parameter group with a running DB instance, you need to reboot the DB instance without failover for the new DB parameter group and associated settings to take effect. </p> <important> <p>After you create a DB parameter group, you should wait at least 5 minutes before creating your first DB instance that uses that DB parameter group as the default parameter group. This allows Amazon Neptune to fully complete the create action before the parameter group is used as the default for a new DB instance. This is especially important for parameters that are critical when creating the default database for a DB instance, such as the character set for the default database defined by the <code>character_set_database</code> parameter. You can use the <i>Parameter Groups</i> option of the Amazon Neptune console or the <i>DescribeDBParameters</i> command to verify that your DB parameter group has been created or modified.</p> </important></p> fn create_db_parameter_group( &self, input: CreateDBParameterGroupMessage, ) -> RusotoFuture<CreateDBParameterGroupResult, CreateDBParameterGroupError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "CreateDBParameterGroup"); params.put("Version", "2014-10-31"); CreateDBParameterGroupMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(CreateDBParameterGroupError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(response.buffer().from_err().and_then(move |response| { let result; if response.body.is_empty() { result = CreateDBParameterGroupResult::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); try!(start_element(&actual_tag_name, &mut stack)); result = try!(CreateDBParameterGroupResultDeserializer::deserialize( "CreateDBParameterGroupResult", &mut stack )); skip_tree(&mut stack); try!(end_element(&actual_tag_name, &mut stack)); } Ok(result) })) }) } /// <p>Creates a new DB subnet group.
DB subnet groups must contain at least one subnet in at least two AZs in the AWS Region.</p> fn create_db_subnet_group( &self, input: CreateDBSubnetGroupMessage, ) -> RusotoFuture<CreateDBSubnetGroupResult, CreateDBSubnetGroupError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "CreateDBSubnetGroup"); params.put("Version", "2014-10-31"); CreateDBSubnetGroupMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(CreateDBSubnetGroupError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(response.buffer().from_err().and_then(move |response| { let result; if response.body.is_empty() { result = CreateDBSubnetGroupResult::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); try!(start_element(&actual_tag_name, &mut stack)); result = try!(CreateDBSubnetGroupResultDeserializer::deserialize( "CreateDBSubnetGroupResult", &mut stack )); skip_tree(&mut stack); try!(end_element(&actual_tag_name, &mut stack)); } Ok(result) })) }) } /// <p>Creates an event notification subscription. This action requires a topic ARN (Amazon Resource Name) created by either the Neptune console, the SNS console, or the SNS API. To obtain an ARN with SNS, you must create a topic in Amazon SNS and subscribe to the topic. The ARN is displayed in the SNS console.</p> <p>You can specify the type of source (SourceType) you want to be notified of, provide a list of Neptune sources (SourceIds) that triggers the events, and provide a list of event categories (EventCategories) for events you want to be notified of. For example, you can specify SourceType = db-instance, SourceIds = mydbinstance1, mydbinstance2 and EventCategories = Availability, Backup.</p> <p>If you specify both the SourceType and SourceIds, such as SourceType = db-instance and SourceIdentifier = myDBInstance1, you are notified of all the db-instance events for the specified source. If you specify a SourceType but do not specify a SourceIdentifier, you receive notice of the events for that source type for all your Neptune sources. 
If you specify neither the SourceType nor the SourceIdentifier, you are notified of events generated from all Neptune sources belonging to your customer account.</p> fn create_event_subscription( &self, input: CreateEventSubscriptionMessage, ) -> RusotoFuture<CreateEventSubscriptionResult, CreateEventSubscriptionError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "CreateEventSubscription"); params.put("Version", "2014-10-31"); CreateEventSubscriptionMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(CreateEventSubscriptionError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(response.buffer().from_err().and_then(move |response| { let result; if response.body.is_empty() { result = CreateEventSubscriptionResult::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); try!(start_element(&actual_tag_name, &mut stack)); result = try!(CreateEventSubscriptionResultDeserializer::deserialize( "CreateEventSubscriptionResult", &mut stack )); skip_tree(&mut stack); try!(end_element(&actual_tag_name, &mut stack)); } Ok(result) })) }) } /// <p><p>The DeleteDBCluster action deletes a previously provisioned DB cluster. When you delete a DB cluster, all automated backups for that DB cluster are deleted and can't be recovered. Manual DB cluster snapshots of the specified DB cluster are not deleted.</p> <p/></p> fn delete_db_cluster( &self, input: DeleteDBClusterMessage, ) -> RusotoFuture<DeleteDBClusterResult, DeleteDBClusterError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "DeleteDBCluster"); params.put("Version", "2014-10-31"); DeleteDBClusterMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(DeleteDBClusterError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(response.buffer().from_err().and_then(move |response| { let result; if response.body.is_empty() { result = DeleteDBClusterResult::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); try!(start_element(&actual_tag_name, &mut stack)); result = try!(DeleteDBClusterResultDeserializer::deserialize( "DeleteDBClusterResult", &mut stack )); skip_tree(&mut stack); try!(end_element(&actual_tag_name, &mut stack)); } Ok(result) })) }) }
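// Illustrative usage (not part of the generated client): deleting a cluster
// while keeping a final snapshot. A sketch under the same assumptions as the
// earlier example (configured `client`, blocking `sync`); the identifiers are
// hypothetical.
//
//     let input = DeleteDBClusterMessage {
//         db_cluster_identifier: "my-neptune-cluster".to_string(),
//         skip_final_snapshot: Some(false),
//         final_db_snapshot_identifier: Some("my-neptune-final-snapshot".to_string()),
//         ..Default::default()
//     };
//     let _ = client.delete_db_cluster(input).sync();

/// <p>Deletes a specified DB cluster parameter group.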
The DB cluster parameter group to be deleted can't be associated with any DB clusters.</p> fn delete_db_cluster_parameter_group( &self, input: DeleteDBClusterParameterGroupMessage, ) -> RusotoFuture<(), DeleteDBClusterParameterGroupError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "DeleteDBClusterParameterGroup"); params.put("Version", "2014-10-31"); DeleteDBClusterParameterGroupMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(DeleteDBClusterParameterGroupError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(future::ok(::std::mem::drop(response))) }) } /// <p><p>Deletes a DB cluster snapshot. If the snapshot is being copied, the copy operation is terminated.</p> <note> <p>The DB cluster snapshot must be in the <code>available</code> state to be deleted.</p> </note></p> fn delete_db_cluster_snapshot( &self, input: DeleteDBClusterSnapshotMessage, ) -> RusotoFuture<DeleteDBClusterSnapshotResult, DeleteDBClusterSnapshotError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "DeleteDBClusterSnapshot"); params.put("Version", "2014-10-31"); DeleteDBClusterSnapshotMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(DeleteDBClusterSnapshotError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(response.buffer().from_err().and_then(move |response| { let result; if response.body.is_empty() { result = DeleteDBClusterSnapshotResult::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); try!(start_element(&actual_tag_name, &mut stack)); result = try!(DeleteDBClusterSnapshotResultDeserializer::deserialize( "DeleteDBClusterSnapshotResult", &mut stack )); skip_tree(&mut stack); try!(end_element(&actual_tag_name, &mut stack)); } Ok(result) })) }) } /// <p>The DeleteDBInstance action deletes a previously provisioned DB instance. When you delete a DB instance, all automated backups for that instance are deleted and can't be recovered. Manual DB snapshots of the DB instance to be deleted by <code>DeleteDBInstance</code> are not deleted.</p> <p> If you request a final DB snapshot the status of the Amazon Neptune DB instance is <code>deleting</code> until the DB snapshot is created. The API action <code>DescribeDBInstance</code> is used to monitor the status of this operation. The action can't be canceled or reverted once submitted. 
</p> <p>Note that when a DB instance is in a failure state and has a status of <code>failed</code>, <code>incompatible-restore</code>, or <code>incompatible-network</code>, you can only delete it when the <code>SkipFinalSnapshot</code> parameter is set to <code>true</code>.</p> <p>If the specified DB instance is part of a DB cluster, you can't delete the DB instance if both of the following conditions are true:</p> <ul> <li> <p>The DB cluster is a Read Replica of another DB cluster.</p> </li> <li> <p>The DB instance is the only instance in the DB cluster.</p> </li> </ul> <p>To delete a DB instance in this case, first call the <a>PromoteReadReplicaDBCluster</a> API action to promote the DB cluster so it's no longer a Read Replica. After the promotion completes, then call the <code>DeleteDBInstance</code> API action to delete the final instance in the DB cluster.</p> fn delete_db_instance( &self, input: DeleteDBInstanceMessage, ) -> RusotoFuture<DeleteDBInstanceResult, DeleteDBInstanceError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "DeleteDBInstance"); params.put("Version", "2014-10-31"); DeleteDBInstanceMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(DeleteDBInstanceError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(response.buffer().from_err().and_then(move |response| { let result; if response.body.is_empty() { result = DeleteDBInstanceResult::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); try!(start_element(&actual_tag_name, &mut stack)); result = try!(DeleteDBInstanceResultDeserializer::deserialize( "DeleteDBInstanceResult", &mut stack )); skip_tree(&mut stack); try!(end_element(&actual_tag_name, &mut stack)); } Ok(result) })) }) } /// <p>Deletes a specified DBParameterGroup. 
The DBParameterGroup to be deleted can't be associated with any DB instances.</p> fn delete_db_parameter_group( &self, input: DeleteDBParameterGroupMessage, ) -> RusotoFuture<(), DeleteDBParameterGroupError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "DeleteDBParameterGroup"); params.put("Version", "2014-10-31"); DeleteDBParameterGroupMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(DeleteDBParameterGroupError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(future::ok(::std::mem::drop(response))) }) } /// <p><p>Deletes a DB subnet group.</p> <note> <p>The specified database subnet group must not be associated with any DB instances.</p> </note></p> fn delete_db_subnet_group( &self, input: DeleteDBSubnetGroupMessage, ) -> RusotoFuture<(), DeleteDBSubnetGroupError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "DeleteDBSubnetGroup"); params.put("Version", "2014-10-31"); DeleteDBSubnetGroupMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(DeleteDBSubnetGroupError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(future::ok(::std::mem::drop(response))) }) } /// <p>Deletes an event notification subscription.</p> fn delete_event_subscription( &self, input: DeleteEventSubscriptionMessage, ) -> RusotoFuture<DeleteEventSubscriptionResult, DeleteEventSubscriptionError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "DeleteEventSubscription"); params.put("Version", "2014-10-31"); DeleteEventSubscriptionMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(DeleteEventSubscriptionError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(response.buffer().from_err().and_then(move |response| { let result; if response.body.is_empty() { result = DeleteEventSubscriptionResult::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); try!(start_element(&actual_tag_name, &mut stack)); result = try!(DeleteEventSubscriptionResultDeserializer::deserialize( "DeleteEventSubscriptionResult", &mut stack )); skip_tree(&mut stack); try!(end_element(&actual_tag_name, &mut 
stack)); } Ok(result) })) }) } /// <p> Returns a list of <code>DBClusterParameterGroup</code> descriptions. If a <code>DBClusterParameterGroupName</code> parameter is specified, the list will contain only the description of the specified DB cluster parameter group. </p> fn describe_db_cluster_parameter_groups( &self, input: DescribeDBClusterParameterGroupsMessage, ) -> RusotoFuture<DBClusterParameterGroupsMessage, DescribeDBClusterParameterGroupsError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "DescribeDBClusterParameterGroups"); params.put("Version", "2014-10-31"); DescribeDBClusterParameterGroupsMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(DescribeDBClusterParameterGroupsError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(response.buffer().from_err().and_then(move |response| { let result; if response.body.is_empty() { result = DBClusterParameterGroupsMessage::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); try!(start_element(&actual_tag_name, &mut stack)); result = try!(DBClusterParameterGroupsMessageDeserializer::deserialize( "DescribeDBClusterParameterGroupsResult", &mut stack )); skip_tree(&mut stack); try!(end_element(&actual_tag_name, &mut stack)); } Ok(result) })) }) } /// <p>Returns the detailed parameter list for a particular DB cluster parameter group.</p> fn describe_db_cluster_parameters( &self, input: DescribeDBClusterParametersMessage, ) -> RusotoFuture<DBClusterParameterGroupDetails, DescribeDBClusterParametersError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "DescribeDBClusterParameters"); params.put("Version", "2014-10-31"); DescribeDBClusterParametersMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(DescribeDBClusterParametersError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(response.buffer().from_err().and_then(move |response| { let result; if response.body.is_empty() { result = DBClusterParameterGroupDetails::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); try!(start_element(&actual_tag_name, &mut stack)); result = try!(DBClusterParameterGroupDetailsDeserializer::deserialize( "DescribeDBClusterParametersResult", &mut stack )); skip_tree(&mut stack); try!(end_element(&actual_tag_name, &mut 
stack)); } Ok(result) })) }) } /// <p>Returns a list of DB cluster snapshot attribute names and values for a manual DB cluster snapshot.</p> <p>When sharing snapshots with other AWS accounts, <code>DescribeDBClusterSnapshotAttributes</code> returns the <code>restore</code> attribute and a list of IDs for the AWS accounts that are authorized to copy or restore the manual DB cluster snapshot. If <code>all</code> is included in the list of values for the <code>restore</code> attribute, then the manual DB cluster snapshot is public and can be copied or restored by all AWS accounts.</p> <p>To add or remove access for an AWS account to copy or restore a manual DB cluster snapshot, or to make the manual DB cluster snapshot public or private, use the <a>ModifyDBClusterSnapshotAttribute</a> API action.</p> fn describe_db_cluster_snapshot_attributes( &self, input: DescribeDBClusterSnapshotAttributesMessage, ) -> RusotoFuture< DescribeDBClusterSnapshotAttributesResult, DescribeDBClusterSnapshotAttributesError, > { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "DescribeDBClusterSnapshotAttributes"); params.put("Version", "2014-10-31"); DescribeDBClusterSnapshotAttributesMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(DescribeDBClusterSnapshotAttributesError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(response.buffer().from_err().and_then(move |response| { let result; if response.body.is_empty() { result = DescribeDBClusterSnapshotAttributesResult::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); try!(start_element(&actual_tag_name, &mut stack)); result = try!( DescribeDBClusterSnapshotAttributesResultDeserializer::deserialize( "DescribeDBClusterSnapshotAttributesResult", &mut stack ) ); skip_tree(&mut stack); try!(end_element(&actual_tag_name, &mut stack)); } Ok(result) })) }) } /// <p>Returns information about DB cluster snapshots. 
This API action supports pagination.</p> fn describe_db_cluster_snapshots( &self, input: DescribeDBClusterSnapshotsMessage, ) -> RusotoFuture<DBClusterSnapshotMessage, DescribeDBClusterSnapshotsError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "DescribeDBClusterSnapshots"); params.put("Version", "2014-10-31"); DescribeDBClusterSnapshotsMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(DescribeDBClusterSnapshotsError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(response.buffer().from_err().and_then(move |response| { let result; if response.body.is_empty() { result = DBClusterSnapshotMessage::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); try!(start_element(&actual_tag_name, &mut stack)); result = try!(DBClusterSnapshotMessageDeserializer::deserialize( "DescribeDBClusterSnapshotsResult", &mut stack )); skip_tree(&mut stack); try!(end_element(&actual_tag_name, &mut stack)); } Ok(result) })) }) } /// <p>Returns information about provisioned DB clusters. This API supports pagination.</p> fn describe_db_clusters( &self, input: DescribeDBClustersMessage, ) -> RusotoFuture<DBClusterMessage, DescribeDBClustersError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "DescribeDBClusters"); params.put("Version", "2014-10-31"); DescribeDBClustersMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(DescribeDBClustersError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(response.buffer().from_err().and_then(move |response| { let result; if response.body.is_empty() { result = DBClusterMessage::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); try!(start_element(&actual_tag_name, &mut stack)); result = try!(DBClusterMessageDeserializer::deserialize( "DescribeDBClustersResult", &mut stack )); skip_tree(&mut stack); try!(end_element(&actual_tag_name, &mut stack)); } Ok(result) })) }) } /// <p>Returns a list of the available DB engines.</p> fn describe_db_engine_versions( &self, input: DescribeDBEngineVersionsMessage, ) -> RusotoFuture<DBEngineVersionMessage, DescribeDBEngineVersionsError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "DescribeDBEngineVersions"); 
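// Neptune is exposed through the RDS query protocol: each request is signed
// for the "rds" service (see `SignedRequest::new` above) and pins the wire
// API version to "2014-10-31", as the next statement does.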
params.put("Version", "2014-10-31"); DescribeDBEngineVersionsMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(DescribeDBEngineVersionsError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(response.buffer().from_err().and_then(move |response| { let result; if response.body.is_empty() { result = DBEngineVersionMessage::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); try!(start_element(&actual_tag_name, &mut stack)); result = try!(DBEngineVersionMessageDeserializer::deserialize( "DescribeDBEngineVersionsResult", &mut stack )); skip_tree(&mut stack); try!(end_element(&actual_tag_name, &mut stack)); } Ok(result) })) }) } /// <p>Returns information about provisioned instances. This API supports pagination.</p> fn describe_db_instances( &self, input: DescribeDBInstancesMessage, ) -> RusotoFuture<DBInstanceMessage, DescribeDBInstancesError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "DescribeDBInstances"); params.put("Version", "2014-10-31"); DescribeDBInstancesMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(DescribeDBInstancesError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(response.buffer().from_err().and_then(move |response| { let result; if response.body.is_empty() { result = DBInstanceMessage::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); try!(start_element(&actual_tag_name, &mut stack)); result = try!(DBInstanceMessageDeserializer::deserialize( "DescribeDBInstancesResult", &mut stack )); skip_tree(&mut stack); try!(end_element(&actual_tag_name, &mut stack)); } Ok(result) })) }) } /// <p> Returns a list of <code>DBParameterGroup</code> descriptions. If a <code>DBParameterGroupName</code> is specified, the list will contain only the description of the specified DB parameter group. 
</p> fn describe_db_parameter_groups( &self, input: DescribeDBParameterGroupsMessage, ) -> RusotoFuture<DBParameterGroupsMessage, DescribeDBParameterGroupsError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "DescribeDBParameterGroups"); params.put("Version", "2014-10-31"); DescribeDBParameterGroupsMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(DescribeDBParameterGroupsError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(response.buffer().from_err().and_then(move |response| { let result; if response.body.is_empty() { result = DBParameterGroupsMessage::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); try!(start_element(&actual_tag_name, &mut stack)); result = try!(DBParameterGroupsMessageDeserializer::deserialize( "DescribeDBParameterGroupsResult", &mut stack )); skip_tree(&mut stack); try!(end_element(&actual_tag_name, &mut stack)); } Ok(result) })) }) } /// <p>Returns the detailed parameter list for a particular DB parameter group.</p> fn describe_db_parameters( &self, input: DescribeDBParametersMessage, ) -> RusotoFuture<DBParameterGroupDetails, DescribeDBParametersError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "DescribeDBParameters"); params.put("Version", "2014-10-31"); DescribeDBParametersMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(DescribeDBParametersError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(response.buffer().from_err().and_then(move |response| { let result; if response.body.is_empty() { result = DBParameterGroupDetails::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); try!(start_element(&actual_tag_name, &mut stack)); result = try!(DBParameterGroupDetailsDeserializer::deserialize( "DescribeDBParametersResult", &mut stack )); skip_tree(&mut stack); try!(end_element(&actual_tag_name, &mut stack)); } Ok(result) })) }) } /// <p>Returns a list of DBSubnetGroup descriptions. If a DBSubnetGroupName is specified, the list will contain only the descriptions of the specified DBSubnetGroup.</p> <p>For an overview of CIDR ranges, go to the <a href="http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing">Wikipedia Tutorial</a>. 
</p> fn describe_db_subnet_groups( &self, input: DescribeDBSubnetGroupsMessage, ) -> RusotoFuture<DBSubnetGroupMessage, DescribeDBSubnetGroupsError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "DescribeDBSubnetGroups"); params.put("Version", "2014-10-31"); DescribeDBSubnetGroupsMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(DescribeDBSubnetGroupsError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(response.buffer().from_err().and_then(move |response| { let result; if response.body.is_empty() { result = DBSubnetGroupMessage::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); try!(start_element(&actual_tag_name, &mut stack)); result = try!(DBSubnetGroupMessageDeserializer::deserialize( "DescribeDBSubnetGroupsResult", &mut stack )); skip_tree(&mut stack); try!(end_element(&actual_tag_name, &mut stack)); } Ok(result) })) }) } /// <p>Returns the default engine and system parameter information for the cluster database engine.</p> fn describe_engine_default_cluster_parameters( &self, input: DescribeEngineDefaultClusterParametersMessage, ) -> RusotoFuture< DescribeEngineDefaultClusterParametersResult, DescribeEngineDefaultClusterParametersError, > { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "DescribeEngineDefaultClusterParameters"); params.put("Version", "2014-10-31"); DescribeEngineDefaultClusterParametersMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(DescribeEngineDefaultClusterParametersError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(response.buffer().from_err().and_then(move |response| { let result; if response.body.is_empty() { result = DescribeEngineDefaultClusterParametersResult::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); try!(start_element(&actual_tag_name, &mut stack)); result = try!( DescribeEngineDefaultClusterParametersResultDeserializer::deserialize( "DescribeEngineDefaultClusterParametersResult", &mut stack ) ); skip_tree(&mut stack); try!(end_element(&actual_tag_name, &mut stack)); } Ok(result) })) }) } /// <p>Returns the default engine and system parameter information for the specified database engine.</p> fn describe_engine_default_parameters( &self, input: DescribeEngineDefaultParametersMessage, ) -> 
RusotoFuture<DescribeEngineDefaultParametersResult, DescribeEngineDefaultParametersError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "DescribeEngineDefaultParameters"); params.put("Version", "2014-10-31"); DescribeEngineDefaultParametersMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(DescribeEngineDefaultParametersError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(response.buffer().from_err().and_then(move |response| { let result; if response.body.is_empty() { result = DescribeEngineDefaultParametersResult::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); try!(start_element(&actual_tag_name, &mut stack)); result = try!( DescribeEngineDefaultParametersResultDeserializer::deserialize( "DescribeEngineDefaultParametersResult", &mut stack ) ); skip_tree(&mut stack); try!(end_element(&actual_tag_name, &mut stack)); } Ok(result) })) }) } /// <p>Displays a list of categories for all event source types, or, if specified, for a specified source type. </p> fn describe_event_categories( &self, input: DescribeEventCategoriesMessage, ) -> RusotoFuture<EventCategoriesMessage, DescribeEventCategoriesError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "DescribeEventCategories"); params.put("Version", "2014-10-31"); DescribeEventCategoriesMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(DescribeEventCategoriesError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(response.buffer().from_err().and_then(move |response| { let result; if response.body.is_empty() { result = EventCategoriesMessage::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); try!(start_element(&actual_tag_name, &mut stack)); result = try!(EventCategoriesMessageDeserializer::deserialize( "DescribeEventCategoriesResult", &mut stack )); skip_tree(&mut stack); try!(end_element(&actual_tag_name, &mut stack)); } Ok(result) })) }) } /// <p>Lists all the subscription descriptions for a customer account. 
The description for a subscription includes SubscriptionName, SNSTopicARN, CustomerID, SourceType, SourceID, CreationTime, and Status.</p> <p>If you specify a SubscriptionName, lists the description for that subscription.</p> fn describe_event_subscriptions( &self, input: DescribeEventSubscriptionsMessage, ) -> RusotoFuture<EventSubscriptionsMessage, DescribeEventSubscriptionsError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "DescribeEventSubscriptions"); params.put("Version", "2014-10-31"); DescribeEventSubscriptionsMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(DescribeEventSubscriptionsError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(response.buffer().from_err().and_then(move |response| { let result; if response.body.is_empty() { result = EventSubscriptionsMessage::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); try!(start_element(&actual_tag_name, &mut stack)); result = try!(EventSubscriptionsMessageDeserializer::deserialize( "DescribeEventSubscriptionsResult", &mut stack )); skip_tree(&mut stack); try!(end_element(&actual_tag_name, &mut stack)); } Ok(result) })) }) } /// <p>Returns events related to DB instances, DB security groups, DB snapshots, and DB parameter groups for the past 14 days. Events specific to a particular DB instance, DB security group, database snapshot, or DB parameter group can be obtained by providing the name as a parameter. 
By default, the past hour of events are returned.</p> fn describe_events( &self, input: DescribeEventsMessage, ) -> RusotoFuture<EventsMessage, DescribeEventsError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "DescribeEvents"); params.put("Version", "2014-10-31"); DescribeEventsMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(DescribeEventsError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(response.buffer().from_err().and_then(move |response| { let result; if response.body.is_empty() { result = EventsMessage::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); try!(start_element(&actual_tag_name, &mut stack)); result = try!(EventsMessageDeserializer::deserialize( "DescribeEventsResult", &mut stack )); skip_tree(&mut stack); try!(end_element(&actual_tag_name, &mut stack)); } Ok(result) })) }) } /// <p>Returns a list of orderable DB instance options for the specified engine.</p> fn describe_orderable_db_instance_options( &self, input: DescribeOrderableDBInstanceOptionsMessage, ) -> RusotoFuture<OrderableDBInstanceOptionsMessage, DescribeOrderableDBInstanceOptionsError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "DescribeOrderableDBInstanceOptions"); params.put("Version", "2014-10-31"); DescribeOrderableDBInstanceOptionsMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(DescribeOrderableDBInstanceOptionsError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(response.buffer().from_err().and_then(move |response| { let result; if response.body.is_empty() { result = OrderableDBInstanceOptionsMessage::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); try!(start_element(&actual_tag_name, &mut stack)); result = try!(OrderableDBInstanceOptionsMessageDeserializer::deserialize( "DescribeOrderableDBInstanceOptionsResult", &mut stack )); skip_tree(&mut stack); try!(end_element(&actual_tag_name, &mut stack)); } Ok(result) })) }) } /// <p>Returns a list of resources (for example, DB instances) that have at least one pending maintenance action.</p> fn describe_pending_maintenance_actions( &self, input: DescribePendingMaintenanceActionsMessage, ) -> RusotoFuture<PendingMaintenanceActionsMessage, DescribePendingMaintenanceActionsError> { let mut request 
= SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "DescribePendingMaintenanceActions"); params.put("Version", "2014-10-31"); DescribePendingMaintenanceActionsMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(DescribePendingMaintenanceActionsError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(response.buffer().from_err().and_then(move |response| { let result; if response.body.is_empty() { result = PendingMaintenanceActionsMessage::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); try!(start_element(&actual_tag_name, &mut stack)); result = try!(PendingMaintenanceActionsMessageDeserializer::deserialize( "DescribePendingMaintenanceActionsResult", &mut stack )); skip_tree(&mut stack); try!(end_element(&actual_tag_name, &mut stack)); } Ok(result) })) }) } /// <p>You can call <a>DescribeValidDBInstanceModifications</a> to learn what modifications you can make to your DB instance. You can use this information when you call <a>ModifyDBInstance</a>. </p> fn describe_valid_db_instance_modifications( &self, input: DescribeValidDBInstanceModificationsMessage, ) -> RusotoFuture< DescribeValidDBInstanceModificationsResult, DescribeValidDBInstanceModificationsError, > { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "DescribeValidDBInstanceModifications"); params.put("Version", "2014-10-31"); DescribeValidDBInstanceModificationsMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(DescribeValidDBInstanceModificationsError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(response.buffer().from_err().and_then(move |response| { let result; if response.body.is_empty() { result = DescribeValidDBInstanceModificationsResult::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); try!(start_element(&actual_tag_name, &mut stack)); result = try!( DescribeValidDBInstanceModificationsResultDeserializer::deserialize( "DescribeValidDBInstanceModificationsResult", &mut stack ) ); skip_tree(&mut stack); try!(end_element(&actual_tag_name, &mut stack)); } Ok(result) })) }) } /// <p>Forces a failover for a DB cluster.</p> <p>A failover for a DB cluster promotes one of the Read Replicas (read-only instances) in the DB cluster to be the primary instance (the cluster writer).</p> <p>Amazon Neptune will 
automatically fail over to a Read Replica, if one exists, when the primary instance fails. You can force a failover when you want to simulate a failure of a primary instance for testing. Because each instance in a DB cluster has its own endpoint address, you will need to clean up and re-establish any existing connections that use those endpoint addresses when the failover is complete.</p> fn failover_db_cluster( &self, input: FailoverDBClusterMessage, ) -> RusotoFuture<FailoverDBClusterResult, FailoverDBClusterError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "FailoverDBCluster"); params.put("Version", "2014-10-31"); FailoverDBClusterMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(FailoverDBClusterError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(response.buffer().from_err().and_then(move |response| { let result; if response.body.is_empty() { result = FailoverDBClusterResult::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); try!(start_element(&actual_tag_name, &mut stack)); result = try!(FailoverDBClusterResultDeserializer::deserialize( "FailoverDBClusterResult", &mut stack )); skip_tree(&mut stack); try!(end_element(&actual_tag_name, &mut stack)); } Ok(result) })) }) } /// <p>Lists all tags on an Amazon Neptune resource.</p> fn list_tags_for_resource( &self, input: ListTagsForResourceMessage, ) -> RusotoFuture<TagListMessage, ListTagsForResourceError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "ListTagsForResource"); params.put("Version", "2014-10-31"); ListTagsForResourceMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(ListTagsForResourceError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(response.buffer().from_err().and_then(move |response| { let result; if response.body.is_empty() { result = TagListMessage::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); try!(start_element(&actual_tag_name, &mut stack)); result = try!(TagListMessageDeserializer::deserialize( "ListTagsForResourceResult", &mut stack )); skip_tree(&mut stack); try!(end_element(&actual_tag_name, &mut stack)); } Ok(result) })) }) }
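// Illustrative usage (not part of the generated client): listing the tags on
// a resource by ARN. A sketch under the same assumptions as the earlier
// examples; the account ID and instance name in the ARN are hypothetical.
//
//     let input = ListTagsForResourceMessage {
//         resource_name: "arn:aws:rds:us-east-1:123456789012:db:my-neptune-instance".to_string(),
//         ..Default::default()
//     };
//     if let Ok(tags) = client.list_tags_for_resource(input).sync() {
//         for tag in tags.tag_list.unwrap_or_default() {
//             println!("{:?} = {:?}", tag.key, tag.value);
//         }
//     }

/// <p>Modify a setting for a DB cluster.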
You can change one or more database configuration parameters by specifying these parameters and the new values in the request. </p> fn modify_db_cluster( &self, input: ModifyDBClusterMessage, ) -> RusotoFuture<ModifyDBClusterResult, ModifyDBClusterError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "ModifyDBCluster"); params.put("Version", "2014-10-31"); ModifyDBClusterMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(ModifyDBClusterError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(response.buffer().from_err().and_then(move |response| { let result; if response.body.is_empty() { result = ModifyDBClusterResult::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); try!(start_element(&actual_tag_name, &mut stack)); result = try!(ModifyDBClusterResultDeserializer::deserialize( "ModifyDBClusterResult", &mut stack )); skip_tree(&mut stack); try!(end_element(&actual_tag_name, &mut stack)); } Ok(result) })) }) }
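// Illustrative usage (not part of the generated client): raising a cluster's
// backup retention period and applying the change immediately. A sketch under
// the same assumptions as the earlier examples; the identifier is hypothetical.
//
//     let input = ModifyDBClusterMessage {
//         db_cluster_identifier: "my-neptune-cluster".to_string(),
//         backup_retention_period: Some(7),
//         apply_immediately: Some(true),
//         ..Default::default()
//     };
//     let _ = client.modify_db_cluster(input).sync();

/// <p><p> Modifies the parameters of a DB cluster parameter group. To modify more than one parameter, submit a list of the following: <code>ParameterName</code>, <code>ParameterValue</code>, and <code>ApplyMethod</code>. A maximum of 20 parameters can be modified in a single request. </p> <note> <p>Changes to dynamic parameters are applied immediately. Changes to static parameters require a reboot without failover to the DB cluster associated with the parameter group before the change can take effect.</p> </note> <important> <p>After you create a DB cluster parameter group, you should wait at least 5 minutes before creating your first DB cluster that uses that DB cluster parameter group as the default parameter group. This allows Amazon Neptune to fully complete the create action before the parameter group is used as the default for a new DB cluster. This is especially important for parameters that are critical when creating the default database for a DB cluster, such as the character set for the default database defined by the <code>character_set_database</code> parameter.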
You can use the <i>Parameter Groups</i> option of the Amazon Neptune console or the <a>DescribeDBClusterParameters</a> command to verify that your DB cluster parameter group has been created or modified.</p> </important></p> fn modify_db_cluster_parameter_group( &self, input: ModifyDBClusterParameterGroupMessage, ) -> RusotoFuture<DBClusterParameterGroupNameMessage, ModifyDBClusterParameterGroupError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "ModifyDBClusterParameterGroup"); params.put("Version", "2014-10-31"); ModifyDBClusterParameterGroupMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(ModifyDBClusterParameterGroupError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(response.buffer().from_err().and_then(move |response| { let result; if response.body.is_empty() { result = DBClusterParameterGroupNameMessage::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); try!(start_element(&actual_tag_name, &mut stack)); result = try!(DBClusterParameterGroupNameMessageDeserializer::deserialize( "ModifyDBClusterParameterGroupResult", &mut stack )); skip_tree(&mut stack); try!(end_element(&actual_tag_name, &mut stack)); } Ok(result) })) }) } /// <p>Adds an attribute and values to, or removes an attribute and values from, a manual DB cluster snapshot.</p> <p>To share a manual DB cluster snapshot with other AWS accounts, specify <code>restore</code> as the <code>AttributeName</code> and use the <code>ValuesToAdd</code> parameter to add a list of IDs of the AWS accounts that are authorized to restore the manual DB cluster snapshot. Use the value <code>all</code> to make the manual DB cluster snapshot public, which means that it can be copied or restored by all AWS accounts. Do not add the <code>all</code> value for any manual DB cluster snapshots that contain private information that you don't want available to all AWS accounts. If a manual DB cluster snapshot is encrypted, it can be shared, but only by specifying a list of authorized AWS account IDs for the <code>ValuesToAdd</code> parameter. 
You can't use <code>all</code> as a value for that parameter in this case.</p> <p>To view which AWS accounts have access to copy or restore a manual DB cluster snapshot, or whether a manual DB cluster snapshot is public or private, use the <a>DescribeDBClusterSnapshotAttributes</a> API action.</p> fn modify_db_cluster_snapshot_attribute( &self, input: ModifyDBClusterSnapshotAttributeMessage, ) -> RusotoFuture<ModifyDBClusterSnapshotAttributeResult, ModifyDBClusterSnapshotAttributeError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "ModifyDBClusterSnapshotAttribute"); params.put("Version", "2014-10-31"); ModifyDBClusterSnapshotAttributeMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(ModifyDBClusterSnapshotAttributeError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(response.buffer().from_err().and_then(move |response| { let result; if response.body.is_empty() { result = ModifyDBClusterSnapshotAttributeResult::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); try!(start_element(&actual_tag_name, &mut stack)); result = try!( ModifyDBClusterSnapshotAttributeResultDeserializer::deserialize( "ModifyDBClusterSnapshotAttributeResult", &mut stack ) ); skip_tree(&mut stack); try!(end_element(&actual_tag_name, &mut stack)); } Ok(result) })) }) }
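// Illustrative usage (not part of the generated client): sharing a manual
// cluster snapshot with another AWS account by adding it to the "restore"
// attribute. A sketch under the same assumptions as the earlier examples; the
// snapshot identifier and account ID are hypothetical.
//
//     let input = ModifyDBClusterSnapshotAttributeMessage {
//         db_cluster_snapshot_identifier: "my-cluster-snapshot".to_string(),
//         attribute_name: "restore".to_string(),
//         values_to_add: Some(vec!["123456789012".to_string()]),
//         ..Default::default()
//     };
//     let _ = client.modify_db_cluster_snapshot_attribute(input).sync();

/// <p>Modifies settings for a DB instance. You can change one or more database configuration parameters by specifying these parameters and the new values in the request. To learn what modifications you can make to your DB instance, call <a>DescribeValidDBInstanceModifications</a> before you call <a>ModifyDBInstance</a>.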
</p> fn modify_db_instance( &self, input: ModifyDBInstanceMessage, ) -> RusotoFuture<ModifyDBInstanceResult, ModifyDBInstanceError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "ModifyDBInstance"); params.put("Version", "2014-10-31"); ModifyDBInstanceMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(ModifyDBInstanceError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(response.buffer().from_err().and_then(move |response| { let result; if response.body.is_empty() { result = ModifyDBInstanceResult::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); try!(start_element(&actual_tag_name, &mut stack)); result = try!(ModifyDBInstanceResultDeserializer::deserialize( "ModifyDBInstanceResult", &mut stack )); skip_tree(&mut stack); try!(end_element(&actual_tag_name, &mut stack)); } Ok(result) })) }) }
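// Illustrative usage (not part of the generated client): moving an instance
// to a larger instance class and applying the change immediately. A sketch
// under the same assumptions as the earlier examples; the identifier and
// instance class are hypothetical.
//
//     let input = ModifyDBInstanceMessage {
//         db_instance_identifier: "my-neptune-instance".to_string(),
//         db_instance_class: Some("db.r4.xlarge".to_string()),
//         apply_immediately: Some(true),
//         ..Default::default()
//     };
//     let _ = client.modify_db_instance(input).sync();

/// <p><p> Modifies the parameters of a DB parameter group. To modify more than one parameter, submit a list of the following: <code>ParameterName</code>, <code>ParameterValue</code>, and <code>ApplyMethod</code>. A maximum of 20 parameters can be modified in a single request. </p> <note> <p>Changes to dynamic parameters are applied immediately. Changes to static parameters require a reboot without failover to the DB instance associated with the parameter group before the change can take effect.</p> </note> <important> <p>After you modify a DB parameter group, you should wait at least 5 minutes before creating your first DB instance that uses that DB parameter group as the default parameter group. This allows Amazon Neptune to fully complete the modify action before the parameter group is used as the default for a new DB instance. This is especially important for parameters that are critical when creating the default database for a DB instance, such as the character set for the default database defined by the <code>character_set_database</code> parameter.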
You can use the <i>Parameter Groups</i> option of the Amazon Neptune console or the <i>DescribeDBParameters</i> command to verify that your DB parameter group has been created or modified.</p> </important></p> fn modify_db_parameter_group( &self, input: ModifyDBParameterGroupMessage, ) -> RusotoFuture<DBParameterGroupNameMessage, ModifyDBParameterGroupError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "ModifyDBParameterGroup"); params.put("Version", "2014-10-31"); ModifyDBParameterGroupMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(ModifyDBParameterGroupError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(response.buffer().from_err().and_then(move |response| { let result; if response.body.is_empty() { result = DBParameterGroupNameMessage::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); try!(start_element(&actual_tag_name, &mut stack)); result = try!(DBParameterGroupNameMessageDeserializer::deserialize( "ModifyDBParameterGroupResult", &mut stack )); skip_tree(&mut stack); try!(end_element(&actual_tag_name, &mut stack)); } Ok(result) })) }) } /// <p>Modifies an existing DB subnet group. DB subnet groups must contain at least one subnet in at least two AZs in the AWS Region.</p> fn modify_db_subnet_group( &self, input: ModifyDBSubnetGroupMessage, ) -> RusotoFuture<ModifyDBSubnetGroupResult, ModifyDBSubnetGroupError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "ModifyDBSubnetGroup"); params.put("Version", "2014-10-31"); ModifyDBSubnetGroupMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(ModifyDBSubnetGroupError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(response.buffer().from_err().and_then(move |response| { let result; if response.body.is_empty() { result = ModifyDBSubnetGroupResult::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); try!(start_element(&actual_tag_name, &mut stack)); result = try!(ModifyDBSubnetGroupResultDeserializer::deserialize( "ModifyDBSubnetGroupResult", &mut stack )); skip_tree(&mut stack); try!(end_element(&actual_tag_name, &mut stack)); } Ok(result) })) }) } /// <p>Modifies an existing event notification subscription. 
Note that you can't modify the source identifiers using this call; to change source identifiers for a subscription, use the <a>AddSourceIdentifierToSubscription</a> and <a>RemoveSourceIdentifierFromSubscription</a> calls.</p> <p>You can see a list of the event categories for a given SourceType by using the <b>DescribeEventCategories</b> action.</p> fn modify_event_subscription( &self, input: ModifyEventSubscriptionMessage, ) -> RusotoFuture<ModifyEventSubscriptionResult, ModifyEventSubscriptionError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "ModifyEventSubscription"); params.put("Version", "2014-10-31"); ModifyEventSubscriptionMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(ModifyEventSubscriptionError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(response.buffer().from_err().and_then(move |response| { let result; if response.body.is_empty() { result = ModifyEventSubscriptionResult::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); try!(start_element(&actual_tag_name, &mut stack)); result = try!(ModifyEventSubscriptionResultDeserializer::deserialize( "ModifyEventSubscriptionResult", &mut stack )); skip_tree(&mut stack); try!(end_element(&actual_tag_name, &mut stack)); } Ok(result) })) }) } /// <p>Promotes a Read Replica DB cluster to a standalone DB cluster.</p> fn promote_read_replica_db_cluster( &self, input: PromoteReadReplicaDBClusterMessage, ) -> RusotoFuture<PromoteReadReplicaDBClusterResult, PromoteReadReplicaDBClusterError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "PromoteReadReplicaDBCluster"); params.put("Version", "2014-10-31"); PromoteReadReplicaDBClusterMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(PromoteReadReplicaDBClusterError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(response.buffer().from_err().and_then(move |response| { let result; if response.body.is_empty() { result = PromoteReadReplicaDBClusterResult::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); try!(start_element(&actual_tag_name, &mut stack)); result = try!(PromoteReadReplicaDBClusterResultDeserializer::deserialize( "PromoteReadReplicaDBClusterResult", &mut stack )); skip_tree(&mut stack); try!(end_element(&actual_tag_name, &mut 
stack)); } Ok(result) })) }) } /// <p>You might need to reboot your DB instance, usually for maintenance reasons. For example, if you make certain modifications, or if you change the DB parameter group associated with the DB instance, you must reboot the instance for the changes to take effect. </p> <p>Rebooting a DB instance restarts the database engine service. Rebooting a DB instance results in a momentary outage, during which the DB instance status is set to rebooting. </p> fn reboot_db_instance( &self, input: RebootDBInstanceMessage, ) -> RusotoFuture<RebootDBInstanceResult, RebootDBInstanceError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "RebootDBInstance"); params.put("Version", "2014-10-31"); RebootDBInstanceMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(RebootDBInstanceError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(response.buffer().from_err().and_then(move |response| { let result; if response.body.is_empty() { result = RebootDBInstanceResult::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); try!(start_element(&actual_tag_name, &mut stack)); result = try!(RebootDBInstanceResultDeserializer::deserialize( "RebootDBInstanceResult", &mut stack )); skip_tree(&mut stack); try!(end_element(&actual_tag_name, &mut stack)); } Ok(result) })) }) } /// <p>Disassociates an Identity and Access Management (IAM) role from a DB cluster. 
</p> fn remove_role_from_db_cluster( &self, input: RemoveRoleFromDBClusterMessage, ) -> RusotoFuture<(), RemoveRoleFromDBClusterError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "RemoveRoleFromDBCluster"); params.put("Version", "2014-10-31"); RemoveRoleFromDBClusterMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(RemoveRoleFromDBClusterError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(future::ok(::std::mem::drop(response))) }) } /// <p>Removes a source identifier from an existing event notification subscription.</p> fn remove_source_identifier_from_subscription( &self, input: RemoveSourceIdentifierFromSubscriptionMessage, ) -> RusotoFuture< RemoveSourceIdentifierFromSubscriptionResult, RemoveSourceIdentifierFromSubscriptionError, > { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "RemoveSourceIdentifierFromSubscription"); params.put("Version", "2014-10-31"); RemoveSourceIdentifierFromSubscriptionMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(RemoveSourceIdentifierFromSubscriptionError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(response.buffer().from_err().and_then(move |response| { let result; if response.body.is_empty() { result = RemoveSourceIdentifierFromSubscriptionResult::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); try!(start_element(&actual_tag_name, &mut stack)); result = try!( RemoveSourceIdentifierFromSubscriptionResultDeserializer::deserialize( "RemoveSourceIdentifierFromSubscriptionResult", &mut stack ) ); skip_tree(&mut stack); try!(end_element(&actual_tag_name, &mut stack)); } Ok(result) })) }) } /// <p>Removes metadata tags from an Amazon Neptune resource.</p> fn remove_tags_from_resource( &self, input: RemoveTagsFromResourceMessage, ) -> RusotoFuture<(), RemoveTagsFromResourceError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "RemoveTagsFromResource"); params.put("Version", "2014-10-31"); RemoveTagsFromResourceMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(RemoveTagsFromResourceError::from_body( 
String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(future::ok(::std::mem::drop(response))) }) } /// <p> Modifies the parameters of a DB cluster parameter group to the default value. To reset specific parameters submit a list of the following: <code>ParameterName</code> and <code>ApplyMethod</code>. To reset the entire DB cluster parameter group, specify the <code>DBClusterParameterGroupName</code> and <code>ResetAllParameters</code> parameters. </p> <p> When resetting the entire group, dynamic parameters are updated immediately and static parameters are set to <code>pending-reboot</code> to take effect on the next DB instance restart or <a>RebootDBInstance</a> request. You must call <a>RebootDBInstance</a> for every DB instance in your DB cluster that you want the updated static parameter to apply to.</p> fn reset_db_cluster_parameter_group( &self, input: ResetDBClusterParameterGroupMessage, ) -> RusotoFuture<DBClusterParameterGroupNameMessage, ResetDBClusterParameterGroupError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "ResetDBClusterParameterGroup"); params.put("Version", "2014-10-31"); ResetDBClusterParameterGroupMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(ResetDBClusterParameterGroupError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(response.buffer().from_err().and_then(move |response| { let result; if response.body.is_empty() { result = DBClusterParameterGroupNameMessage::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); try!(start_element(&actual_tag_name, &mut stack)); result = try!(DBClusterParameterGroupNameMessageDeserializer::deserialize( "ResetDBClusterParameterGroupResult", &mut stack )); skip_tree(&mut stack); try!(end_element(&actual_tag_name, &mut stack)); } Ok(result) })) }) } /// <p>Modifies the parameters of a DB parameter group to the engine/system default value. To reset specific parameters, provide a list of the following: <code>ParameterName</code> and <code>ApplyMethod</code>. To reset the entire DB parameter group, specify the <code>DBParameterGroup</code> name and <code>ResetAllParameters</code> parameters. When resetting the entire group, dynamic parameters are updated immediately and static parameters are set to <code>pending-reboot</code> to take effect on the next DB instance restart or <code>RebootDBInstance</code> request. 
</p> fn reset_db_parameter_group( &self, input: ResetDBParameterGroupMessage, ) -> RusotoFuture<DBParameterGroupNameMessage, ResetDBParameterGroupError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "ResetDBParameterGroup"); params.put("Version", "2014-10-31"); ResetDBParameterGroupMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(ResetDBParameterGroupError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(response.buffer().from_err().and_then(move |response| { let result; if response.body.is_empty() { result = DBParameterGroupNameMessage::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); try!(start_element(&actual_tag_name, &mut stack)); result = try!(DBParameterGroupNameMessageDeserializer::deserialize( "ResetDBParameterGroupResult", &mut stack )); skip_tree(&mut stack); try!(end_element(&actual_tag_name, &mut stack)); } Ok(result) })) }) } /// <p>Creates a new DB cluster from a DB snapshot or DB cluster snapshot.</p> <p>If a DB snapshot is specified, the target DB cluster is created from the source DB snapshot with a default configuration and default security group.</p> <p>If a DB cluster snapshot is specified, the target DB cluster is created from the source DB cluster restore point with the same configuration as the original source DB cluster, except that the new DB cluster is created with the default security group.</p> fn restore_db_cluster_from_snapshot( &self, input: RestoreDBClusterFromSnapshotMessage, ) -> RusotoFuture<RestoreDBClusterFromSnapshotResult, RestoreDBClusterFromSnapshotError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "RestoreDBClusterFromSnapshot"); params.put("Version", "2014-10-31"); RestoreDBClusterFromSnapshotMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(RestoreDBClusterFromSnapshotError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(response.buffer().from_err().and_then(move |response| { let result; if response.body.is_empty() { result = RestoreDBClusterFromSnapshotResult::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); try!(start_element(&actual_tag_name, &mut stack)); result = try!(RestoreDBClusterFromSnapshotResultDeserializer::deserialize( "RestoreDBClusterFromSnapshotResult", &mut stack )); 
skip_tree(&mut stack); try!(end_element(&actual_tag_name, &mut stack)); } Ok(result) })) }) } /// <p><p>Restores a DB cluster to an arbitrary point in time. Users can restore to any point in time before <code>LatestRestorableTime</code> for up to <code>BackupRetentionPeriod</code> days. The target DB cluster is created from the source DB cluster with the same configuration as the original DB cluster, except that the new DB cluster is created with the default DB security group. </p> <note> <p>This action only restores the DB cluster, not the DB instances for that DB cluster. You must invoke the <a>CreateDBInstance</a> action to create DB instances for the restored DB cluster, specifying the identifier of the restored DB cluster in <code>DBClusterIdentifier</code>. You can create DB instances only after the <code>RestoreDBClusterToPointInTime</code> action has completed and the DB cluster is available.</p> </note></p> fn restore_db_cluster_to_point_in_time( &self, input: RestoreDBClusterToPointInTimeMessage, ) -> RusotoFuture<RestoreDBClusterToPointInTimeResult, RestoreDBClusterToPointInTimeError> { let mut request = SignedRequest::new("POST", "rds", &self.region, "/"); let mut params = Params::new(); params.put("Action", "RestoreDBClusterToPointInTime"); params.put("Version", "2014-10-31"); RestoreDBClusterToPointInTimeMessageSerializer::serialize(&mut params, "", &input); request.set_payload(Some( serde_urlencoded::to_string(&params).unwrap().into_bytes(), )); request.set_content_type("application/x-www-form-urlencoded".to_owned()); self.client.sign_and_dispatch(request, |response| { if !response.status.is_success() { return Box::new(response.buffer().from_err().and_then(|response| { Err(RestoreDBClusterToPointInTimeError::from_body( String::from_utf8_lossy(response.body.as_ref()).as_ref(), )) })); } Box::new(response.buffer().from_err().and_then(move |response| { let result; if response.body.is_empty() { result = RestoreDBClusterToPointInTimeResult::default(); } else { let reader = EventReader::new_with_config( response.body.as_slice(), ParserConfig::new().trim_whitespace(true), ); let mut stack = XmlResponse::new(reader.into_iter().peekable()); let _start_document = stack.next(); let actual_tag_name = try!(peek_at_name(&mut stack)); try!(start_element(&actual_tag_name, &mut stack)); result = try!( RestoreDBClusterToPointInTimeResultDeserializer::deserialize( "RestoreDBClusterToPointInTimeResult", &mut stack ) ); skip_tree(&mut stack); try!(end_element(&actual_tag_name, &mut stack)); } Ok(result) })) }) } } #[cfg(test)] mod protocol_tests {}
Some(&Ok(XmlEvent::EndElement { ref name, .. })) => DeserializerNext::Close, Some(&Ok(XmlEvent::StartElement { ref name, .. })) => { DeserializerNext::Element(name.local_name.to_owned()) }
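// A minimal usage sketch for the client above (illustration only, not part of
// the generated file). It assumes the crate's usual rusoto shape: a
// `NeptuneClient::new(Region)` constructor, `Default`-derived message structs
// with snake_case fields, and a `RusotoFuture` driven by a futures 0.1
// runtime. Verify the generated struct fields before relying on this.
fn share_manual_snapshot_sketch(client: &NeptuneClient) {
    // Grant account 123456789012 permission to restore the snapshot, per the
    // ModifyDBClusterSnapshotAttribute doc comment above.
    let input = ModifyDBClusterSnapshotAttributeMessage {
        db_cluster_snapshot_identifier: "my-manual-snapshot".to_owned(),
        attribute_name: "restore".to_owned(),
        values_to_add: Some(vec!["123456789012".to_owned()]),
        ..Default::default()
    };
    let future = client.modify_db_cluster_snapshot_attribute(input);
    // Drive `future` with a tokio (futures 0.1) runtime, or a blocking helper
    // such as `.sync()` where the rusoto version in use provides one.
    let _ = future;
}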
resolve-inconsistent-names.rs
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. fn main() { let y = 1; match y { a | b =>
//~ ERROR variable `a` from pattern #1 is not bound in pattern #2 //~^ ERROR variable `b` from pattern #2 is not bound in pattern #1 } }
{}
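// A compiling counterpart to the test above (illustration only, not part of
// the test suite): the resolve error disappears when every `|` alternative
// binds the same set of variables, so both arms bind `n` here.
enum E {
    A(i32),
    B(i32),
}

fn consistent_names() {
    let e = E::A(1);
    match e {
        // Both alternatives bind `n`, so the or-pattern is accepted.
        E::A(n) | E::B(n) => println!("bound {}", n),
    }
}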
local_secondary_index.ts
import { DynamoDB } from "aws-sdk"; import * as _ from "lodash"; import { ITable, Table } from "../table"; import * as Codec from "../codec"; import * as Metadata from "../metadata"; import * as Query from "./query"; const HASH_KEY_REF = "#hk"; const HASH_VALUE_REF = ":hkv"; const RANGE_KEY_REF = "#rk"; export class
<T extends Table, HashKeyType, RangeKeyType> { constructor( readonly tableClass: ITable<T>, readonly metadata: Metadata.Indexes.LocalSecondaryIndexMetadata, ) {} public async query(options: { hash: HashKeyType, range?: Query.Conditions<RangeKeyType>, rangeOrder?: "ASC" | "DESC", limit?: number, exclusiveStartKey?: DynamoDB.DocumentClient.Key, consistent?: boolean, }) { if (!options.rangeOrder) { options.rangeOrder = "ASC"; } const ScanIndexForward = options.rangeOrder === "ASC"; const params: DynamoDB.DocumentClient.QueryInput = { TableName: this.tableClass.metadata.name, Limit: options.limit, IndexName: this.metadata.name, ScanIndexForward, ExclusiveStartKey: options.exclusiveStartKey, ReturnConsumedCapacity: "TOTAL", KeyConditionExpression: `${HASH_KEY_REF} = ${HASH_VALUE_REF}`, ExpressionAttributeNames: { [HASH_KEY_REF]: this.tableClass.metadata.primaryKey.hash.name, }, ExpressionAttributeValues: { [HASH_VALUE_REF]: options.hash, }, ConsistentRead: options.consistent, }; if (options.range) { const rangeKeyOptions = Query.parseCondition(options.range, RANGE_KEY_REF); params.KeyConditionExpression += ` AND ${rangeKeyOptions.conditionExpression}`; Object.assign(params.ExpressionAttributeNames, { [RANGE_KEY_REF]: this.metadata.range.name }); Object.assign(params.ExpressionAttributeValues, rangeKeyOptions.expressionAttributeValues); } const result = await this.tableClass.metadata.connection.documentClient.query(params).promise(); return { records: (result.Items || []).map((item) => { return Codec.deserialize(this.tableClass, item); }), count: result.Count, scannedCount: result.ScannedCount, lastEvaluatedKey: result.LastEvaluatedKey, consumedCapacity: result.ConsumedCapacity, }; } public async scan(options: { limit?: number, totalSegments?: number, segment?: number, exclusiveStartKey?: DynamoDB.DocumentClient.Key, } = {}) { const params: DynamoDB.DocumentClient.ScanInput = { TableName: this.tableClass.metadata.name, Limit: options.limit, ExclusiveStartKey: options.exclusiveStartKey, ReturnConsumedCapacity: "TOTAL", TotalSegments: options.totalSegments, Segment: options.segment, }; const result = await this.tableClass.metadata.connection.documentClient.scan(params).promise(); return { records: (result.Items || []).map((item) => { return Codec.deserialize(this.tableClass, item); }), count: result.Count, scannedCount: result.ScannedCount, lastEvaluatedKey: result.LastEvaluatedKey, consumedCapacity: result.ConsumedCapacity, }; } }
LocalSecondaryIndex
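// A hypothetical usage sketch for the index class above (illustration only):
// `UserTable` is a stand-in for a decorated table class, and the exact shape
// of `Query.Conditions` comes from ./query, so check it before relying on this.
import { Table } from "../table";

declare class UserTable extends Table {} // assumed table class for the sketch

async function queryNewestForUser(
  index: LocalSecondaryIndex<UserTable, string, number>,
) {
  const page = await index.query({
    hash: "user-123",   // same partition key as the base table
    rangeOrder: "DESC", // ScanIndexForward = false
    limit: 25,
  });
  // Feed lastEvaluatedKey back in as exclusiveStartKey to fetch the next page.
  console.log(page.records.length, page.lastEvaluatedKey);
}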
sheer_index.py
import os.path from django.conf import settings from django.core.management.base import BaseCommand from sheerlike.indexer import index LOCATION = os.environ.get('SHEER_LOCATION', os.getcwd()) ELASTICSEARCH_HOSTS = settings.SHEER_ELASTICSEARCH_SERVER ELASTICSEARCH_INDEX = settings.SHEER_ELASTICSEARCH_INDEX
parser.add_argument('--reindex', '-r', action="store_true", help="Recreate the index and reindex all content.") parser.add_argument('--processors', '-p', nargs='*', help='Content processors to index.') parser.add_argument( '--elasticsearch', '-e', default=ELASTICSEARCH_HOSTS, help=("Elasticsearch host:port pairs. Separate hosts with commas. " "Default is localhost:9200. You can also set the " "SHEER_ELASTICSEARCH_HOSTS environment variable.")) parser.add_argument( '--index', '-i', default=ELASTICSEARCH_INDEX, help=("Elasticsearch index name. Default is 'content'. You can " "also set the SHEER_ELASTICSEARCH_INDEX environment " "variable.")) def handle(self, *args, **options): index(args, options)
class Command(BaseCommand): help = "Run the classic 'sheer' indexer" def add_arguments(self, parser):
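# Illustrative invocations of the command above (the flag names are the ones
# registered in add_arguments; the command name comes from this file's name):
#
#   python manage.py sheer_index                  # index all content
#   python manage.py sheer_index --reindex        # recreate the index first
#   python manage.py sheer_index -p posts -i content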
x11_app_launch_context.rs
// This file was generated by gir (https://github.com/gtk-rs/gir) // from gir-files (https://github.com/gtk-rs/gir-files) // DO NOT EDIT use std::fmt; glib::wrapper! { #[doc(alias = "GdkX11AppLaunchContext")] pub struct X11AppLaunchContext(Object<ffi::GdkX11AppLaunchContext, ffi::GdkX11AppLaunchContextClass>) @extends gdk::AppLaunchContext, gio::AppLaunchContext; match fn { type_ => || ffi::gdk_x11_app_launch_context_get_type(), } } impl X11AppLaunchContext {} impl fmt::Display for X11AppLaunchContext { fn
(&self, f: &mut fmt::Formatter) -> fmt::Result { f.write_str("X11AppLaunchContext") } }
fmt
method_test.go
package core import ( "testing" ) func TestForEachMethodsMap(t *testing.T) { for k, v := range MethodsMap { t.Log(k.String()) for k1, v1 := range v { t.Log("\r", k1.String(), v1) } } } func TestMethodNamesMap(t *testing.T)
func TestForEachMethodNames(t *testing.T) { for k, v := range MethodNameList { t.Log(k, v) } } func TestStringCMP(t *testing.T) { t.Log(94 & 8) }
{ for k := range MethodNamesMap { t.Log(k) } }
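// Illustrative invocation of the tests above (t.Log output is only printed
// with -v; the package path is an assumption for this sketch):
//
//   go test -v -run 'TestForEachMethodsMap|TestMethodNamesMap' ./core/...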
isfs_EnrolmentDetail.js
/// <reference path="xrm_v9.js" /> /// <reference path="isfs_utility.js" /> if (typeof(ISFS) === "undefined") { ISFS = { __namespace: true }; } ISFS.EnrolmentDetail = { /** */ OnFormLoad: function (executionContext) { var formContext = executionContext.getFormContext(); if (formContext.ui.getFormType() == FormType_Create) formContext.getControl("isfs_adjustedenrolmentnumber").setVisible(false); formContext.getControl("isfs_adjustmentnotes").setVisible(false); formContext.getAttribute("isfs_adjustmentnotes").setRequiredLevel("none"); }, /** */ OnChange_Enrolment: function (executionContext) { var formContext = executionContext.getFormContext(); if (formContext.ui.getFormType() != FormType_Update) return; var enrolmentNumber = formContext.getAttribute("isfs_enrolmentnumber").getIsDirty(); var adjusted = formContext.getAttribute("isfs_adjustedenrolmentnumber").getIsDirty(); if (enrolmentNumber == true || adjusted == true) { formContext.getControl("isfs_adjustmentnotes").setVisible(true);
else { formContext.getControl("isfs_adjustmentnotes").setVisible(false); formContext.getAttribute("isfs_adjustmentnotes").setRequiredLevel("none"); } }, __namespace: true };
formContext.getAttribute("isfs_adjustmentnotes").setRequiredLevel("required"); formContext.getAttribute("isfs_adjustmentnotes").setValue(""); if(adjusted) formContext.getControl("isfs_adjustmentnotes").setFocus(); }
service_test.go
package macaroons_test import ( "context" "encoding/hex" "io/ioutil" "os" "path" "testing" "github.com/coreos/bbolt" "github.com/lightningnetwork/lnd/macaroons" "google.golang.org/grpc/metadata" "gopkg.in/macaroon-bakery.v2/bakery" "gopkg.in/macaroon-bakery.v2/bakery/checkers" ) var ( testOperation = bakery.Op{ Entity: "testEntity", Action: "read", } defaultPw = []byte("hello") ) // setupTestRootKeyStorage creates a dummy root key storage by // creating a temporary macaroons.db and initializing it with the // default password of 'hello'. Only the path to the temporary // DB file is returned, because the service will open the file // and read the store on its own. func setupTestRootKeyStorage(t *testing.T) string { tempDir, err := ioutil.TempDir("", "macaroonstore-") if err != nil { t.Fatalf("Error creating temp dir: %v", err) } db, err := bolt.Open(path.Join(tempDir, "macaroons.db"), 0600, bolt.DefaultOptions) if err != nil { t.Fatalf("Error opening store DB: %v", err) } store, err := macaroons.NewRootKeyStorage(db) if err != nil { db.Close() t.Fatalf("Error creating root key store: %v", err) } defer store.Close() err = store.CreateUnlock(&defaultPw) if err != nil { t.Fatalf("Error creating unlock: %v", err) } return tempDir } // TestNewService tests the creation of the macaroon service. func TestNewService(t *testing.T) { // First, initialize a dummy DB file with a store that the service // can read from. Make sure the file is removed in the end. tempDir := setupTestRootKeyStorage(t) defer os.RemoveAll(tempDir) // Second, create the new service instance, unlock it and pass in a // checker that we expect it to add to the bakery. Check the error // before deferring Close so a failed constructor doesn't leave us // calling Close on a nil service. service, err := macaroons.NewService(tempDir, macaroons.IPLockChecker) if err != nil { t.Fatalf("Error creating new service: %v", err) } defer service.Close() err = service.CreateUnlock(&defaultPw) if err != nil { t.Fatalf("Error unlocking root key storage: %v", err) } // Third, check if the created service can bake macaroons. macaroon, err := service.Oven.NewMacaroon(nil, bakery.LatestVersion, nil, testOperation) if err != nil { t.Fatalf("Error creating macaroon from service: %v", err) } if macaroon.Namespace().String() != "std:" { t.Fatalf("The created macaroon has an invalid namespace: %s", macaroon.Namespace().String()) } // Finally, check if the service has been initialized correctly and // the checker has been added. var checkerFound = false checker := service.Checker.FirstPartyCaveatChecker.(*checkers.Checker) for _, info := range checker.Info() { if info.Name == "ipaddr" && info.Prefix == "" && info.Namespace == "std" { checkerFound = true } } if !checkerFound { t.Fatalf("Checker '%s' not found in service.", "ipaddr") } } // TestValidateMacaroon tests the validation of a macaroon that is in an // incoming context. func TestValidateMacaroon(t *testing.T) { // First, initialize the service and unlock it. tempDir := setupTestRootKeyStorage(t) defer os.RemoveAll(tempDir) service, err := macaroons.NewService(tempDir, macaroons.IPLockChecker) if err != nil { t.Fatalf("Error creating new service: %v", err) } defer service.Close() err = service.CreateUnlock(&defaultPw) if err != nil { t.Fatalf("Error unlocking root key storage: %v", err) } // Then, create a new macaroon that we can serialize.
macaroon, err := service.Oven.NewMacaroon(nil, bakery.LatestVersion, nil, testOperation) if err != nil { t.Fatalf("Error creating macaroon from service: %v", err) } macaroonBinary, err := macaroon.M().MarshalBinary() if err != nil { t.Fatalf("Error serializing macaroon: %v", err) } // Because the macaroons are always passed in a context, we need to // mock one that has just the serialized macaroon as a value. md := metadata.New(map[string]string{ "macaroon": hex.EncodeToString(macaroonBinary), }) mockContext := metadata.NewIncomingContext(context.Background(), md) // Finally, validate the macaroon against the required permissions. err = service.ValidateMacaroon(mockContext, []bakery.Op{testOperation}) if err != nil { t.Fatalf("Error validating the macaroon: %v", err)
} }
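// A client-side counterpart sketch (illustration only, not part of this test
// file; it reuses the context, hex, and metadata imports above): callers
// attach the serialized macaroon to the outgoing gRPC context under the same
// "macaroon" metadata key that ValidateMacaroon reads on the server side.
func contextWithMacaroon(ctx context.Context, macBytes []byte) context.Context {
	md := metadata.New(map[string]string{
		"macaroon": hex.EncodeToString(macBytes),
	})
	return metadata.NewOutgoingContext(ctx, md)
}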
Counter.js
import React, {Component} from 'react'; import ReactDOM from 'react-dom'; class Counter extends Component { state = { counter : 0 }; incrementCounter = (value) => { //Update counter value let counterNew = this.state.counter + value;
decrementCounter = () => { //Update counter value -1 let counterNew = this.state.counter - 1; this.setState({ counter: counterNew, }) }; render() { return( <div> <div className="container mt-5"> <h2>Count: {this.state.counter}</h2> <p> <button className="btn btn-success btn-lg" onClick={() => this.incrementCounter(10)}>+</button> <button className="btn btn-danger btn-lg ml-2" onClick={this.decrementCounter}>-</button> </p> </div> </div> ); } } export default Counter; if (document.getElementById('counter')) { ReactDOM.render(<Counter />, document.getElementById('counter')); }
this.setState({ counter: counterNew, }) };
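// A variant sketch (not in the original component): because setState calls
// may be batched, the functional updater form reads the previous state
// safely instead of reading this.state at call time.
class CounterSafe extends Component {
    state = { counter: 0 };

    incrementCounter = (value) => {
        this.setState((prevState) => ({
            counter: prevState.counter + value,
        }));
    };

    render() {
        return <span>{this.state.counter}</span>;
    }
}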
input-moment.js
!function(e){function t(r){if(n[r])return n[r].exports;var o=n[r]={exports:{},id:r,loaded:!1};return e[r].call(o.exports,o,o.exports,t),o.loaded=!0,o.exports}var n={};return t.m=e,t.c=n,t.p="",t(0)}([function(e,t,n){"use strict";e.exports=n(44)},function(e,t){function
(){throw new Error("setTimeout has not been defined")}function r(){throw new Error("clearTimeout has not been defined")}function o(e){if(s===setTimeout)return setTimeout(e,0);if((s===n||!s)&&setTimeout)return s=setTimeout,setTimeout(e,0);try{return s(e,0)}catch(t){try{return s.call(null,e,0)}catch(t){return s.call(this,e,0)}}}function a(e){if(l===clearTimeout)return clearTimeout(e);if((l===r||!l)&&clearTimeout)return l=clearTimeout,clearTimeout(e);try{return l(e)}catch(t){try{return l.call(null,e)}catch(t){return l.call(this,e)}}}function i(){y&&d&&(y=!1,d.length?m=d.concat(m):v=-1,m.length&&u())}function u(){if(!y){var e=o(i);y=!0;for(var t=m.length;t;){for(d=m,m=[];++v<t;)d&&d[v].run();v=-1,t=m.length}d=null,y=!1,a(e)}}function c(e,t){this.fun=e,this.array=t}function f(){}var s,l,p=e.exports={};!function(){try{s="function"==typeof setTimeout?setTimeout:n}catch(e){s=n}try{l="function"==typeof clearTimeout?clearTimeout:r}catch(e){l=r}}();var d,m=[],y=!1,v=-1;p.nextTick=function(e){var t=new Array(arguments.length-1);if(arguments.length>1)for(var n=1;n<arguments.length;n++)t[n-1]=arguments[n];m.push(new c(e,t)),1!==m.length||y||o(u)},c.prototype.run=function(){this.fun.apply(null,this.array)},p.title="browser",p.browser=!0,p.env={},p.argv=[],p.version="",p.versions={},p.on=f,p.addListener=f,p.once=f,p.off=f,p.removeListener=f,p.removeAllListeners=f,p.emit=f,p.prependListener=f,p.prependOnceListener=f,p.listeners=function(e){return[]},p.binding=function(e){throw new Error("process.binding is not supported")},p.cwd=function(){return"/"},p.chdir=function(e){throw new Error("process.chdir is not supported")},p.umask=function(){return 0}},function(e,t){e.exports=React},function(e,t,n){var r,o;/*! Copyright (c) 2016 Jed Watson. Licensed under the MIT License (MIT), see http://jedwatson.github.io/classnames */ !function(){"use strict";function n(){for(var e=[],t=0;t<arguments.length;t++){var r=arguments[t];if(r){var o=typeof r;if("string"===o||"number"===o)e.push(r);else if(Array.isArray(r))e.push(n.apply(null,r));else if("object"===o)for(var i in r)a.call(r,i)&&r[i]&&e.push(i)}}return e.join(" ")}var a={}.hasOwnProperty;"undefined"!=typeof e&&e.exports?e.exports=n:(r=[],o=function(){return n}.apply(t,r),!(void 0!==o&&(e.exports=o)))}()},function(e,t){"use strict";function n(e){return function(){return e}}var r=function(){};r.thatReturns=n,r.thatReturnsFalse=n(!1),r.thatReturnsTrue=n(!0),r.thatReturnsNull=n(null),r.thatReturnsThis=function(){return this},r.thatReturnsArgument=function(e){return e},e.exports=r},function(e,t,n){(function(t){"use strict";function n(e,t,n,o,a,i,u,c){if(r(t),!e){var f;if(void 0===t)f=new Error("Minified exception occurred; use the non-minified dev environment for the full error message and additional helpful warnings.");else{var s=[n,o,a,i,u,c],l=0;f=new Error(t.replace(/%s/g,function(){return s[l++]})),f.name="Invariant Violation"}throw f.framesToPop=1,f}}var r=function(e){};"production"!==t.env.NODE_ENV&&(r=function(e){if(void 0===e)throw new Error("invariant requires an error message argument")}),e.exports=n}).call(t,n(1))},function(e,t){function n(e){var t=typeof e;return null!=e&&("object"==t||"function"==t)}e.exports=n},function(e,t){"use strict";var n="SECRET_DO_NOT_PASS_THIS_OR_YOU_WILL_BE_FIRED";e.exports=n},function(e,t){e.exports=moment},function(e,t,n){(function(t){"use strict";var r=n(4),o=r;if("production"!==t.env.NODE_ENV){var a=function(e){for(var t=arguments.length,n=Array(t>1?t-1:0),r=1;r<t;r++)n[r-1]=arguments[r];var o=0,a="Warning: 
"+e.replace(/%s/g,function(){return n[o++]});"undefined"!=typeof console&&console.error(a);try{throw new Error(a)}catch(e){}};o=function(e,t){if(void 0===t)throw new Error("`warning(condition, format, ...args)` requires a warning message argument");if(0!==t.indexOf("Failed Composite propType: ")&&!e){for(var n=arguments.length,r=Array(n>2?n-2:0),o=2;o<n;o++)r[o-2]=arguments[o];a.apply(void 0,[t].concat(r))}}}e.exports=o}).call(t,n(1))},function(e,t,n){var r=n(24),o=r.Symbol;e.exports=o},function(e,t,n){function r(e){return null==e?void 0===e?c:u:f&&f in Object(e)?a(e):i(e)}var o=n(10),a=n(21),i=n(23),u="[object Null]",c="[object Undefined]",f=o?o.toStringTag:void 0;e.exports=r},function(e,t,n){function r(e,t,n){if(!u(n))return!1;var r=typeof t;return!!("number"==r?a(n)&&i(t,n.length):"string"==r&&t in n)&&o(n[t],e)}var o=n(26),a=n(27),i=n(22),u=n(6);e.exports=r},function(e,t,n){function r(e){if(!e)return 0===e?e:0;if(e=o(e),e===a||e===-a){var t=e<0?-1:1;return t*i}return e===e?e:0}var o=n(34),a=1/0,i=1.7976931348623157e308;e.exports=r},function(e,t){e.exports=ReactDOM},function(e,t){e.exports=function(e){var t={},n=arguments[1];if("string"==typeof n){n={};for(var r=1;r<arguments.length;r++)n[arguments[r]]=!0}for(var o in e)n[o]||(t[o]=e[o]);return t}},,function(e,t){function n(e,t,n,a){for(var i=-1,u=o(r((t-e)/(n||1)),0),c=Array(u);u--;)c[a?u:++i]=e,e+=n;return c}var r=Math.ceil,o=Math.max;e.exports=n},function(e,t){function n(e,t,n){var r=-1,o=e.length;t<0&&(t=-t>o?0:o+t),n=n>o?o:n,n<0&&(n+=o),o=t>n?0:n-t>>>0,t>>>=0;for(var a=Array(o);++r<o;)a[r]=e[r+t];return a}e.exports=n},function(e,t,n){function r(e){return function(t,n,r){return r&&"number"!=typeof r&&a(t,n,r)&&(n=r=void 0),t=i(t),void 0===n?(n=t,t=0):n=i(n),r=void 0===r?t<n?1:-1:i(r),o(t,n,r,e)}}var o=n(17),a=n(12),i=n(13);e.exports=r},function(e,t){(function(t){var n="object"==typeof t&&t&&t.Object===Object&&t;e.exports=n}).call(t,function(){return this}())},function(e,t,n){function r(e){var t=i.call(e,c),n=e[c];try{e[c]=void 0;var r=!0}catch(e){}var o=u.call(e);return r&&(t?e[c]=n:delete e[c]),o}var o=n(10),a=Object.prototype,i=a.hasOwnProperty,u=a.toString,c=o?o.toStringTag:void 0;e.exports=r},function(e,t){function n(e,t){return t=null==t?r:t,!!t&&("number"==typeof e||o.test(e))&&e>-1&&e%1==0&&e<t}var r=9007199254740991,o=/^(?:0|[1-9]\d*)$/;e.exports=n},function(e,t){function n(e){return o.call(e)}var r=Object.prototype,o=r.toString;e.exports=n},function(e,t,n){var r=n(20),o="object"==typeof self&&self&&self.Object===Object&&self,a=r||o||Function("return this")();e.exports=a},function(e,t,n){function r(e,t,n){t=(n?a(e,t,n):void 0===t)?1:c(i(t),0);var r=null==e?0:e.length;if(!r||t<1)return[];for(var f=0,s=0,l=Array(u(r/t));f<r;)l[s++]=o(e,f,f+=t);return l}var o=n(18),a=n(12),i=n(33),u=Math.ceil,c=Math.max;e.exports=r},function(e,t){function n(e,t){return e===t||e!==e&&t!==t}e.exports=n},function(e,t,n){function r(e){return null!=e&&a(e.length)&&!o(e)}var o=n(28),a=n(29);e.exports=r},function(e,t,n){function r(e){if(!a(e))return!1;var t=o(e);return t==u||t==c||t==i||t==f}var o=n(11),a=n(6),i="[object AsyncFunction]",u="[object Function]",c="[object GeneratorFunction]",f="[object Proxy]";e.exports=r},function(e,t){function n(e){return"number"==typeof e&&e>-1&&e%1==0&&e<=r}var r=9007199254740991;e.exports=n},function(e,t){function n(e){return null!=e&&"object"==typeof e}e.exports=n},function(e,t,n){function r(e){return"symbol"==typeof e||a(e)&&o(e)==i}var o=n(11),a=n(30),i="[object Symbol]";e.exports=r},function(e,t,n){var 
r=n(19),o=r();e.exports=o},function(e,t,n){function r(e){var t=o(e),n=t%1;return t===t?n?t-n:t:0}var o=n(13);e.exports=r},function(e,t,n){function r(e){if("number"==typeof e)return e;if(a(e))return i;if(o(e)){var t="function"==typeof e.valueOf?e.valueOf():e;e=o(t)?t+"":t}if("string"!=typeof e)return 0===e?e:+e;e=e.replace(u,"");var n=f.test(e);return n||s.test(e)?l(e.slice(2),n?2:8):c.test(e)?i:+e}var o=n(6),a=n(31),i=NaN,u=/^\s+|\s+$/g,c=/^[-+]0x[0-9a-f]+$/i,f=/^0b[01]+$/i,s=/^0o[0-7]+$/i,l=parseInt;e.exports=r},function(e,t,n){(function(t){"use strict";function r(e,n,r,c,f){if("production"!==t.env.NODE_ENV)for(var s in e)if(e.hasOwnProperty(s)){var l;try{o("function"==typeof e[s],"%s: %s type `%s` is invalid; it must be a function, usually from React.PropTypes.",c||"React class",r,s),l=e[s](n,s,c,r,null,i)}catch(e){l=e}if(a(!l||l instanceof Error,"%s: type specification of %s `%s` is invalid; the type checker function must return `null` or an `Error` but returned a %s. You may have forgotten to pass an argument to the type checker creator (arrayOf, instanceOf, objectOf, oneOf, oneOfType, and shape all require an argument).",c||"React class",r,s,typeof l),l instanceof Error&&!(l.message in u)){u[l.message]=!0;var p=f?f():"";a(!1,"Failed %s type: %s%s",r,l.message,null!=p?p:"")}}}if("production"!==t.env.NODE_ENV)var o=n(5),a=n(9),i=n(7),u={};e.exports=r}).call(t,n(1))},function(e,t,n){"use strict";var r=n(4),o=n(5),a=n(7);e.exports=function(){function e(e,t,n,r,i,u){u!==a&&o(!1,"Calling PropTypes validators directly is not supported by the `prop-types` package. Use PropTypes.checkPropTypes() to call them. Read more at http://fb.me/use-check-prop-types")}function t(){return e}e.isRequired=e;var n={array:e,bool:e,func:e,number:e,object:e,string:e,symbol:e,any:e,arrayOf:t,element:e,instanceOf:t,node:e,objectOf:t,oneOf:t,oneOfType:t,shape:t};return n.checkPropTypes=r,n.PropTypes=n,n}},function(e,t,n){(function(t){"use strict";var r=n(4),o=n(5),a=n(9),i=n(7),u=n(35);e.exports=function(e,n){function c(e){var t=e&&(P&&e[P]||e[S]);if("function"==typeof t)return t}function f(e,t){return e===t?0!==e||1/e===1/t:e!==e&&t!==t}function s(e){this.message=e,this.stack=""}function l(e){function r(r,f,l,p,d,m,y){if(p=p||C,m=m||l,y!==i)if(n)o(!1,"Calling PropTypes validators directly is not supported by the `prop-types` package. Use `PropTypes.checkPropTypes()` to call them. Read more at http://fb.me/use-check-prop-types");else if("production"!==t.env.NODE_ENV&&"undefined"!=typeof console){var v=p+":"+l;!u[v]&&c<3&&(a(!1,"You are manually calling a React.PropTypes validation function for the `%s` prop on `%s`. This is deprecated and will throw in the standalone `prop-types` package. You may be seeing this warning due to a third-party PropTypes library. 
See https://fb.me/react-warning-dont-call-proptypes for details.",m,p),u[v]=!0,c++)}return null==f[l]?r?new s(null===f[l]?"The "+d+" `"+m+"` is marked as required "+("in `"+p+"`, but its value is `null`."):"The "+d+" `"+m+"` is marked as required in "+("`"+p+"`, but its value is `undefined`.")):null:e(f,l,p,d,m)}if("production"!==t.env.NODE_ENV)var u={},c=0;var f=r.bind(null,!1);return f.isRequired=r.bind(null,!0),f}function p(e){function t(t,n,r,o,a,i){var u=t[n],c=j(u);if(c!==e){var f=N(u);return new s("Invalid "+o+" `"+a+"` of type "+("`"+f+"` supplied to `"+r+"`, expected ")+("`"+e+"`."))}return null}return l(t)}function d(){return l(r.thatReturnsNull)}function m(e){function t(t,n,r,o,a){if("function"!=typeof e)return new s("Property `"+a+"` of component `"+r+"` has invalid PropType notation inside arrayOf.");var u=t[n];if(!Array.isArray(u)){var c=j(u);return new s("Invalid "+o+" `"+a+"` of type "+("`"+c+"` supplied to `"+r+"`, expected an array."))}for(var f=0;f<u.length;f++){var l=e(u,f,r,o,a+"["+f+"]",i);if(l instanceof Error)return l}return null}return l(t)}function y(){function t(t,n,r,o,a){var i=t[n];if(!e(i)){var u=j(i);return new s("Invalid "+o+" `"+a+"` of type "+("`"+u+"` supplied to `"+r+"`, expected a single ReactElement."))}return null}return l(t)}function v(e){function t(t,n,r,o,a){if(!(t[n]instanceof e)){var i=e.name||C,u=_(t[n]);return new s("Invalid "+o+" `"+a+"` of type "+("`"+u+"` supplied to `"+r+"`, expected ")+("instance of `"+i+"`."))}return null}return l(t)}function h(e){function n(t,n,r,o,a){for(var i=t[n],u=0;u<e.length;u++)if(f(i,e[u]))return null;var c=JSON.stringify(e);return new s("Invalid "+o+" `"+a+"` of value `"+i+"` "+("supplied to `"+r+"`, expected one of "+c+"."))}return Array.isArray(e)?l(n):("production"!==t.env.NODE_ENV?a(!1,"Invalid argument supplied to oneOf, expected an instance of array."):void 0,r.thatReturnsNull)}function b(e){function t(t,n,r,o,a){if("function"!=typeof e)return new s("Property `"+a+"` of component `"+r+"` has invalid PropType notation inside objectOf.");var u=t[n],c=j(u);if("object"!==c)return new s("Invalid "+o+" `"+a+"` of type "+("`"+c+"` supplied to `"+r+"`, expected an object."));for(var f in u)if(u.hasOwnProperty(f)){var l=e(u,f,r,o,a+"."+f,i);if(l instanceof Error)return l}return null}return l(t)}function g(e){function n(t,n,r,o,a){for(var u=0;u<e.length;u++){var c=e[u];if(null==c(t,n,r,o,a,i))return null}return new s("Invalid "+o+" `"+a+"` supplied to "+("`"+r+"`."))}if(!Array.isArray(e))return"production"!==t.env.NODE_ENV?a(!1,"Invalid argument supplied to oneOfType, expected an instance of array."):void 0,r.thatReturnsNull;for(var o=0;o<e.length;o++){var u=e[o];if("function"!=typeof u)return a(!1,"Invalid argument supplid to oneOfType. 
Expected an array of check functions, but received %s at index %s.",T(u),o),r.thatReturnsNull}return l(n)}function x(){function e(e,t,n,r,o){return O(e[t])?null:new s("Invalid "+r+" `"+o+"` supplied to "+("`"+n+"`, expected a ReactNode."))}return l(e)}function E(e){function t(t,n,r,o,a){var u=t[n],c=j(u);if("object"!==c)return new s("Invalid "+o+" `"+a+"` of type `"+c+"` "+("supplied to `"+r+"`, expected `object`."));for(var f in e){var l=e[f];if(l){var p=l(u,f,r,o,a+"."+f,i);if(p)return p}}return null}return l(t)}function O(t){switch(typeof t){case"number":case"string":case"undefined":return!0;case"boolean":return!t;case"object":if(Array.isArray(t))return t.every(O);if(null===t||e(t))return!0;var n=c(t);if(!n)return!1;var r,o=n.call(t);if(n!==t.entries){for(;!(r=o.next()).done;)if(!O(r.value))return!1}else for(;!(r=o.next()).done;){var a=r.value;if(a&&!O(a[1]))return!1}return!0;default:return!1}}function w(e,t){return"symbol"===e||("Symbol"===t["@@toStringTag"]||"function"==typeof Symbol&&t instanceof Symbol)}function j(e){var t=typeof e;return Array.isArray(e)?"array":e instanceof RegExp?"object":w(t,e)?"symbol":t}function N(e){if("undefined"==typeof e||null===e)return""+e;var t=j(e);if("object"===t){if(e instanceof Date)return"date";if(e instanceof RegExp)return"regexp"}return t}function T(e){var t=N(e);switch(t){case"array":case"object":return"an "+t;case"boolean":case"date":case"regexp":return"a "+t;default:return t}}function _(e){return e.constructor&&e.constructor.name?e.constructor.name:C}var P="function"==typeof Symbol&&Symbol.iterator,S="@@iterator",C="<<anonymous>>",k={array:p("array"),bool:p("boolean"),func:p("function"),number:p("number"),object:p("object"),string:p("string"),symbol:p("symbol"),any:d(),arrayOf:m,element:y(),instanceOf:v,node:x(),objectOf:b,oneOf:h,oneOfType:g,shape:E};return s.prototype=Error.prototype,k.checkPropTypes=u,k.PropTypes=k,k}}).call(t,n(1))},function(e,t,n){(function(t){if("production"!==t.env.NODE_ENV){var r="function"==typeof Symbol&&Symbol.for&&Symbol.for("react.element")||60103,o=function(e){return"object"==typeof e&&null!==e&&e.$$typeof===r},a=!0;e.exports=n(37)(o,a)}else e.exports=n(36)()}).call(t,n(1))},function(e,t,n){"use strict";function r(e){return e&&e.__esModule?e:{default:e}}function o(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function a(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!=typeof t&&"function"!=typeof t?e:t}function i(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+typeof t);e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}var u=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e},c=function(){function e(e,t){for(var n=0;n<t.length;n++){var r=t[n];r.enumerable=r.enumerable||!1,r.configurable=!0,"value"in r&&(r.writable=!0),Object.defineProperty(e,r.key,r)}}return function(t,n,r){return n&&e(t.prototype,n),r&&e(t,r),t}}(),f=n(3),s=r(f),l=n(15),p=r(l),d=n(2),m=r(d),y=n(14),v=r(y),h=n(38),b=r(h),g=function(e){function t(){var e,n,r,i;o(this,t);for(var u=arguments.length,c=Array(u),f=0;f<u;f++)c[f]=arguments[f];return 
n=r=a(this,(e=t.__proto__||Object.getPrototypeOf(t)).call.apply(e,[this].concat(c))),r.getClientPosition=function(e){var t=e.touches;if(t&&t.length){var n=t[0];return{x:n.clientX,y:n.clientY}}return{x:e.clientX,y:e.clientY}},r.getPosition=function(){var e=(r.props.y-r.props.ymin)/(r.props.ymax-r.props.ymin)*100,t=(r.props.x-r.props.xmin)/(r.props.xmax-r.props.xmin)*100;return e>100&&(e=100),e<0&&(e=0),"x"===r.props.axis&&(e=0),e+="%",t>100&&(t=100),t<0&&(t=0),"y"===r.props.axis&&(t=0),t+="%",{top:e,left:t}},r.change=function(e,t){if(r.props.onChange){var n=v.default.findDOMNode(r).getBoundingClientRect(),o=n.width,a=n.height,i=r.props.axis,u=e.top,c=e.left;c<0&&(c=0),c>o&&(c=o),u<0&&(u=0),u>a&&(u=a);var f=0,s=0;"x"!==i&&"xy"!==i||(f=c/o*(r.props.xmax-r.props.xmin)+r.props.xmin),"y"!==i&&"xy"!==i||(s=u/a*(r.props.ymax-r.props.ymin)+r.props.ymin),r.props.onChange({x:f,y:s})}},r.handleMouseDown=function(e){e.preventDefault();var t=r.refs.handle,n=r.getClientPosition(e);r.start={x:t.offsetLeft,y:t.offsetTop},r.offset={x:n.x,y:n.y},document.addEventListener("mousemove",r.handleDrag),document.addEventListener("mouseup",r.handleDragEnd),document.addEventListener("touchmove",r.handleDrag),document.addEventListener("touchend",r.handleDragEnd),document.addEventListener("touchcancel",r.handleDragEnd)},r.getPos=function(e){var t=r.getClientPosition(e),n=(v.default.findDOMNode(r).getBoundingClientRect(),t.x+r.start.x-r.offset.x),o=t.y+r.start.y-r.offset.y;return{left:n,top:o}},r.handleDrag=function(e){e.preventDefault(),r.change(r.getPos(e))},r.handleDragEnd=function(e){e.preventDefault(),document.removeEventListener("mousemove",r.handleDrag),document.removeEventListener("mouseup",r.handleDragEnd),document.removeEventListener("touchmove",r.handleDrag),document.removeEventListener("touchend",r.handleDragEnd),document.removeEventListener("touchcancel",r.handleDragEnd),r.props.onDragEnd&&r.props.onDragEnd()},r.handleClick=function(e){var t=r.getClientPosition(e),n=v.default.findDOMNode(r).getBoundingClientRect();r.change({left:t.x-n.left,top:t.y-n.top},!0)},i=n,a(r,i)}return i(t,e),c(t,[{key:"render",value:function(){var e=this.props.axis,t=(0,p.default)(this.props,"axis","x","y","xmin","xmax","ymin","ymax","onChange","onDragEnd","className","onClick"),n=this.getPosition(),r={};return"x"===e&&(r.width=n.left),"y"===e&&(r.height=n.top),t.className=(0,s.default)("u-slider","u-slider-"+e,this.props.className),m.default.createElement("div",u({},t,{onClick:this.handleClick}),m.default.createElement("div",{className:"value",style:r}),m.default.createElement("div",{className:"handle",ref:"handle",onTouchStart:this.handleMouseDown,onMouseDown:this.handleMouseDown,onClick:function(e){e.stopPropagation(),e.nativeEvent.stopImmediatePropagation()},style:n}))}}]),t}(d.Component);g.propTypes={axis:b.default.string,x:b.default.number,xmax:b.default.number,xmin:b.default.number,y:b.default.number,ymax:b.default.number,ymin:b.default.number},g.defaultProps={axis:"x",xmin:0,ymin:0},e.exports=g},function(e,t,n){e.exports=n(39)},,,function(e,t,n){"use strict";function r(e){return e&&e.__esModule?e:{default:e}}function o(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function a(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!==("undefined"==typeof t?"undefined":c(t))&&"function"!=typeof t?e:t}function i(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not 
"+("undefined"==typeof t?"undefined":c(t)));e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}function u(e,t){var n={};for(var r in e)t.indexOf(r)>=0||Object.prototype.hasOwnProperty.call(e,r)&&(n[r]=e[r]);return n}var c="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e};Object.defineProperty(t,"__esModule",{value:!0});var f=function(){function e(e,t){for(var n=0;n<t.length;n++){var r=t[n];r.enumerable=r.enumerable||!1,r.configurable=!0,"value"in r&&(r.writable=!0),Object.defineProperty(e,r.key,r)}}return function(t,n,r){return n&&e(t.prototype,n),r&&e(t,r),t}}(),s=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e},l=n(8),p=(r(l),n(2)),d=r(p),m=n(3),y=r(m),v=n(32),h=r(v),b=n(25),g=r(b),x=function(e){var t=e.i,n=e.w,r=e.d,o=(e.className,u(e,["i","w","d","className"])),a=0===n&&t>7,i=n>=4&&t<=14,c=(0,y.default)({"prev-month":a,"next-month":i,"current-day":!a&&!i&&t===r});return d.default.createElement("td",s({className:c},o),t)},E=function(e){function t(){var e,n,r,i;o(this,t);for(var u=arguments.length,c=Array(u),f=0;f<u;f++)c[f]=arguments[f];return n=r=a(this,(e=t.__proto__||Object.getPrototypeOf(t)).call.apply(e,[this].concat(c))),r.selectDate=function(e,t){var n=0===t&&e>7,o=t>=4&&e<=14,a=r.props.moment;a.date(e),n&&a.subtract(1,"month"),o&&a.add(1,"month"),r.props.onChange(a)},r.prevMonth=function(e){e.preventDefault(),r.props.onChange(r.props.moment.subtract(1,"month"))},r.nextMonth=function(e){e.preventDefault(),r.props.onChange(r.props.moment.add(1,"month"))},i=n,a(r,i)}return i(t,e),f(t,[{key:"render",value:function(){var e=this,t=this.props.moment,n=t.date(),r=t.clone().subtract(1,"month").endOf("month").date(),o=t.clone().date(1).day(),a=t.clone().endOf("month").date(),i=[].concat((0,h.default)(r-o+1,r+1),(0,h.default)(1,a+1),(0,h.default)(1,42-a-o+1)),u=["Sun","Mon","Tue","Wed","Thu","Fri","Sat"];return d.default.createElement("div",{className:(0,y.default)("m-calendar",this.props.className)},d.default.createElement("div",{className:"toolbar"},d.default.createElement("button",{type:"button",className:"prev-month",onClick:this.prevMonth},d.default.createElement("i",{className:this.props.prevMonthIcon})),d.default.createElement("span",{className:"current-date"},t.format("MMMM YYYY")),d.default.createElement("button",{type:"button",className:"next-month",onClick:this.nextMonth},d.default.createElement("i",{className:this.props.nextMonthIcon}))),d.default.createElement("table",null,d.default.createElement("thead",null,d.default.createElement("tr",null,u.map(function(e,t){return d.default.createElement("td",{key:t},e)}))),d.default.createElement("tbody",null,(0,g.default)(i,7).map(function(t,r){return d.default.createElement("tr",{key:r},t.map(function(t){return d.default.createElement(x,{key:t,i:t,d:n,w:r,onClick:function(){return e.selectDate(t,r)}})}))}))))}}]),t}(p.Component);t.default=E},function(e,t,n){"use strict";function r(e){return e&&e.__esModule?e:{default:e}}function o(e,t){var n={};for(var r in e)t.indexOf(r)>=0||Object.prototype.hasOwnProperty.call(e,r)&&(n[r]=e[r]);return n}function a(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function 
i(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!==("undefined"==typeof t?"undefined":c(t))&&"function"!=typeof t?e:t}function u(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+("undefined"==typeof t?"undefined":c(t)));e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}var c="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e};Object.defineProperty(t,"__esModule",{value:!0});var f=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var n=arguments[t];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(e[r]=n[r])}return e},s=function(){function e(e,t){for(var n=0;n<t.length;n++){var r=t[n];r.enumerable=r.enumerable||!1,r.configurable=!0,"value"in r&&(r.writable=!0),Object.defineProperty(e,r.key,r)}}return function(t,n,r){return n&&e(t.prototype,n),r&&e(t,r),t}}(),l=n(3),p=r(l),d=n(8),m=(r(d),n(2)),y=r(m),v=n(43),h=r(v),b=n(45),g=r(b),x=function(e){function t(){var e,n,r,o;a(this,t);for(var u=arguments.length,c=Array(u),f=0;f<u;f++)c[f]=arguments[f];return n=r=i(this,(e=t.__proto__||Object.getPrototypeOf(t)).call.apply(e,[this].concat(c))),r.state={tab:0},r.handleClickTab=function(e,t){e.preventDefault(),r.setState({tab:t})},r.handleSave=function(e){e.preventDefault(),r.props.onSave&&r.props.onSave()},o=n,i(r,o)}return u(t,e),s(t,[{key:"render",value:function(){var e=this,t=this.state.tab,n=this.props,r=n.moment,a=n.className,i=(n.prevMonthIcon,n.nextMonthIcon,n.onSave,o(n,["moment","className","prevMonthIcon","nextMonthIcon","onSave"])),u=(0,p.default)("m-input-moment",a);return y.default.createElement("div",f({className:u},i),y.default.createElement("div",{className:"options"},y.default.createElement("button",{type:"button",className:(0,p.default)("ion-calendar im-btn",{"is-active":0===t}),onClick:function(t){return e.handleClickTab(t,0)}},"Date"),y.default.createElement("button",{type:"button",className:(0,p.default)("ion-clock im-btn",{"is-active":1===t}),onClick:function(t){return e.handleClickTab(t,1)}},"Time")),y.default.createElement("div",{className:"tabs"},y.default.createElement(h.default,{className:(0,p.default)("tab",{"is-active":0===t}),moment:r,onChange:this.props.onChange,prevMonthIcon:this.props.prevMonthIcon,nextMonthIcon:this.props.nextMonthIcon}),y.default.createElement(g.default,{className:(0,p.default)("tab",{"is-active":1===t}),moment:r,onChange:this.props.onChange})),this.props.onSave?y.default.createElement("button",{type:"button",className:"im-btn btn-save ion-checkmark",onClick:this.handleSave},"Save"):null)}}]),t}(m.Component);x.defaultProps={prevMonthIcon:"ion-ios-arrow-left",nextMonthIcon:"ion-ios-arrow-right"},t.default=x},function(e,t,n){"use strict";function r(e){return e&&e.__esModule?e:{default:e}}function o(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function a(e,t){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!t||"object"!==("undefined"==typeof t?"undefined":u(t))&&"function"!=typeof t?e:t}function i(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function, not "+("undefined"==typeof 
t?"undefined":u(t)));e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),t&&(Object.setPrototypeOf?Object.setPrototypeOf(e,t):e.__proto__=t)}var u="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e};Object.defineProperty(t,"__esModule",{value:!0});var c=function(){function e(e,t){for(var n=0;n<t.length;n++){var r=t[n];r.enumerable=r.enumerable||!1,r.configurable=!0,"value"in r&&(r.writable=!0),Object.defineProperty(e,r.key,r)}}return function(t,n,r){return n&&e(t.prototype,n),r&&e(t,r),t}}(),f=n(3),s=r(f),l=n(2),p=r(l),d=n(40),m=r(d),y=function(e){function t(){var e,n,r,i;o(this,t);for(var u=arguments.length,c=Array(u),f=0;f<u;f++)c[f]=arguments[f];return n=r=a(this,(e=t.__proto__||Object.getPrototypeOf(t)).call.apply(e,[this].concat(c))),r.changeHours=function(e){var t=r.props.moment;t.hours(parseInt(e.x,10)),r.props.onChange(t)},r.changeMinutes=function(e){var t=r.props.moment;t.minutes(parseInt(e.x,10)),r.props.onChange(t)},i=n,a(r,i)}return i(t,e),c(t,[{key:"render",value:function(){var e=this.props.moment;return p.default.createElement("div",{className:(0,s.default)("m-time",this.props.className)},p.default.createElement("div",{className:"showtime"},p.default.createElement("span",{className:"time"},e.format("hh")),p.default.createElement("span",{className:"separater"},":"),p.default.createElement("span",{className:"time"},e.format("mm")),p.default.createElement("span",{className:"am",style:{marginTop:"am"===e.format("a")?"0px":"40px"}},e.format("A"))),p.default.createElement("div",{className:"sliders"},p.default.createElement("div",{className:"time-text"},"Hours:"),p.default.createElement(m.default,{className:"u-slider-time",xmin:0,xmax:23,x:e.hour(),onChange:this.changeHours}),p.default.createElement("div",{className:"time-text"},"Minutes:"),p.default.createElement(m.default,{className:"u-slider-time",xmin:0,xmax:59,x:e.minute(),onChange:this.changeMinutes})))}}]),t}(l.Component);t.default=y}]); //# sourceMappingURL=input-moment.js.map
n
warmup.py
""" warmup.py contains classes for warm up stream operations. """ import numpy as np from iqt.feed.core.base import Stream, T class WarmUp(Stream[T]): """A stream operator for warming up a given stream. Parameters ---------- periods : int Number of periods to warm up. """ def __init__(self, periods: int) -> None: super().__init__() self.count = 0 self.periods = periods def forward(self) -> T: v = self.inputs[0].value if self.count < self.periods: self.count += 1 return np.nan return v def has_next(self) -> bool: return True def reset(self) -> None: self.count = 0 @Stream.register_generic_method(["warmup"]) def warmup(s: "Stream[T]", periods: int) -> "Stream[T]": """Creates a warmup stream. Parameters ---------- s : `Stream[T]` A generic stream. periods : int Number of periods to warm up. Returns
return WarmUp(periods=periods)(s)
------- `Stream[T]` The warmup stream of `s`. """
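The `warmup` operator above simply swallows the first `periods` ticks of its input, emitting `np.nan` until the count is reached. A minimal, self-contained sketch of that contract in plain Python (the `FakeInput` class is a hypothetical stand-in for an upstream `Stream` node; nothing here depends on the `iqt` package):

import numpy as np

class FakeInput:
    # Hypothetical stand-in for an upstream stream node: exposes .value
    # the same way WarmUp reads self.inputs[0].value.
    def __init__(self, values):
        self._it = iter(values)
        self.value = None

    def advance(self):
        self.value = next(self._it)

def run_warmup(values, periods):
    # Mirrors WarmUp.forward: emit NaN for the first `periods` ticks,
    # then pass the upstream value through unchanged.
    count, out, src = 0, [], FakeInput(values)
    for _ in values:
        src.advance()
        if count < periods:
            count += 1
            out.append(np.nan)
        else:
            out.append(src.value)
    return out

print(run_warmup([1.0, 2.0, 3.0, 4.0], periods=2))  # [nan, nan, 3.0, 4.0]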
nubank.py
import json import os import uuid from typing import Tuple import requests from qrcode import QRCode from requests import Response PAYMENT_EVENT_TYPES = ( 'TransferOutEvent', 'TransferInEvent', 'TransferOutReversalEvent', 'BarcodePaymentEvent', 'DebitPurchaseEvent', 'DebitPurchaseReversalEvent', ) class NuException(Exception): def __init__(self, status_code, response, url): super().__init__(f'The request failed with HTTP status code {status_code}') self.url = url self.status_code = status_code self.response = response class Nubank: DISCOVERY_URL = 'https://prod-s0-webapp-proxy.nubank.com.br/api/discovery' DISCOVERY_APP_URL = 'https://prod-s0-webapp-proxy.nubank.com.br/api/app/discovery' auth_url = None feed_url = None proxy_list_url = None proxy_list_app_url = None query_url = None bills_url = None def __init__(self): self.headers = { 'Content-Type': 'application/json', 'X-Correlation-Id': 'WEB-APP.pewW9', 'User-Agent': 'pynubank Client - https://github.com/andreroggeri/pynubank', } self._update_proxy_urls() self.auth_url = self.proxy_list_url['login'] @staticmethod def _get_query(query_name): root = os.path.abspath(os.path.dirname(__file__)) gql_file = query_name + '.gql' path = os.path.join(root, 'queries', gql_file) with open(path) as gql: return gql.read() def _update_proxy_urls(self): request = requests.get(self.DISCOVERY_URL, headers=self.headers) self.proxy_list_url = json.loads(request.content.decode('utf-8')) request = requests.get(self.DISCOVERY_APP_URL, headers=self.headers) self.proxy_list_app_url = json.loads(request.content.decode('utf-8')) def _make_graphql_request(self, graphql_object): body = { 'query': self._get_query(graphql_object) } response = requests.post(self.query_url, json=body, headers=self.headers) return self._handle_response(response) def _password_auth(self, cpf: str, password: str): payload = { "grant_type": "password", "login": cpf, "password": password, "client_id": "other.conta", "client_secret": "yQPeLzoHuJzlMMSAjC-LgNUJdUecx8XO" } response = requests.post(self.auth_url, json=payload, headers=self.headers) data = self._handle_response(response) return data def _handle_response(self, response: Response) -> dict: if response.status_code != 200: raise NuException(response.status_code, response.json(), response.url) return response.json() def get_qr_code(self) -> Tuple[str, QRCode]: content = str(uuid.uuid4()) qr = QRCode() qr.add_data(content) return content, qr def authenticate_with_qr_code(self, cpf: str, password, uuid: str): auth_data = self._password_auth(cpf, password) self.headers['Authorization'] = f'Bearer {auth_data["access_token"]}' payload = { 'qr_code_id': uuid, 'type': 'login-webapp' } response = requests.post(self.proxy_list_app_url['lift'], json=payload, headers=self.headers) auth_data = self._handle_response(response) self.headers['Authorization'] = f'Bearer {auth_data["access_token"]}' self.feed_url = auth_data['_links']['events']['href'] self.query_url = auth_data['_links']['ghostflame']['href'] self.bills_url = auth_data['_links']['bills_summary']['href'] def get_card_feed(self): request = requests.get(self.feed_url, headers=self.headers) return json.loads(request.content.decode('utf-8')) def get_card_statements(self): feed = self.get_card_feed() return list(filter(lambda x: x['category'] == 'transaction', feed['events'])) def get_bills(self):
def get_bill_details(self, bill): request = requests.get(bill['_links']['self']['href'], headers=self.headers) return json.loads(request.content.decode('utf-8')) def get_account_feed(self): data = self._make_graphql_request('account_feed') return data['data']['viewer']['savingsAccount']['feed'] def get_account_statements(self): feed = self.get_account_feed() return list(filter(lambda x: x['__typename'] in PAYMENT_EVENT_TYPES, feed)) def get_account_balance(self): data = self._make_graphql_request('account_balance') return data['data']['viewer']['savingsAccount']['currentSavingsBalance']['netAmount']
request = requests.get(self.bills_url, headers=self.headers) return json.loads(request.content.decode('utf-8'))['bills']
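A hedged sketch of how the Nubank client above is meant to be driven end to end (the CPF and password are placeholders, and the calls hit live Nubank endpoints, so this is illustrative only; the `pynubank` import path is an assumption taken from the User-Agent string):

from pynubank import Nubank  # assumed import path

nu = Nubank()

# 1. Generate a QR code and render it so it can be scanned in the mobile app.
uuid, qr_code = nu.get_qr_code()
qr_code.print_ascii(invert=True)  # print_ascii comes from the qrcode library

# 2. After scanning, exchange credentials plus the QR uuid for access tokens.
nu.authenticate_with_qr_code('12345678909', 'hunter2', uuid)  # placeholder credentials

# 3. Authenticated calls now work.
print(nu.get_account_balance())
for event in nu.get_account_statements():
    print(event['__typename'])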
test_chunk.py
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import unittest from collections import OrderedDict from random import shuffle import numpy as np from random import shuffle import oneflow as flow import oneflow.unittest from oneflow.test_utils.automated_test_util import * @flow.unittest.skip_unless_1n1d() class TestChunk(flow.unittest.TestCase): @autotest(n=5, check_graph=True) def test_flow_chunk_list_with_random_data(test_case):
@autotest(n=10) def test_flow_chunk_list_with_random_data(test_case): device = random_device() dim = random(1, 4).to(int) x = random_tensor( ndim=4, dim1=random(low=4, high=8).to(int), dim2=random(low=4, high=8).to(int), dim3=random(low=4, high=8).to(int), ).to(device) permute_list = [0, 1, 2, 3] shuffle(permute_list) y = x.permute(permute_list) z = torch.chunk(y, chunks=random(low=1, high=5).to(int), dim=dim) return torch.cat(z, dim=dim) @autotest(n=5, auto_backward=False, check_graph=True) def test_flow_chunk_list_with_stride(test_case): device = random_device() dim = random(1, 4).to(int) x = random_tensor( ndim=4, dim1=random(low=4, high=8).to(int), dim2=random(low=4, high=8).to(int), dim3=random(low=4, high=8).to(int), ).to(device) perm = [0, 1, 2, 3] shuffle(perm) y = x.permute(perm) z = torch.chunk(y, chunks=random(low=1, high=5).to(int), dim=dim) return torch.cat(z, dim=dim) @autotest(n=5, auto_backward=False, check_graph=True) def test_flow_chunk_list_bool_with_random_data(test_case): device = random_device() dim = random(1, 4).to(int) x = random_tensor( ndim=4, dim1=random(low=4, high=8).to(int), dim2=random(low=4, high=8).to(int), dim3=random(low=4, high=8).to(int), ).to(device, torch.bool) y = torch.chunk(x, chunks=random(low=1, high=5).to(int), dim=dim) z = torch.cat(y, dim=dim) return z @autotest(n=5, check_graph=True) def test_flow_chunk_list_with_random_data_negative_dim(test_case): device = random_device() dim = random(1, 3).to(int) x = random_tensor( ndim=4, dim0=random(low=4, high=8).to(int), dim1=random(low=4, high=8).to(int), dim2=random(low=4, high=8).to(int), dim3=random(low=4, high=8).to(int), ).to(device) y = torch.chunk(x, chunks=4, dim=-1) z = torch.cat(y, dim=-1) return z if __name__ == "__main__": unittest.main()
device = random_device() dim = random(1, 4).to(int) x = random_tensor( ndim=4, dim1=random(low=4, high=8).to(int), dim2=random(low=4, high=8).to(int), dim3=random(low=4, high=8).to(int), ).to(device) y = torch.chunk(x, chunks=random(low=1, high=5).to(int), dim=dim) z = torch.cat(y, dim=dim) return z
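Every test in this record checks the same invariant: chunking a tensor and concatenating the chunks back along the same dim is the identity. A NumPy analogue of that round trip (np.array_split sizes its pieces slightly differently from torch.chunk, but the reassembly invariant is the same):

import numpy as np

x = np.random.rand(4, 6, 5, 7)
dim = 2
pieces = np.array_split(x, 3, axis=dim)  # analogous to torch.chunk(x, chunks=3, dim=dim)
y = np.concatenate(pieces, axis=dim)     # analogous to torch.cat(pieces, dim=dim)
assert np.array_equal(x, y)              # split + re-join is the identity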
test_temperature_stream.py
# coding: utf-8 """ Strava API v3 The [Swagger Playground](https://developers.strava.com/playground) is the easiest way to familiarize yourself with the Strava API by submitting HTTP requests and observing the responses before you write any client code. It will show what a response will look like with different endpoints depending on the authorization scope you receive from your athletes. To use the Playground, go to https://www.strava.com/settings/api and change your “Authorization Callback Domain” to developers.strava.com. Please note, we only support Swagger 2.0. There is a known issue where you can only select one scope at a time. For more information, please check the section “client code” at https://developers.strava.com/docs. # noqa: E501 OpenAPI spec version: 3.0.0
from __future__ import absolute_import import unittest import swagger_client from swagger_client.models.temperature_stream import TemperatureStream # noqa: E501 from swagger_client.rest import ApiException class TestTemperatureStream(unittest.TestCase): """TemperatureStream unit test stubs""" def setUp(self): pass def tearDown(self): pass def testTemperatureStream(self): """Test TemperatureStream""" # FIXME: construct object with mandatory attributes with example values # model = swagger_client.models.temperature_stream.TemperatureStream() # noqa: E501 pass if __name__ == '__main__': unittest.main()
Generated by: https://github.com/swagger-api/swagger-codegen.git """
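One way the FIXME in testTemperatureStream could be filled in, assuming the generated model follows the public Strava stream schema (the attribute names original_size, resolution, series_type, and data are assumptions about the generated code, not taken from this file):

import swagger_client

stream = swagger_client.models.temperature_stream.TemperatureStream(
    original_size=3,          # assumed attribute names, per the Strava API docs
    resolution='high',
    series_type='distance',
    data=[18.5, 19.0, 19.5],  # temperatures in degrees Celsius
)
print(stream.to_dict())  # swagger-codegen Python models expose to_dict()/to_str()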
load-module-command.js
const ContextBuilder = require('../../context/context-builder'); const vm = require('vm'); const HELP = `loads the appx module at the specified path.`; const parser = require('yargs/yargs')(process.argv.slice(2)) .example('.loadModule ~/modules/MyModule', 'Loads the module at the path into the current REPL session.') .exitProcess(false) .help();
return { help: HELP, action: function(argv) { this.lineParser.reset(); this.bufferedCommand = ''; const parsed = parseArgs(argv); if (parsed.help) { this.displayPrompt(); return; } if (!parsed.isValid) { usage.call(this, parsed.validationMsg); return; } const path = parsed.path; putScope.call(this, server, path); this.displayPrompt(); } }; }; function putScope(server, pathToScope) { const appxContext = new ContextBuilder() .forNamedResource(pathToScope) .build(); const code = appxContext.getCode(); if (!code) { usage.call(this, `No code found at ${pathToScope}`); return; } const digests = appxContext.getDigests(); const script = vm.createScript(code); const sandbox = {}; script.runInNewContext(sandbox); server.putAppXScope(sandbox, digests); } function parseArgs(argv) { const parsed = parser.parse(argv); parsed.isValid = true; const path = parsed._; parsed.path = path; if (!path || !path.length) { parsed.isValid = false; parsed.validationMsg = `Hmmm...${path} doesn't look like a path`; } else if (Array.isArray(path)) { if (path.length > 1) { parsed.isValid = false; parsed.validationMsg = 'Please specify only one module'; } else { parsed.path = path[0]; } } return parsed; } function usage(msg) { if (!msg) { msg = HELP; } console.log(msg); this.displayPrompt(); } module.exports = LoadModuleCommand;
const LoadModuleCommand = (server) => {
resnet_imagenet.py
#Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. #This program is free software; you can redistribute it and/or modify it under the terms of the BSD 3-Clause License. #This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the BSD 3-Clause License for more details. import torch.nn as nn import math import sys sys.path.append("..") from disout import Disout,LinearScheduler dploc = [73, 77, 81, 88, 92, 96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144, 148, 152, 156, 160, 164, 168, 173,177, 181, 188, 192, 196, 200, 204, 208, 212] convloc =[75, 79, 90, 90, 94, 98, 106, 106, 110, 114, 122, 122, 126, 130, 138, 138, 142, 146, 154, 154, 158, 162, 171, 171, 175, 179, 190, 190, 194, 198, 206, 206, 210, 214] def conv3x3(in_planes, out_planes, stride=1): """3x3 convolution with padding""" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,padding=1, bias=False) class Bottleneck(nn.Module): expansion = 4 def __init__(self, inplanes, planes, stride=1, downsample=None,dist_prob=None,block_size=None,alpha=None,nr_steps=None): super(Bottleneck, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,padding=1, bias=False) self.bn2 = nn.BatchNorm2d(planes) self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(planes * 4) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) if self.downsample is not None: residual = self.downsample(x) out += residual out = self.relu(out) return out class Bottleneck_disout(nn.Module): expansion = 4 def __init__(self, inplanes, planes, stride=1, downsample=None,dist_prob=0.05,block_size=6,alpha=30,nr_steps=5e3): super(Bottleneck_disout, self).__init__() self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) self.disout1=LinearScheduler(Disout(dist_prob=dist_prob,block_size=block_size,alpha=alpha), start_value=0.,stop_value=dist_prob,nr_steps=nr_steps)
self.disout2=LinearScheduler(Disout(dist_prob=dist_prob,block_size=block_size,alpha=alpha), start_value=0.,stop_value=dist_prob,nr_steps=nr_steps) self.bn2 = nn.BatchNorm2d(planes) self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) self.disout3=LinearScheduler(Disout(dist_prob=dist_prob,block_size=block_size,alpha=alpha), start_value=0.,stop_value=dist_prob,nr_steps=nr_steps) self.bn3 = nn.BatchNorm2d(planes * 4) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride self.disout4=LinearScheduler(Disout(dist_prob=dist_prob,block_size=block_size,alpha=alpha), start_value=0.,stop_value=dist_prob,nr_steps=nr_steps) def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out=self.disout1(out) out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out=self.disout2(out) out = self.conv3(out) out = self.bn3(out) out=self.disout3(out) if self.downsample is not None: residual = self.downsample(x) residual=self.disout4(residual) out += residual out = self.relu(out) return out class ResNet_disout(nn.Module): def __init__(self, layers, num_classes=1000,dist_prob=0.05,block_size=6,alpha=30,nr_steps=5e3): super(ResNet_disout, self).__init__() self.inplanes = 64 self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,bias=False) self.bn1 = nn.BatchNorm2d(64) self.relu = nn.ReLU(inplace=True) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) self.layer1 = self._make_layer(Bottleneck, 64, layers[0]) self.layer2 = self._make_layer(Bottleneck, 128, layers[1], stride=2) self.layer3 = self._make_layer(Bottleneck_disout, 256, layers[2], stride=2, dist_prob=dist_prob/4,block_size=block_size,alpha=alpha,nr_steps=nr_steps) self.layer4 = self._make_layer(Bottleneck_disout, 512, layers[3], stride=2, dist_prob=dist_prob,block_size=block_size,alpha=alpha,nr_steps=nr_steps) self.avgpool = nn.AvgPool2d(7, stride=1) self.fc = nn.Linear(512 * Bottleneck.expansion, num_classes) for name,m in self.named_modules(): if isinstance(m, nn.Conv2d): n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels m.weight.data.normal_(0, math.sqrt(2. 
/ n)) elif isinstance(m,nn.BatchNorm2d) and 'bn3'in name: m.weight.data.fill_(0) m.bias.data.zero_() elif isinstance(m, nn.BatchNorm2d): m.weight.data.fill_(1) m.bias.data.zero_() def _make_layer(self, block, planes, blocks, stride=1,dist_prob=0.05,block_size=6,alpha=30,nr_steps=5e3): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes * block.expansion),) layers = [] layers.append(block(self.inplanes, planes, stride, downsample, dist_prob=dist_prob,block_size=block_size,alpha=alpha,nr_steps=nr_steps)) self.inplanes = planes * block.expansion for i in range(1, blocks): layers.append(block(self.inplanes, planes, dist_prob=dist_prob,block_size=block_size,alpha=alpha,nr_steps=nr_steps)) return nn.Sequential(*layers) def forward(self, x): gpu_id = str(x.get_device()) modulelist=list(self.modules()) for imodu in range(len(dploc)): modulelist[dploc[imodu]].weight_behind[gpu_id]=modulelist[convloc[imodu]].weight.data x = self.conv1(x) x = self.bn1(x) x = self.relu(x) x = self.maxpool(x) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) x = self.avgpool(x) x = x.view(x.size(0), -1) x = self.fc(x) return x def resnet50_disout(dist_prob=0.05,block_size=6,alpha=30,nr_steps=5e3): model = ResNet_disout([3, 4, 6, 3],dist_prob=dist_prob,block_size=block_size,alpha=alpha,nr_steps=nr_steps) return model
self.bn1 = nn.BatchNorm2d(planes) self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,padding=1, bias=False)
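A short sketch of building and exercising the disout ResNet defined above. Note that ResNet_disout.forward keys its weight_behind bookkeeping on x.get_device(), so the input must live on a CUDA device; this snippet assumes a GPU is available and that the file is importable as resnet_imagenet:

import torch

from resnet_imagenet import resnet50_disout  # assumed import of the file above

model = resnet50_disout(dist_prob=0.05, block_size=6, alpha=30, nr_steps=5e3).cuda()
model.train()  # disout perturbs activations during training; its strength ramps over nr_steps

x = torch.randn(2, 3, 224, 224, device='cuda')  # ImageNet-sized dummy batch
logits = model(x)
print(logits.shape)  # torch.Size([2, 1000])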
error.rs
pub use logos::Span; pub struct Error { pub message: String, pub location: Option<Span>, } impl Error { pub fn new(message: String, location: Option<Span>) -> Error
}
{ Error { message, location } }
issue.go
package gitbot import ( "fmt" ) // Issue object from github api type Issue struct { User *User `json:"user"` Assignee *User `json:"assignee"` Milestone *Milestone `json:"milestone"` PullRequest *PullRequest `json:"pull_request"` ClosedBy *User `json:"closed_by"` Labels []*Label `json:"labels"` URL string `json:"url"` HTMLURL string `json:"html_url"` Number int `json:"number"` State string `json:"state"` Title string `json:"title"` Body string `json:"body"` Comments int `json:"comments"`
} func (s Issue) String() string { return fmt.Sprintf("#%v (%s)", s.Number, s.Title) }
ClosedAt string `json:"closed_at"` //CreatedAt NullTime `json:"created_at"` //UpdatedAt NullTime `json:"updated_at"`
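The json tags on the struct above map directly onto GitHub's issue payload; a small Python sketch of a matching document (all values are made up, only the field names come from the tags):

import json

payload = json.loads("""
{
  "url": "https://api.github.com/repos/octocat/hello/issues/42",
  "html_url": "https://github.com/octocat/hello/issues/42",
  "number": 42,
  "state": "open",
  "title": "Widget frobnicator panics",
  "body": "Steps to reproduce...",
  "comments": 3
}
""")
print(f"#{payload['number']} ({payload['title']})")  # same output shape as Issue.String()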
post_gen_project.py
#!/usr/bin/env python import os import stat PROJECT_DIRECTORY = os.path.realpath(os.path.curdir) def remove_file(filepath): os.remove(os.path.join(PROJECT_DIRECTORY, filepath)) if __name__ == '__main__':
cli_file = os.path.join('{{ cookiecutter.project_slug }}', 'cli.py') remove_file(cli_file) if 'Not open source' == '{{ cookiecutter.open_source_license }}': remove_file('LICENSE') # Create secret envs os.rename(os.path.join(PROJECT_DIRECTORY, 'secret--template.env.txt'), os.path.join(PROJECT_DIRECTORY, 'secret--template.env')) os.rename(os.path.join(PROJECT_DIRECTORY, 'secret--template-values.env.txt'), os.path.join(PROJECT_DIRECTORY, 'secret--template-values.env')) os.rename(os.path.join(PROJECT_DIRECTORY, 'frontend', '.env.txt'), os.path.join(PROJECT_DIRECTORY, 'frontend', '.env')) os.rename(os.path.join(PROJECT_DIRECTORY, 'frontend', 'docker.env.txt'), os.path.join(PROJECT_DIRECTORY, 'frontend', 'docker.env')) # Normalize Windows (CRLF) line endings to Unix (LF) in shell scripts shell_scripts = [os.path.join(PROJECT_DIRECTORY, '.__run_cli.sh'), os.path.join(PROJECT_DIRECTORY, 'boot.sh'), os.path.join(PROJECT_DIRECTORY, 'host', 'test_loop.sh')] for shell_script in shell_scripts: with open(shell_script, "r") as fin: lines = [] for line in fin: lines.append(line.replace('\r\n', '\n')) with open(shell_script, "w") as fout: for line in lines: fout.write(line) # Make shell scripts executable for shell_script in shell_scripts: st = os.stat(shell_script) os.chmod(shell_script, st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
if 'no' in '{{ cookiecutter.command_line_interface|lower }}':
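The final loop in the hook ORs the execute bits for user, group, and other into each script's existing mode. A standalone illustration of that permission update on a throwaway file (POSIX only; Windows ignores these bits):

import os
import stat
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.sh', delete=False) as f:
    f.write('#!/bin/sh\necho hello\n')
    script = f.name

st = os.stat(script)
os.chmod(script, st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
print(oct(os.stat(script).st_mode & 0o777))  # e.g. 0o755 if the file started as 0o644
os.remove(script)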
look_ahead.rs
use crate::{ ast::{Directive, Fragment, InputValue, Selection}, parser::Spanning, value::ScalarValue, }; use std::collections::HashMap; use super::Variables; /// An enum that describes if a field is available in all types of the interface /// or only in a certain subtype #[derive(Debug, Clone, PartialEq)] pub enum Applies<'a> { /// The field is available independent from the type All, /// The field is only available for a given typename OnlyType(&'a str), } /// A JSON-like value that can is used as argument in the query execution /// /// In contrast to `InputValue` these values do only contain constants, /// meaning that variables are already resolved. #[derive(Debug, Clone, PartialEq)] #[allow(missing_docs)] pub enum LookAheadValue<'a, S: 'a> { Null, Scalar(&'a S), Enum(&'a str), List(Vec<LookAheadValue<'a, S>>), Object(Vec<(&'a str, LookAheadValue<'a, S>)>), } impl<'a, S> LookAheadValue<'a, S> where S: ScalarValue, { fn from_input_value(input_value: &'a InputValue<S>, vars: &'a Variables<S>) -> Self { match *input_value { InputValue::Null => LookAheadValue::Null, InputValue::Scalar(ref s) => LookAheadValue::Scalar(s), InputValue::Enum(ref e) => LookAheadValue::Enum(e), InputValue::Variable(ref name) => vars .get(name) .map(|v| Self::from_input_value(v, vars)) .unwrap_or(LookAheadValue::Null), InputValue::List(ref l) => LookAheadValue::List( l.iter() .map(|i| LookAheadValue::from_input_value(&i.item, vars)) .collect(), ), InputValue::Object(ref o) => LookAheadValue::Object( o.iter() .map(|&(ref n, ref i)| { ( &n.item as &str, LookAheadValue::from_input_value(&i.item, vars), ) }) .collect(), ), } } } /// An argument passed into the query #[derive(Debug, Clone, PartialEq)] pub struct LookAheadArgument<'a, S: 'a> { name: &'a str, value: LookAheadValue<'a, S>, } impl<'a, S> LookAheadArgument<'a, S> where S: ScalarValue, { pub(super) fn new( &(ref name, ref value): &'a (Spanning<&'a str>, Spanning<InputValue<S>>), vars: &'a Variables<S>, ) -> Self { LookAheadArgument { name: name.item, value: LookAheadValue::from_input_value(&value.item, vars), } } /// The argument's name pub fn name(&'a self) -> &str { self.name } /// The value of the argument pub fn value(&'a self) -> &LookAheadValue<'a, S> { &self.value } } #[doc(hidden)] #[derive(Debug, Clone, PartialEq)] pub struct ChildSelection<'a, S: 'a> { pub(super) inner: LookAheadSelection<'a, S>, pub(super) applies_for: Applies<'a>, } /// A selection performed by a query #[derive(Debug, Clone, PartialEq)] pub struct LookAheadSelection<'a, S: 'a> { pub(super) name: &'a str, pub(super) alias: Option<&'a str>, pub(super) arguments: Vec<LookAheadArgument<'a, S>>, pub(super) children: Vec<ChildSelection<'a, S>>, } impl<'a, S> Default for LookAheadSelection<'a, S> where S: ScalarValue, { fn default() -> Self { LookAheadSelection { name: "", alias: None, arguments: vec![], children: vec![], } } } impl<'a, S> LookAheadSelection<'a, S> where S: ScalarValue, { fn should_include<'b, 'c>( directives: Option<&'b Vec<Spanning<Directive<S>>>>, vars: &'c Variables<S>, ) -> bool where 'b: 'a, 'c: 'a, { directives .map(|d| { d.iter().all(|d| { let d = &d.item; let arguments = &d.arguments; match (d.name.item, arguments) { ("include", &Some(ref a)) => a .item .items .iter() .find(|item| item.0.item == "if") .map(|&(_, ref v)| { if let LookAheadValue::Scalar(s) = LookAheadValue::from_input_value(&v.item, vars) { s.as_boolean().unwrap_or(false) } else { false } }) .unwrap_or(false), ("skip", &Some(ref a)) => a .item .items .iter() .find(|item| item.0.item == "if") 
.map(|&(_, ref v)| { if let LookAheadValue::Scalar(b) = LookAheadValue::from_input_value(&v.item, vars) { b.as_boolean().map(::std::ops::Not::not).unwrap_or(false) } else { false } }) .unwrap_or(false), ("skip", &None) => false, ("include", &None) => true, (_, _) => unreachable!(), } }) }) .unwrap_or(true) } pub(super) fn
( s: &'a Selection<'a, S>, vars: &'a Variables<S>, fragments: &'a HashMap<&'a str, Fragment<'a, S>>, ) -> Option<LookAheadSelection<'a, S>> { Self::build_from_selection_with_parent(s, None, vars, fragments) } pub(super) fn build_from_selection_with_parent( s: &'a Selection<'a, S>, parent: Option<&mut Self>, vars: &'a Variables<S>, fragments: &'a HashMap<&'a str, Fragment<'a, S>>, ) -> Option<LookAheadSelection<'a, S>> { let empty: &[Selection<S>] = &[]; match *s { Selection::Field(ref field) => { let field = &field.item; let include = Self::should_include(field.directives.as_ref(), vars); if !include { return None; } let name = field.name.item; let alias = field.alias.as_ref().map(|a| a.item); let arguments = field .arguments .as_ref() .map(|a| &a.item) .map(|a| { a.items .iter() .map(|p| LookAheadArgument::new(p, vars)) .collect() }) .unwrap_or_else(Vec::new); let mut ret = LookAheadSelection { name, alias, arguments, children: Vec::new(), }; for c in field .selection_set .as_ref() .map(|s| s as &[_]) .unwrap_or_else(|| empty) .iter() { let s = LookAheadSelection::build_from_selection_with_parent( c, Some(&mut ret), vars, fragments, ); assert!(s.is_none()); } if let Some(p) = parent { p.children.push(ChildSelection { inner: ret, applies_for: Applies::All, }); None } else { Some(ret) } } Selection::FragmentSpread(ref fragment) => { let include = Self::should_include(fragment.item.directives.as_ref(), vars); if !include { return None; } let f = fragments.get(&fragment.item.name.item).expect("a fragment"); if let Some(parent) = parent { for c in f.selection_set.iter() { let s = LookAheadSelection::build_from_selection_with_parent( c, Some(parent), vars, fragments, ); assert!(s.is_none()); } } else { for c in f.selection_set.iter() { let s = LookAheadSelection::build_from_selection_with_parent( c, None, vars, fragments, ); assert!(s.is_some()); } } None } Selection::InlineFragment(ref inline) if parent.is_some() => { let include = Self::should_include(inline.item.directives.as_ref(), vars); if !include { return None; } let parent = parent.unwrap(); for c in inline.item.selection_set.iter() { let s = LookAheadSelection::build_from_selection_with_parent( c, Some(parent), vars, fragments, ); assert!(s.is_none()); if let Some(c) = inline.item.type_condition.as_ref().map(|t| t.item) { if let Some(p) = parent.children.last_mut() { p.applies_for = Applies::OnlyType(c); } } } None } _ => unimplemented!(), } } /// Convert a eventually type independent selection into one for a concrete type pub fn for_explicit_type(&self, type_name: &str) -> ConcreteLookAheadSelection<'a, S> { ConcreteLookAheadSelection { children: self .children .iter() .filter_map(|c| match c.applies_for { Applies::OnlyType(t) if t == type_name => { Some(c.inner.for_explicit_type(type_name)) } Applies::All => Some(c.inner.for_explicit_type(type_name)), Applies::OnlyType(_) => None, }) .collect(), name: self.name, alias: self.alias, arguments: self.arguments.clone(), } } } /// A selection performed by a query on a concrete type #[derive(Debug, PartialEq)] pub struct ConcreteLookAheadSelection<'a, S: 'a> { name: &'a str, alias: Option<&'a str>, arguments: Vec<LookAheadArgument<'a, S>>, children: Vec<ConcreteLookAheadSelection<'a, S>>, } /// Set of common methods for `ConcreteLookAheadSelection` and `LookAheadSelection`. /// /// `'sel` lifetime is intended to point to the data that this `LookAheadSelection` (or /// `ConcreteLookAheadSelection`) points to. 
pub trait LookAheadMethods<'sel, S> { /// Get the (potentially aliased) name of the field represented by the current selection fn field_name(&self) -> &'sel str; /// Get the the child selection for a given field /// If a child has an alias, it will only match if the alias matches `name` fn select_child(&self, name: &str) -> Option<&Self>; /// Check if a given child selection with a name exists /// If a child has an alias, it will only match if the alias matches `name` fn has_child(&self, name: &str) -> bool { self.select_child(name).is_some() } /// Does the current node have any arguments? fn has_arguments(&self) -> bool; /// Does the current node have any children? fn has_children(&self) -> bool; /// Get the top level arguments for the current selection fn arguments(&self) -> &[LookAheadArgument<S>]; /// Get the top level argument with a given name from the current selection fn argument(&self, name: &str) -> Option<&LookAheadArgument<S>> { self.arguments().iter().find(|a| a.name == name) } /// Get the (possibly aliased) names of the top level children for the current selection fn child_names(&self) -> Vec<&'sel str>; /// Get an iterator over the children for the current selection fn children(&self) -> Vec<&Self>; } impl<'a, S> LookAheadMethods<'a, S> for ConcreteLookAheadSelection<'a, S> { fn field_name(&self) -> &'a str { self.alias.unwrap_or(self.name) } fn select_child(&self, name: &str) -> Option<&Self> { self.children.iter().find(|c| c.field_name() == name) } fn arguments(&self) -> &[LookAheadArgument<S>] { &self.arguments } fn child_names(&self) -> Vec<&'a str> { self.children.iter().map(|c| c.field_name()).collect() } fn has_arguments(&self) -> bool { !self.arguments.is_empty() } fn has_children(&self) -> bool { !self.children.is_empty() } fn children(&self) -> Vec<&Self> { self.children.iter().collect() } } impl<'a, S> LookAheadMethods<'a, S> for LookAheadSelection<'a, S> { fn field_name(&self) -> &'a str { self.alias.unwrap_or(self.name) } fn select_child(&self, name: &str) -> Option<&Self> { self.children .iter() .find(|c| c.inner.field_name() == name) .map(|s| &s.inner) } fn arguments(&self) -> &[LookAheadArgument<S>] { &self.arguments } fn child_names(&self) -> Vec<&'a str> { self.children.iter().map(|c| c.inner.field_name()).collect() } fn has_arguments(&self) -> bool { !self.arguments.is_empty() } fn has_children(&self) -> bool { !self.children.is_empty() } fn children(&self) -> Vec<&Self> { self.children .iter() .map(|child_selection| &child_selection.inner) .collect() } } #[cfg(test)] mod tests { use super::*; use crate::{ ast::{Document, OwnedDocument}, parser::UnlocatedParseResult, schema::model::SchemaType, validation::test_harness::{MutationRoot, QueryRoot, SubscriptionRoot}, value::{DefaultScalarValue, ScalarValue}, }; use std::collections::HashMap; fn parse_document_source<S>(q: &str) -> UnlocatedParseResult<OwnedDocument<S>> where S: ScalarValue, { crate::parse_document_source( q, &SchemaType::new::<QueryRoot, MutationRoot, SubscriptionRoot>(&(), &(), &()), ) } fn extract_fragments<'a, S>(doc: &'a Document<S>) -> HashMap<&'a str, Fragment<'a, S>> where S: Clone, { let mut fragments = HashMap::new(); for d in doc { if let crate::ast::Definition::Fragment(ref f) = *d { let f = f.item.clone(); fragments.insert(f.name.item, f); } } fragments } #[test] fn check_simple_query() { let docs = parse_document_source::<DefaultScalarValue>( " query Hero { hero { id name } } ", ) .unwrap(); let fragments = extract_fragments(&docs); if let crate::ast::Definition::Operation(ref op) 
= docs[0] { let vars = Variables::default(); let look_ahead = LookAheadSelection::build_from_selection( &op.item.selection_set[0], &vars, &fragments, ) .unwrap(); let expected = LookAheadSelection { name: "hero", alias: None, arguments: Vec::new(), children: vec![ ChildSelection { inner: LookAheadSelection { name: "id", alias: None, arguments: Vec::new(), children: Vec::new(), }, applies_for: Applies::All, }, ChildSelection { inner: LookAheadSelection { name: "name", alias: None, arguments: Vec::new(), children: Vec::new(), }, applies_for: Applies::All, }, ], }; assert_eq!(look_ahead, expected); } else { panic!("No Operation found"); } } #[test] fn check_query_with_alias() { let docs = parse_document_source::<DefaultScalarValue>( " query Hero { custom_hero: hero { id my_name: name } } ", ) .unwrap(); let fragments = extract_fragments(&docs); if let crate::ast::Definition::Operation(ref op) = docs[0] { let vars = Variables::default(); let look_ahead = LookAheadSelection::build_from_selection( &op.item.selection_set[0], &vars, &fragments, ) .unwrap(); let expected = LookAheadSelection { name: "hero", alias: Some("custom_hero"), arguments: Vec::new(), children: vec![ ChildSelection { inner: LookAheadSelection { name: "id", alias: None, arguments: Vec::new(), children: Vec::new(), }, applies_for: Applies::All, }, ChildSelection { inner: LookAheadSelection { name: "name", alias: Some("my_name"), arguments: Vec::new(), children: Vec::new(), }, applies_for: Applies::All, }, ], }; assert_eq!(look_ahead, expected); } else { panic!("No Operation found"); } } #[test] fn check_query_with_child() { let docs = parse_document_source::<DefaultScalarValue>( " query Hero { hero { id name friends { name id } } } ", ) .unwrap(); let fragments = extract_fragments(&docs); if let crate::ast::Definition::Operation(ref op) = docs[0] { let vars = Variables::default(); let look_ahead = LookAheadSelection::build_from_selection( &op.item.selection_set[0], &vars, &fragments, ) .unwrap(); let expected = LookAheadSelection { name: "hero", alias: None, arguments: Vec::new(), children: vec![ ChildSelection { inner: LookAheadSelection { name: "id", alias: None, arguments: Vec::new(), children: Vec::new(), }, applies_for: Applies::All, }, ChildSelection { inner: LookAheadSelection { name: "name", alias: None, arguments: Vec::new(), children: Vec::new(), }, applies_for: Applies::All, }, ChildSelection { inner: LookAheadSelection { name: "friends", alias: None, arguments: Vec::new(), children: vec![ ChildSelection { inner: LookAheadSelection { name: "name", alias: None, arguments: Vec::new(), children: Vec::new(), }, applies_for: Applies::All, }, ChildSelection { inner: LookAheadSelection { name: "id", alias: None, arguments: Vec::new(), children: Vec::new(), }, applies_for: Applies::All, }, ], }, applies_for: Applies::All, }, ], }; assert_eq!(look_ahead, expected); } else { panic!("No Operation found"); } } #[test] fn check_query_with_argument() { let docs = parse_document_source( " query Hero { hero(episode: EMPIRE) { id name(uppercase: true) } } ", ) .unwrap(); let fragments = extract_fragments(&docs); if let crate::ast::Definition::Operation(ref op) = docs[0] { let vars = Variables::default(); let look_ahead = LookAheadSelection::build_from_selection( &op.item.selection_set[0], &vars, &fragments, ) .unwrap(); let expected = LookAheadSelection { name: "hero", alias: None, arguments: vec![LookAheadArgument { name: "episode", value: LookAheadValue::Enum("EMPIRE"), }], children: vec![ ChildSelection { inner: LookAheadSelection 
{ name: "id", alias: None, arguments: Vec::new(), children: Vec::new(), }, applies_for: Applies::All, }, ChildSelection { inner: LookAheadSelection { name: "name", alias: None, arguments: vec![LookAheadArgument { name: "uppercase", value: LookAheadValue::Scalar(&DefaultScalarValue::Boolean(true)), }], children: Vec::new(), }, applies_for: Applies::All, }, ], }; assert_eq!(look_ahead, expected); } else { panic!("No Operation found"); } } #[test] fn check_query_with_variable() { let docs = parse_document_source::<DefaultScalarValue>( " query Hero($episode: Episode) { hero(episode: $episode) { id name } } ", ) .unwrap(); let fragments = extract_fragments(&docs); if let crate::ast::Definition::Operation(ref op) = docs[0] { let mut vars = Variables::default(); vars.insert("episode".into(), InputValue::Enum("JEDI".into())); let look_ahead = LookAheadSelection::build_from_selection( &op.item.selection_set[0], &vars, &fragments, ) .unwrap(); let expected = LookAheadSelection { name: "hero", alias: None, arguments: vec![LookAheadArgument { name: "episode", value: LookAheadValue::Enum("JEDI"), }], children: vec![ ChildSelection { inner: LookAheadSelection { name: "id", alias: None, arguments: Vec::new(), children: Vec::new(), }, applies_for: Applies::All, }, ChildSelection { inner: LookAheadSelection { name: "name", alias: None, arguments: Vec::new(), children: Vec::new(), }, applies_for: Applies::All, }, ], }; assert_eq!(look_ahead, expected); } else { panic!("No Operation found"); } } #[test] fn check_query_with_optional_variable() { let docs = parse_document_source::<DefaultScalarValue>( " query Hero($episode: Episode) { hero(episode: $episode) { id } } ", ) .unwrap(); let fragments = extract_fragments(&docs); if let crate::ast::Definition::Operation(ref op) = docs[0] { let vars = Variables::default(); let look_ahead = LookAheadSelection::build_from_selection( &op.item.selection_set[0], &vars, &fragments, ) .unwrap(); let expected = LookAheadSelection { name: "hero", alias: None, arguments: vec![LookAheadArgument { name: "episode", value: LookAheadValue::Null, }], children: vec![ChildSelection { inner: LookAheadSelection { name: "id", alias: None, arguments: Vec::new(), children: Vec::new(), }, applies_for: Applies::All, }], }; assert_eq!(look_ahead, expected); } else { panic!("No Operation found"); } } #[test] fn check_query_with_fragment() { let docs = parse_document_source::<DefaultScalarValue>( " query Hero { hero { id ...commonFields } } fragment commonFields on Character { name appearsIn } ", ) .unwrap(); let fragments = extract_fragments(&docs); if let crate::ast::Definition::Operation(ref op) = docs[0] { let vars = Variables::default(); let look_ahead = LookAheadSelection::build_from_selection( &op.item.selection_set[0], &vars, &fragments, ) .unwrap(); let expected = LookAheadSelection { name: "hero", alias: None, arguments: Vec::new(), children: vec![ ChildSelection { inner: LookAheadSelection { name: "id", alias: None, arguments: Vec::new(), children: Vec::new(), }, applies_for: Applies::All, }, ChildSelection { inner: LookAheadSelection { name: "name", alias: None, arguments: Vec::new(), children: Vec::new(), }, applies_for: Applies::All, }, ChildSelection { inner: LookAheadSelection { name: "appearsIn", alias: None, arguments: Vec::new(), children: Vec::new(), }, applies_for: Applies::All, }, ], }; assert_eq!(look_ahead, expected); } else { panic!("No Operation found"); } } #[test] fn check_query_with_directives() { let docs = parse_document_source::<DefaultScalarValue>( " query Hero 
{ hero { id @include(if: true) name @include(if: false) appearsIn @skip(if: true) height @skip(if: false) } }", ) .unwrap(); let fragments = extract_fragments(&docs); if let crate::ast::Definition::Operation(ref op) = docs[0] { let vars = Variables::default(); let look_ahead = LookAheadSelection::build_from_selection( &op.item.selection_set[0], &vars, &fragments, ) .unwrap(); let expected = LookAheadSelection { name: "hero", alias: None, arguments: Vec::new(), children: vec![ ChildSelection { inner: LookAheadSelection { name: "id", alias: None, arguments: Vec::new(), children: Vec::new(), }, applies_for: Applies::All, }, ChildSelection { inner: LookAheadSelection { name: "height", alias: None, arguments: Vec::new(), children: Vec::new(), }, applies_for: Applies::All, }, ], }; assert_eq!(look_ahead, expected); } else { panic!("No Operation found"); } } #[test] fn check_query_with_inline_fragments() { let docs = parse_document_source::<DefaultScalarValue>( " query Hero { hero { name ... on Droid { primaryFunction } ... on Human { height } } }", ) .unwrap(); let fragments = extract_fragments(&docs); if let crate::ast::Definition::Operation(ref op) = docs[0] { let vars = Variables::default(); let look_ahead = LookAheadSelection::build_from_selection( &op.item.selection_set[0], &vars, &fragments, ) .unwrap(); let expected = LookAheadSelection { name: "hero", alias: None, arguments: Vec::new(), children: vec![ ChildSelection { inner: LookAheadSelection { name: "name", alias: None, arguments: Vec::new(), children: Vec::new(), }, applies_for: Applies::All, }, ChildSelection { inner: LookAheadSelection { name: "primaryFunction", alias: None, arguments: Vec::new(), children: Vec::new(), }, applies_for: Applies::OnlyType("Droid"), }, ChildSelection { inner: LookAheadSelection { name: "height", alias: None, arguments: Vec::new(), children: Vec::new(), }, applies_for: Applies::OnlyType("Human"), }, ], }; assert_eq!(look_ahead, expected); } else { panic!("No Operation found"); } } #[test] fn check_query_with_multiple() { let docs = parse_document_source::<DefaultScalarValue>( " query HeroAndHuman { hero { id } human { name } } ", ) .unwrap(); let fragments = extract_fragments(&docs); if let crate::ast::Definition::Operation(ref op) = docs[0] { let vars = Variables::default(); let look_ahead = LookAheadSelection::build_from_selection( &op.item.selection_set[0], &vars, &fragments, ) .unwrap(); let expected = LookAheadSelection { name: "hero", alias: None, arguments: Vec::new(), children: vec![ChildSelection { inner: LookAheadSelection { name: "id", alias: None, arguments: Vec::new(), children: Vec::new(), }, applies_for: Applies::All, }], }; assert_eq!(look_ahead, expected); let look_ahead = LookAheadSelection::build_from_selection( &op.item.selection_set[1], &vars, &fragments, ) .unwrap(); let expected = LookAheadSelection { name: "human", alias: None, arguments: Vec::new(), children: vec![ChildSelection { inner: LookAheadSelection { name: "name", alias: None, arguments: Vec::new(), children: Vec::new(), }, applies_for: Applies::All, }], }; assert_eq!(look_ahead, expected); } else { panic!("No Operation found"); } } #[test] fn check_complex_query() { let docs = parse_document_source( " query HeroNameAndFriends($id: Integer!, $withFriends: Boolean! = true) { hero(id: $id) { id ... comparisonFields friends @include(if: $withFriends) { ... comparisonFields ... on Human @skip(if: true) { mass } } } } fragment comparisonFields on Character { __typename name appearsIn ... on Droid { primaryFunction } ... 
on Human { height } }", ) .unwrap(); let fragments = extract_fragments(&docs); if let crate::ast::Definition::Operation(ref op) = docs[0] { let mut vars = Variables::default(); vars.insert("id".into(), InputValue::Scalar(DefaultScalarValue::Int(42))); // This will normally be there vars.insert( "withFriends".into(), InputValue::Scalar(DefaultScalarValue::Boolean(true)), ); let look_ahead = LookAheadSelection::build_from_selection( &op.item.selection_set[0], &vars, &fragments, ) .unwrap(); let expected = LookAheadSelection { name: "hero", alias: None, arguments: vec![LookAheadArgument { name: "id", value: LookAheadValue::Scalar(&DefaultScalarValue::Int(42)), }], children: vec![ ChildSelection { inner: LookAheadSelection { name: "id", alias: None, arguments: Vec::new(), children: Vec::new(), }, applies_for: Applies::All, }, ChildSelection { inner: LookAheadSelection { name: "__typename", alias: None, arguments: Vec::new(), children: Vec::new(), }, applies_for: Applies::All, }, ChildSelection { inner: LookAheadSelection { name: "name", alias: None, arguments: Vec::new(), children: Vec::new(), }, applies_for: Applies::All, }, ChildSelection { inner: LookAheadSelection { name: "appearsIn", alias: None, arguments: Vec::new(), children: Vec::new(), }, applies_for: Applies::All, }, ChildSelection { inner: LookAheadSelection { name: "primaryFunction", alias: None, arguments: Vec::new(), children: Vec::new(), }, applies_for: Applies::OnlyType("Droid"), }, ChildSelection { inner: LookAheadSelection { name: "height", alias: None, arguments: Vec::new(), children: Vec::new(), }, applies_for: Applies::OnlyType("Human"), }, ChildSelection { inner: LookAheadSelection { name: "friends", alias: None, arguments: Vec::new(), children: vec![ ChildSelection { inner: LookAheadSelection { name: "__typename", alias: None, arguments: Vec::new(), children: Vec::new(), }, applies_for: Applies::All, }, ChildSelection { inner: LookAheadSelection { name: "name", alias: None, arguments: Vec::new(), children: Vec::new(), }, applies_for: Applies::All, }, ChildSelection { inner: LookAheadSelection { name: "appearsIn", alias: None, arguments: Vec::new(), children: Vec::new(), }, applies_for: Applies::All, }, ChildSelection { inner: LookAheadSelection { name: "primaryFunction", alias: None, arguments: Vec::new(), children: Vec::new(), }, applies_for: Applies::OnlyType("Droid"), }, ChildSelection { inner: LookAheadSelection { name: "height", alias: None, arguments: Vec::new(), children: Vec::new(), }, applies_for: Applies::OnlyType("Human"), }, ], }, applies_for: Applies::All, }, ], }; assert_eq!(look_ahead, expected); } else { panic!("No Operation found"); } } #[test] fn check_resolve_concrete_type() { let docs = parse_document_source::<DefaultScalarValue>( " query Hero { hero { name ... on Droid { primaryFunction } ... 
on Human { height } } }", ) .unwrap(); let fragments = extract_fragments(&docs); if let crate::ast::Definition::Operation(ref op) = docs[0] { let vars = Variables::default(); let look_ahead = LookAheadSelection::build_from_selection( &op.item.selection_set[0], &vars, &fragments, ) .unwrap() .for_explicit_type("Human"); let expected = ConcreteLookAheadSelection { name: "hero", alias: None, arguments: Vec::new(), children: vec![ ConcreteLookAheadSelection { name: "name", alias: None, arguments: Vec::new(), children: Vec::new(), }, ConcreteLookAheadSelection { name: "height", alias: None, arguments: Vec::new(), children: Vec::new(), }, ], }; assert_eq!(look_ahead, expected); } else { panic!("No Operation found"); } } #[test] fn check_select_child() { let lookahead: LookAheadSelection<DefaultScalarValue> = LookAheadSelection { name: "hero", alias: None, arguments: Vec::new(), children: vec![ ChildSelection { inner: LookAheadSelection { name: "id", alias: None, arguments: Vec::new(), children: Vec::new(), }, applies_for: Applies::All, }, ChildSelection { inner: LookAheadSelection { name: "friends", alias: None, arguments: Vec::new(), children: vec![ ChildSelection { inner: LookAheadSelection { name: "id", alias: None, arguments: Vec::new(), children: Vec::new(), }, applies_for: Applies::All, }, ChildSelection { inner: LookAheadSelection { name: "name", alias: None, arguments: Vec::new(), children: Vec::new(), }, applies_for: Applies::All, }, ], }, applies_for: Applies::All, }, ], }; let concret_query = lookahead.for_explicit_type("does not matter"); let id = lookahead.select_child("id"); let concrete_id = concret_query.select_child("id"); let expected = LookAheadSelection { name: "id", alias: None, arguments: Vec::new(), children: Vec::new(), }; assert_eq!(id, Some(&expected)); assert_eq!( concrete_id, Some(&expected.for_explicit_type("does not matter")) ); let friends = lookahead.select_child("friends"); let concrete_friends = concret_query.select_child("friends"); let expected = LookAheadSelection { name: "friends", alias: None, arguments: Vec::new(), children: vec![ ChildSelection { inner: LookAheadSelection { name: "id", alias: None, arguments: Vec::new(), children: Vec::new(), }, applies_for: Applies::All, }, ChildSelection { inner: LookAheadSelection { name: "name", alias: None, arguments: Vec::new(), children: Vec::new(), }, applies_for: Applies::All, }, ], }; assert_eq!(friends, Some(&expected)); assert_eq!( concrete_friends, Some(&expected.for_explicit_type("does not matter")) ); } #[test] // https://github.com/graphql-rust/juniper/issues/335 fn check_fragment_with_nesting() { let docs = parse_document_source::<DefaultScalarValue>( " query Hero { hero { ...heroFriendNames } } fragment heroFriendNames on Hero { friends { name } } ", ) .unwrap(); let fragments = extract_fragments(&docs); if let crate::ast::Definition::Operation(ref op) = docs[0] { let vars = Variables::default(); let look_ahead = LookAheadSelection::build_from_selection( &op.item.selection_set[0], &vars, &fragments, ) .unwrap(); let expected = LookAheadSelection { name: "hero", alias: None, arguments: Vec::new(), children: vec![ChildSelection { inner: LookAheadSelection { name: "friends", alias: None, arguments: Vec::new(), children: vec![ChildSelection { inner: LookAheadSelection { name: "name", alias: None, arguments: Vec::new(), children: Vec::new(), }, applies_for: Applies::All, }], }, applies_for: Applies::All, }], }; assert_eq!(look_ahead, expected); } else { panic!("No Operation found"); } } #[test] fn 
check_visitability() { let docs = parse_document_source::<DefaultScalarValue>( " query Hero { hero(episode: EMPIRE) { name aliasedName: name friends { name } } } ", ) .unwrap(); let fragments = extract_fragments(&docs); if let crate::ast::Definition::Operation(ref op) = docs[0] { let vars = Variables::default(); let look_ahead = LookAheadSelection::build_from_selection( &op.item.selection_set[0], &vars, &fragments, ) .unwrap(); assert_eq!(look_ahead.field_name(), "hero"); assert!(look_ahead.has_arguments()); let args = look_ahead.arguments(); assert_eq!(args[0].name(), "episode"); assert_eq!(args[0].value(), &LookAheadValue::Enum("EMPIRE")); assert!(look_ahead.has_children()); assert_eq!( look_ahead.child_names(), vec!["name", "aliasedName", "friends"] ); let mut children = look_ahead.children().into_iter(); let name_child = children.next().unwrap(); assert!(look_ahead.has_child("name")); assert_eq!(name_child, look_ahead.select_child("name").unwrap()); assert_eq!(name_child.name, "name"); assert_eq!(name_child.alias, None); assert_eq!(name_child.field_name(), "name"); assert!(!name_child.has_arguments()); assert!(!name_child.has_children()); let aliased_name_child = children.next().unwrap(); assert!(look_ahead.has_child("aliasedName")); assert_eq!( aliased_name_child, look_ahead.select_child("aliasedName").unwrap() ); assert_eq!(aliased_name_child.name, "name"); assert_eq!(aliased_name_child.alias, Some("aliasedName")); assert_eq!(aliased_name_child.field_name(), "aliasedName"); assert!(!aliased_name_child.has_arguments()); assert!(!aliased_name_child.has_children()); let friends_child = children.next().unwrap(); assert!(look_ahead.has_child("friends")); assert_eq!(friends_child, look_ahead.select_child("friends").unwrap()); assert_eq!(friends_child.name, "friends"); assert_eq!(friends_child.alias, None); assert_eq!(friends_child.field_name(), "friends"); assert!(!friends_child.has_arguments()); assert!(friends_child.has_children()); assert_eq!(friends_child.child_names(), vec!["name"]); assert!(children.next().is_none()); let mut friends_children = friends_child.children().into_iter(); let child = friends_children.next().unwrap(); assert!(friends_child.has_child("name")); assert_eq!(child, friends_child.select_child("name").unwrap()); assert_eq!(child.name, "name"); assert_eq!(child.alias, None); assert_eq!(child.field_name(), "name"); assert!(!child.has_arguments()); assert!(!child.has_children()); assert!(friends_children.next().is_none()); } else { panic!("No Operation found"); } } }
build_from_selection
expr.rs
//! FIXME: write short doc here use std::sync::Arc; use hir_def::{expr::Statement, path::path, resolver::HasResolver, AssocItemId, DefWithBodyId}; use hir_expand::{diagnostics::DiagnosticSink, name}; use rustc_hash::FxHashSet; use syntax::{ast, AstPtr}; use crate::{ db::HirDatabase, diagnostics::{ match_check::{is_useful, MatchCheckCtx, Matrix, PatStack, Usefulness}, MismatchedArgCount, MissingFields, MissingMatchArms, MissingOkOrSomeInTailExpr, MissingPatFields, RemoveThisSemicolon, }, AdtId, InferenceResult, Interner, TyExt, TyKind, }; pub(crate) use hir_def::{ body::{Body, BodySourceMap}, expr::{Expr, ExprId, MatchArm, Pat, PatId}, LocalFieldId, VariantId, }; use super::ReplaceFilterMapNextWithFindMap; pub(super) struct ExprValidator<'a, 'b: 'a> { owner: DefWithBodyId, infer: Arc<InferenceResult>, sink: &'a mut DiagnosticSink<'b>, } impl<'a, 'b> ExprValidator<'a, 'b> { pub(super) fn new( owner: DefWithBodyId, infer: Arc<InferenceResult>, sink: &'a mut DiagnosticSink<'b>, ) -> ExprValidator<'a, 'b> { ExprValidator { owner, infer, sink } } pub(super) fn validate_body(&mut self, db: &dyn HirDatabase) { self.check_for_filter_map_next(db); let body = db.body(self.owner); for (id, expr) in body.exprs.iter() { if let Some((variant_def, missed_fields, true)) = record_literal_missing_fields(db, &self.infer, id, expr) { self.create_record_literal_missing_fields_diagnostic( id, db, variant_def, missed_fields, ); } match expr { Expr::Match { expr, arms } => { self.validate_match(id, *expr, arms, db, self.infer.clone()); } Expr::Call { .. } | Expr::MethodCall { .. } => { self.validate_call(db, id, expr); } _ => {} } } for (id, pat) in body.pats.iter() { if let Some((variant_def, missed_fields, true)) = record_pattern_missing_fields(db, &self.infer, id, pat) { self.create_record_pattern_missing_fields_diagnostic( id, db, variant_def, missed_fields, ); } } let body_expr = &body[body.body_expr]; if let Expr::Block { statements, tail, .. 
} = body_expr { if let Some(t) = tail { self.validate_results_in_tail_expr(body.body_expr, *t, db); } else if let Some(Statement::Expr(id)) = statements.last() { self.validate_missing_tail_expr(body.body_expr, *id, db); } } } fn create_record_literal_missing_fields_diagnostic( &mut self, id: ExprId, db: &dyn HirDatabase, variant_def: VariantId, missed_fields: Vec<LocalFieldId>, ) { // XXX: only look at source_map if we do have missing fields let (_, source_map) = db.body_with_source_map(self.owner); if let Ok(source_ptr) = source_map.expr_syntax(id) { let root = source_ptr.file_syntax(db.upcast()); if let ast::Expr::RecordExpr(record_expr) = &source_ptr.value.to_node(&root) { if let Some(_) = record_expr.record_expr_field_list() { let variant_data = variant_def.variant_data(db.upcast()); let missed_fields = missed_fields .into_iter() .map(|idx| variant_data.fields()[idx].name.clone()) .collect(); self.sink.push(MissingFields { file: source_ptr.file_id, field_list_parent: AstPtr::new(&record_expr), field_list_parent_path: record_expr.path().map(|path| AstPtr::new(&path)), missed_fields, }) } } } } fn create_record_pattern_missing_fields_diagnostic( &mut self, id: PatId, db: &dyn HirDatabase, variant_def: VariantId, missed_fields: Vec<LocalFieldId>, ) { // XXX: only look at source_map if we do have missing fields let (_, source_map) = db.body_with_source_map(self.owner); if let Ok(source_ptr) = source_map.pat_syntax(id) { if let Some(expr) = source_ptr.value.as_ref().left() { let root = source_ptr.file_syntax(db.upcast()); if let ast::Pat::RecordPat(record_pat) = expr.to_node(&root) { if let Some(_) = record_pat.record_pat_field_list() { let variant_data = variant_def.variant_data(db.upcast()); let missed_fields = missed_fields .into_iter() .map(|idx| variant_data.fields()[idx].name.clone()) .collect(); self.sink.push(MissingPatFields { file: source_ptr.file_id, field_list_parent: AstPtr::new(&record_pat), field_list_parent_path: record_pat .path() .map(|path| AstPtr::new(&path)), missed_fields, }) } } } } } fn check_for_filter_map_next(&mut self, db: &dyn HirDatabase) { // Find the FunctionIds for Iterator::filter_map and Iterator::next let iterator_path = path![core::iter::Iterator]; let resolver = self.owner.resolver(db.upcast()); let iterator_trait_id = match resolver.resolve_known_trait(db.upcast(), &iterator_path) { Some(id) => id, None => return, }; let iterator_trait_items = &db.trait_data(iterator_trait_id).items; let filter_map_function_id = match iterator_trait_items.iter().find(|item| item.0 == name![filter_map]) { Some((_, AssocItemId::FunctionId(id))) => id, _ => return, }; let next_function_id = match iterator_trait_items.iter().find(|item| item.0 == name![next]) { Some((_, AssocItemId::FunctionId(id))) => id, _ => return, }; // Search function body for instances of .filter_map(..).next() let body = db.body(self.owner); let mut prev = None; for (id, expr) in body.exprs.iter() { if let Expr::MethodCall { receiver, .. 
} = expr { let function_id = match self.infer.method_resolution(id) { Some(id) => id, None => continue, }; if function_id == *filter_map_function_id { prev = Some(id); continue; } if function_id == *next_function_id { if let Some(filter_map_id) = prev { if *receiver == filter_map_id { let (_, source_map) = db.body_with_source_map(self.owner); if let Ok(next_source_ptr) = source_map.expr_syntax(id) { self.sink.push(ReplaceFilterMapNextWithFindMap { file: next_source_ptr.file_id, next_expr: next_source_ptr.value, }); } } } } } prev = None; } } fn validate_call(&mut self, db: &dyn HirDatabase, call_id: ExprId, expr: &Expr) { // Check that the number of arguments matches the number of parameters. // FIXME: Due to shortcomings in the current type system implementation, only emit this // diagnostic if there are no type mismatches in the containing function. if self.infer.type_mismatches.iter().next().is_some() { return; } let is_method_call = matches!(expr, Expr::MethodCall { .. }); let (sig, args) = match expr { Expr::Call { callee, args } => { let callee = &self.infer.type_of_expr[*callee]; let sig = match callee.callable_sig(db) { Some(sig) => sig, None => return, }; (sig, args.clone()) } Expr::MethodCall { receiver, args, .. } => { let mut args = args.clone(); args.insert(0, *receiver); let receiver = &self.infer.type_of_expr[*receiver]; if receiver.strip_references().is_unknown() { // if the receiver is of unknown type, it's very likely we // don't know enough to correctly resolve the method call. // This is kind of a band-aid for #6975. return; } // FIXME: note that we erase information about substs here. This // is not right, but, luckily, doesn't matter as we care only // about the number of params let callee = match self.infer.method_resolution(call_id) { Some(callee) => callee, None => return, }; let sig = db.callable_item_signature(callee.into()).into_value_and_skipped_binders().0; (sig, args) } _ => return, }; if sig.is_varargs { return; } let params = sig.params(); let mut param_count = params.len(); let mut arg_count = args.len(); if arg_count != param_count { let (_, source_map) = db.body_with_source_map(self.owner); if let Ok(source_ptr) = source_map.expr_syntax(call_id) { if is_method_call { param_count -= 1; arg_count -= 1; } self.sink.push(MismatchedArgCount { file: source_ptr.file_id, call_expr: source_ptr.value, expected: param_count, found: arg_count, }); } } } fn validate_match( &mut self, id: ExprId, match_expr: ExprId, arms: &[MatchArm], db: &dyn HirDatabase, infer: Arc<InferenceResult>, ) { let (body, source_map): (Arc<Body>, Arc<BodySourceMap>) = db.body_with_source_map(self.owner); let match_expr_ty = if infer.type_of_expr[match_expr].is_unknown() { return; } else { &infer.type_of_expr[match_expr] }; let cx = MatchCheckCtx { match_expr, body, infer: infer.clone(), db }; let pats = arms.iter().map(|arm| arm.pat); let mut seen = Matrix::empty(); for pat in pats { if let Some(pat_ty) = infer.type_of_pat.get(pat) { // We only include patterns whose type matches the type // of the match expression. If we had a InvalidMatchArmPattern // diagnostic or similar we could raise that in an else // block here. // // When comparing the types, we also have to consider that rustc // will automatically de-reference the match expression type if // necessary. // // FIXME we should use the type checker for this. 
if pat_ty == match_expr_ty || match_expr_ty .as_reference() .map(|(match_expr_ty, ..)| match_expr_ty == pat_ty) .unwrap_or(false) { // If we had a NotUsefulMatchArm diagnostic, we could // check the usefulness of each pattern as we added it // to the matrix here. let v = PatStack::from_pattern(pat); seen.push(&cx, v); continue; } } // If we can't resolve the type of a pattern, or the pattern type doesn't // fit the match expression, we skip this diagnostic. Skipping the entire // diagnostic rather than just not including this match arm is preferred // to avoid the chance of false positives. return; } match is_useful(&cx, &seen, &PatStack::from_wild()) { Ok(Usefulness::Useful) => (), // if a wildcard pattern is not useful, then all patterns are covered Ok(Usefulness::NotUseful) => return, // this path is for unimplemented checks, so we err on the side of not // reporting any errors _ => return, } if let Ok(source_ptr) = source_map.expr_syntax(id) { let root = source_ptr.file_syntax(db.upcast()); if let ast::Expr::MatchExpr(match_expr) = &source_ptr.value.to_node(&root) { if let (Some(match_expr), Some(arms)) = (match_expr.expr(), match_expr.match_arm_list()) { self.sink.push(MissingMatchArms { file: source_ptr.file_id, match_expr: AstPtr::new(&match_expr), arms: AstPtr::new(&arms), }) } } } } fn validate_results_in_tail_expr(&mut self, body_id: ExprId, id: ExprId, db: &dyn HirDatabase) { // the mismatch will be on the whole block currently let mismatch = match self.infer.type_mismatch_for_expr(body_id) { Some(m) => m, None => return, }; let core_result_path = path![core::result::Result]; let core_option_path = path![core::option::Option]; let resolver = self.owner.resolver(db.upcast()); let core_result_enum = match resolver.resolve_known_enum(db.upcast(), &core_result_path) { Some(it) => it, _ => return, }; let core_option_enum = match resolver.resolve_known_enum(db.upcast(), &core_option_path) { Some(it) => it, _ => return, }; let (params, required) = match mismatch.expected.kind(&Interner) { TyKind::Adt(AdtId(hir_def::AdtId::EnumId(enum_id)), ref parameters) if *enum_id == core_result_enum => { (parameters, "Ok".to_string()) } TyKind::Adt(AdtId(hir_def::AdtId::EnumId(enum_id)), ref parameters) if *enum_id == core_option_enum => { (parameters, "Some".to_string()) } _ => return, }; if params.len(&Interner) > 0 && params.at(&Interner, 0).ty(&Interner) == Some(&mismatch.actual) { let (_, source_map) = db.body_with_source_map(self.owner); if let Ok(source_ptr) = source_map.expr_syntax(id) { self.sink.push(MissingOkOrSomeInTailExpr { file: source_ptr.file_id, expr: source_ptr.value, required, }); } } } fn validate_missing_tail_expr( &mut self, body_id: ExprId, possible_tail_id: ExprId, db: &dyn HirDatabase, ) { let mismatch = match self.infer.type_mismatch_for_expr(body_id) { Some(m) => m, None => return, }; let possible_tail_ty = match self.infer.type_of_expr.get(possible_tail_id) { Some(ty) => ty, None => return, }; if !mismatch.actual.is_unit() || mismatch.expected != *possible_tail_ty { return; } let (_, source_map) = db.body_with_source_map(self.owner); if let Ok(source_ptr) = source_map.expr_syntax(possible_tail_id) { self.sink .push(RemoveThisSemicolon { file: source_ptr.file_id, expr: source_ptr.value }); } } } pub fn record_literal_missing_fields( db: &dyn HirDatabase, infer: &InferenceResult, id: ExprId, expr: &Expr, ) -> Option<(VariantId, Vec<LocalFieldId>, /*exhaustive*/ bool)> { let (fields, exhaustive) = match expr { Expr::RecordLit { path: _, fields, spread } => (fields, 
spread.is_none()), _ => return None, }; let variant_def = infer.variant_resolution_for_expr(id)?; if let VariantId::UnionId(_) = variant_def { return None; } let variant_data = variant_def.variant_data(db.upcast()); let specified_fields: FxHashSet<_> = fields.iter().map(|f| &f.name).collect(); let missed_fields: Vec<LocalFieldId> = variant_data .fields() .iter() .filter_map(|(f, d)| if specified_fields.contains(&d.name) { None } else { Some(f) }) .collect(); if missed_fields.is_empty() { return None; } Some((variant_def, missed_fields, exhaustive)) } pub fn record_pattern_missing_fields( db: &dyn HirDatabase, infer: &InferenceResult, id: PatId, pat: &Pat, ) -> Option<(VariantId, Vec<LocalFieldId>, /*exhaustive*/ bool)> { let (fields, exhaustive) = match pat { Pat::Record { path: _, args, ellipsis } => (args, !ellipsis), _ => return None, }; let variant_def = infer.variant_resolution_for_pat(id)?; if let VariantId::UnionId(_) = variant_def { return None; } let variant_data = variant_def.variant_data(db.upcast()); let specified_fields: FxHashSet<_> = fields.iter().map(|f| &f.name).collect(); let missed_fields: Vec<LocalFieldId> = variant_data .fields() .iter() .filter_map(|(f, d)| if specified_fields.contains(&d.name) { None } else { Some(f) }) .collect(); if missed_fields.is_empty() { return None; } Some((variant_def, missed_fields, exhaustive)) } #[cfg(test)] mod tests { use crate::diagnostics::tests::check_diagnostics; #[test] fn simple_free_fn_zero() { check_diagnostics( r#" fn zero() {} fn f() { zero(1); } //^^^^^^^ Expected 0 arguments, found 1 "#, ); check_diagnostics( r#" fn zero() {} fn f() { zero(); } "#, ); } #[test] fn simple_free_fn_one() { check_diagnostics( r#" fn one(arg: u8) {} fn f() { one(); } //^^^^^ Expected 1 argument, found 0 "#, ); check_diagnostics( r#" fn one(arg: u8) {} fn f() { one(1); } "#, ); } #[test] fn method_as_fn() { check_diagnostics( r#" struct S; impl S { fn method(&self) {} } fn f() { S::method(); } //^^^^^^^^^^^ Expected 1 argument, found 0 "#, ); check_diagnostics( r#" struct S; impl S { fn method(&self) {} } fn f() { S::method(&S); S.method(); } "#, ); } #[test] fn method_with_arg() { check_diagnostics( r#" struct S; impl S { fn method(&self, arg: u8) {} } fn f() { S.method(); } //^^^^^^^^^^ Expected 1 argument, found 0 "#, ); check_diagnostics( r#" struct S; impl S { fn method(&self, arg: u8) {} } fn f() { S::method(&S, 0); S.method(1); } "#, ); } #[test] fn method_unknown_receiver() { // note: this is incorrect code, so there might be errors on this in the // future, but we shouldn't emit an argument count diagnostic here check_diagnostics( r#" trait Foo { fn method(&self, arg: usize) {} } fn f() { let x; x.method(); } "#, ); } #[test] fn tuple_struct()
#[test] fn enum_variant() { check_diagnostics( r#" enum En { Variant(u8, u16), } fn f() { En::Variant(0); } //^^^^^^^^^^^^^^ Expected 2 arguments, found 1 "#, ) } #[test] fn enum_variant_type_macro() { check_diagnostics( r#" macro_rules! Type { () => { u32 }; } enum Foo { Bar(Type![]) } impl Foo { fn new() { Foo::Bar(0); Foo::Bar(0, 1); //^^^^^^^^^^^^^^ Expected 1 argument, found 2 Foo::Bar(); //^^^^^^^^^^ Expected 1 argument, found 0 } } "#, ); } #[test] fn varargs() { check_diagnostics( r#" extern "C" { fn fixed(fixed: u8); fn varargs(fixed: u8, ...); fn varargs2(...); } fn f() { unsafe { fixed(0); fixed(0, 1); //^^^^^^^^^^^ Expected 1 argument, found 2 varargs(0); varargs(0, 1); varargs2(); varargs2(0); varargs2(0, 1); } } "#, ) } #[test] fn arg_count_lambda() { check_diagnostics( r#" fn main() { let f = |()| (); f(); //^^^ Expected 1 argument, found 0 f(()); f((), ()); //^^^^^^^^^ Expected 1 argument, found 2 } "#, ) } #[test] fn cfgd_out_call_arguments() { check_diagnostics( r#" struct C(#[cfg(FALSE)] ()); impl C { fn new() -> Self { Self( #[cfg(FALSE)] (), ) } fn method(&self) {} } fn main() { C::new().method(#[cfg(FALSE)] 0); } "#, ); } #[test] fn cfgd_out_fn_params() { check_diagnostics( r#" fn foo(#[cfg(NEVER)] x: ()) {} struct S; impl S { fn method(#[cfg(NEVER)] self) {} fn method2(#[cfg(NEVER)] self, arg: u8) {} fn method3(self, #[cfg(NEVER)] arg: u8) {} } extern "C" { fn fixed(fixed: u8, #[cfg(NEVER)] ...); fn varargs(#[cfg(not(NEVER))] ...); } fn main() { foo(); S::method(); S::method2(0); S::method3(S); S.method3(); unsafe { fixed(0); varargs(1, 2, 3); } } "#, ) } }
{ check_diagnostics( r#" struct Tup(u8, u16); fn f() { Tup(0); } //^^^^^^ Expected 2 arguments, found 1 "#, ) }
callbacks.rs
// Copyright 2013 The GLFW-RS Developers. For a full listing of the authors, // refer to the AUTHORS file at the top-level directory of this distribution. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Private callback support functions. use libc::{c_char, c_double, c_float, c_int, c_uint}; use std::ffi::CStr; use std::mem; use std::path::PathBuf; use std::slice; use std::str; use std::sync::mpsc::Sender; use super::*; macro_rules! callback ( ( type Args = ($($arg:ident: $arg_ty:ty),*); type Callback = $Callback:ident; let ext_set = $ext_set:expr; fn callback($($ext_arg:ident: $ext_arg_ty:ty),*) $call:expr ) => ( thread_local!(static CALLBACK_KEY: RefCell<Option<Box<dyn Object<Args> + 'static>>> = RefCell::new(None)); type Args = ($($arg_ty),*,); trait Object<T> { fn call(&self, args: T); } impl<UserData> Object<Args> for ::Callback<fn($($arg_ty),*, &UserData), UserData> { fn call(&self, ($($arg),*,): Args) { (self.f)($($arg),*, &self.data); } } pub fn set<UserData: 'static>(f: ::$Callback<UserData>) { let mut boxed_cb = Some(Box::new(f) as Box<dyn Object<Args> + 'static>); CALLBACK_KEY.with(|cb| { *cb.borrow_mut() = boxed_cb.take(); }); ($ext_set)(Some(callback as extern "C" fn($($ext_arg: $ext_arg_ty),*))); } pub fn unset() { CALLBACK_KEY.with(|cb| { *cb.borrow_mut() = None; }); ($ext_set)(None); } extern "C" fn callback($($ext_arg: $ext_arg_ty),*) { CALLBACK_KEY.with(|cb| { match *cb.borrow() { Some(ref cb) => unsafe { cb.call($call) }, _ => {} } }) } ) ); pub mod error { use libc::{c_char, c_int}; use std::cell::RefCell; use std::mem; callback!( type Args = (error: ::Error, description: String); type Callback = ErrorCallback; let ext_set = |cb| unsafe { ::ffi::glfwSetErrorCallback(cb) }; fn callback(error: c_int, description: *const c_char) { (mem::transmute(error), ::string_from_c_str(description)) } ); } pub mod monitor { use libc::c_int; use std::cell::RefCell; use std::mem; callback!( type Args = (monitor: ::Monitor, event: ::MonitorEvent); type Callback = MonitorCallback; let ext_set = |cb| unsafe { ::ffi::glfwSetMonitorCallback(cb) }; fn callback(monitor: *mut ::ffi::GLFWmonitor, event: c_int) { let monitor = ::Monitor { ptr: monitor }; (monitor, mem::transmute(event)) } ); } pub mod joystick { use libc::c_int; use std::cell::RefCell; use std::mem; callback!( type Args = (joystick_id: ::JoystickId, event: ::JoystickEvent); let ext_set = |cb| unsafe { ::ffi::glfwSetJoystickCallback(cb) }; type Callback = JoystickCallback; fn callback(joystick_id: c_int, event: c_int) { (mem::transmute(joystick_id), mem::transmute(event)) } ); } unsafe fn get_sender<'a>(window: &'a *mut ffi::GLFWwindow) -> &'a Sender<(f64, WindowEvent)> { mem::transmute(ffi::glfwGetWindowUserPointer(*window)) } pub mod unbuffered { use crate::{WindowEvent, WindowId}; use std::cell::RefCell; type CallbackPtr = *mut std::ffi::c_void; type HandlerFn = fn( window_id: WindowId, event: (f64, WindowEvent), callback_ptr: CallbackPtr, ) -> Option<(f64, WindowEvent)>; thread_local!
{ static HANDLER: RefCell<Option<(HandlerFn, CallbackPtr)>> = RefCell::new(None); } pub struct UnsetHandlerGuard { _private: (), } impl Drop for UnsetHandlerGuard { fn drop(&mut self) { HANDLER.with(|ref_cell| { *ref_cell.borrow_mut() = None; }) } } pub unsafe fn handle( window_id: WindowId, event: (f64, WindowEvent), ) -> Option<(f64, WindowEvent)> { HANDLER.with(|ref_cell| { if let Some((handler, callback_ptr)) = *ref_cell.borrow() { handler(window_id, event, callback_ptr) } else
}) } pub unsafe fn set_handler<F>(callback: &mut F) -> UnsetHandlerGuard where F: FnMut(WindowId, (f64, WindowEvent)) -> Option<(f64, WindowEvent)>, { fn handler<F>( window_id: WindowId, event: (f64, WindowEvent), callback_ptr: CallbackPtr, ) -> Option<(f64, WindowEvent)> where F: FnMut(WindowId, (f64, WindowEvent)) -> Option<(f64, WindowEvent)>, { unsafe { let callback: &mut F = &mut *(callback_ptr as *mut F); callback(window_id, event) } } HANDLER.with(|ref_cell| { let callback_ptr = callback as *mut F as CallbackPtr; *ref_cell.borrow_mut() = Some((handler::<F>, callback_ptr)); }); UnsetHandlerGuard { _private: () } } } // Note that this macro creates a static function pointer rather than a plain function. // This makes it more ergonomic to embed in an Option; see set_window_callback! in lib.rs macro_rules! window_callback ( (fn $name:ident () => $event:ident) => ( pub static $name: (extern "C" fn(window: *mut ffi::GLFWwindow)) = { extern "C" fn actual_callback(window: *mut ffi::GLFWwindow) { unsafe { let event = (ffi::glfwGetTime() as f64, WindowEvent::$event); if let Some(event) = unbuffered::handle(window as WindowId, event) { get_sender(&window).send(event).unwrap(); } } } actual_callback }; ); (fn $name:ident ($($ext_arg:ident: $ext_arg_ty:ty),*) => $event:ident($($arg_conv:expr),*)) => ( pub static $name: (extern "C" fn(window: *mut ffi::GLFWwindow $(, $ext_arg: $ext_arg_ty)*)) = { extern "C" fn actual_callback(window: *mut ffi::GLFWwindow $(, $ext_arg: $ext_arg_ty)*) { unsafe { let event = (ffi::glfwGetTime() as f64, WindowEvent::$event($($arg_conv),*)); if let Some(event) = unbuffered::handle(window as WindowId, event) { get_sender(&window).send(event).unwrap(); } } } actual_callback }; ); ); window_callback!(fn window_pos_callback(xpos: c_int, ypos: c_int) => Pos(xpos as i32, ypos as i32)); window_callback!(fn window_size_callback(width: c_int, height: c_int) => Size(width as i32, height as i32)); window_callback!(fn window_close_callback() => Close); window_callback!(fn window_refresh_callback() => Refresh); window_callback!(fn window_focus_callback(focused: c_int) => Focus(focused == ffi::TRUE)); window_callback!(fn window_iconify_callback(iconified: c_int) => Iconify(iconified == ffi::TRUE)); window_callback!(fn framebuffer_size_callback(width: c_int, height: c_int) => FramebufferSize(width as i32, height as i32)); window_callback!(fn mouse_button_callback(button: c_int, action: c_int, mods: c_int) => MouseButton(mem::transmute(button), mem::transmute(action), Modifiers::from_bits(mods).unwrap())); window_callback!(fn cursor_pos_callback(xpos: c_double, ypos: c_double) => CursorPos(xpos as f64, ypos as f64)); window_callback!(fn cursor_enter_callback(entered: c_int) => CursorEnter(entered == ffi::TRUE)); window_callback!(fn scroll_callback(xpos: c_double, ypos: c_double) => Scroll(xpos as f64, ypos as f64)); window_callback!(fn key_callback(key: c_int, scancode: c_int, action: c_int, mods: c_int) => Key(mem::transmute(key), scancode, mem::transmute(action), Modifiers::from_bits(mods).unwrap())); window_callback!(fn char_callback(character: c_uint) => Char(::std::char::from_u32(character).unwrap())); window_callback!(fn char_mods_callback(character: c_uint, mods: c_int) => CharModifiers(::std::char::from_u32(character).unwrap(), Modifiers::from_bits(mods).unwrap())); window_callback!(fn drop_callback(num_paths: c_int, paths: *mut *const c_char) => FileDrop(slice::from_raw_parts(paths, num_paths as usize).iter().map(|path| 
PathBuf::from(str::from_utf8(CStr::from_ptr(*path).to_bytes()).unwrap().to_string())).collect())); window_callback!(fn window_maximize_callback(maximized: c_int) => Maximize(maximized == ffi::TRUE)); window_callback!(fn window_content_scale_callback(xscale: c_float, yscale: c_float) => ContentScale(xscale as f32, yscale as f32));
{ Some(event) }
loadbalancer.go
package loadbalancer // FINCLOUD_APACHE_NO_VERSION import ( "context" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure" "github.com/Azure/go-autorest/tracing" "net/http" ) // Client is the loadBalancer Client type Client struct { BaseClient } // NewClient creates an instance of the Client client. func NewClient() Client { return NewClientWithBaseURI(DefaultBaseURI) } // NewClientWithBaseURI creates an instance of the Client client. func NewClientWithBaseURI(baseURI string) Client { return Client{NewWithBaseURI(baseURI)} } // CheckName checks that a load balancer name is valid. // Parameters: // loadBalancerName - load balancer name func (client Client) CheckName(ctx context.Context, loadBalancerName string) (result CheckNameParameter, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/Client.CheckName") defer func() { sc := -1 if result.Response.Response != nil { sc = result.Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } req, err := client.CheckNamePreparer(ctx, loadBalancerName) if err != nil { err = autorest.NewErrorWithError(err, "loadbalancer.Client", "CheckName", nil, "Failure preparing request") return } resp, err := client.CheckNameSender(req) if err != nil { result.Response = autorest.Response{Response: resp} err = autorest.NewErrorWithError(err, "loadbalancer.Client", "CheckName", resp, "Failure sending request") return } result, err = client.CheckNameResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "loadbalancer.Client", "CheckName", resp, "Failure responding to request") } return } // CheckNamePreparer prepares the CheckName request. func (client Client) CheckNamePreparer(ctx context.Context, loadBalancerName string) (*http.Request, error) { queryParameters := map[string]interface{}{ "loadBalancerName": autorest.Encode("query", loadBalancerName), } preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), autorest.WithPath("/vpc-network/api/network/v1/load-balancers/check-load-balancer-name"), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // CheckNameSender sends the CheckName request. The method will close the // http.Response Body if it receives an error. func (client Client) CheckNameSender(req *http.Request) (*http.Response, error) { sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) return autorest.SendWithSender(client, req, sd...) } // CheckNameResponder handles the response to the CheckName request. The method always // closes the http.Response Body.
func (client Client) CheckNameResponder(resp *http.Response) (result CheckNameParameter, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } // Create creates a load balancer. // Parameters: // parameters - load balancer creation data func (client Client) Create(ctx context.Context, parameters InstanceParameter) (result autorest.Response, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/Client.Create") defer func() { sc := -1 if result.Response != nil { sc = result.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } req, err := client.CreatePreparer(ctx, parameters) if err != nil { err = autorest.NewErrorWithError(err, "loadbalancer.Client", "Create", nil, "Failure preparing request") return } resp, err := client.CreateSender(req) if err != nil { result.Response = resp err = autorest.NewErrorWithError(err, "loadbalancer.Client", "Create", resp, "Failure sending request") return } result, err = client.CreateResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "loadbalancer.Client", "Create", resp, "Failure responding to request") } return } // CreatePreparer prepares the Create request. func (client Client) CreatePreparer(ctx context.Context, parameters InstanceParameter) (*http.Request, error) { preparer := autorest.CreatePreparer( autorest.AsContentType("application/json;charset=UTF-8"), autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), autorest.WithPath("/vpc-network/api/network/v1/load-balancers/instances"), autorest.WithJSON(parameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // CreateSender sends the Create request. The method will close the // http.Response Body if it receives an error. func (client Client) CreateSender(req *http.Request) (*http.Response, error) { sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) return autorest.SendWithSender(client, req, sd...) } // CreateResponder handles the response to the Create request. The method always // closes the http.Response Body. func (client Client) CreateResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), autorest.ByClosing()) result.Response = resp return } // Delete deletes load balancers. // Parameters: // parameters - load balancer instance number info func (client Client) Delete(ctx context.Context, parameters InstanceListParameter) (result autorest.Response, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/Client.Delete") defer func() { sc := -1 if result.Response != nil { sc = result.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } req, err := client.DeletePreparer(ctx, parameters) if err != nil { err = autorest.NewErrorWithError(err, "loadbalancer.Client", "Delete", nil, "Failure preparing request") return } resp, err := client.DeleteSender(req) if err != nil { result.Response = resp err = autorest.NewErrorWithError(err, "loadbalancer.Client", "Delete", resp, "Failure sending request") return } result, err = client.DeleteResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "loadbalancer.Client", "Delete", resp, "Failure responding to request") } return } // DeletePreparer prepares the Delete request.
func (client Client) DeletePreparer(ctx context.Context, parameters InstanceListParameter) (*http.Request, error) { preparer := autorest.CreatePreparer( autorest.AsContentType("application/json;charset=UTF-8"), autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), autorest.WithPath("/vpc-network/api/network/v1/load-balancers/instances"), autorest.WithJSON(parameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client Client) DeleteSender(req *http.Request) (*http.Response, error) { sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) return autorest.SendWithSender(client, req, sd...) } // DeleteResponder handles the response to the Delete request. The method always // closes the http.Response Body. func (client Client) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), autorest.ByClosing()) result.Response = resp return } // Search lists load balancer information. // Parameters: // parameters - load balancer search data func (client Client) Search(ctx context.Context, parameters SearchParameter) (result SearchListParameter, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/Client.Search") defer func() { sc := -1 if result.Response.Response != nil { sc = result.Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } req, err := client.SearchPreparer(ctx, parameters) if err != nil { err = autorest.NewErrorWithError(err, "loadbalancer.Client", "Search", nil, "Failure preparing request") return } resp, err := client.SearchSender(req) if err != nil { result.Response = autorest.Response{Response: resp} err = autorest.NewErrorWithError(err, "loadbalancer.Client", "Search", resp, "Failure sending request") return } result, err = client.SearchResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "loadbalancer.Client", "Search", resp, "Failure responding to request") } return } // SearchPreparer prepares the Search request. func (client Client) SearchPreparer(ctx context.Context, parameters SearchParameter) (*http.Request, error) { preparer := autorest.CreatePreparer( autorest.AsContentType("application/json;charset=UTF-8"), autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), autorest.WithPath("/vpc-network/api/network/v1/load-balancers/instances/search"), autorest.WithJSON(parameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // SearchSender sends the Search request. The method will close the // http.Response Body if it receives an error. func (client Client) SearchSender(req *http.Request) (*http.Response, error) { sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) return autorest.SendWithSender(client, req, sd...) } // SearchResponder handles the response to the Search request. The method always // closes the http.Response Body.
func (client Client) SearchResponder(resp *http.Response) (result SearchListParameter, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } // ServerInstance gets information about the servers a load balancer applies to. // Parameters: // vpcNo - VPC number // layerTypeCode - layer type code func (client Client) ServerInstance(ctx context.Context, vpcNo string, layerTypeCode string) (result ServerInstanceListParameter, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/Client.ServerInstance") defer func() { sc := -1 if result.Response.Response != nil { sc = result.Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } req, err := client.ServerInstancePreparer(ctx, vpcNo, layerTypeCode) if err != nil { err = autorest.NewErrorWithError(err, "loadbalancer.Client", "ServerInstance", nil, "Failure preparing request") return } resp, err := client.ServerInstanceSender(req) if err != nil { result.Response = autorest.Response{Response: resp} err = autorest.NewErrorWithError(err, "loadbalancer.Client", "ServerInstance", resp, "Failure sending request") return } result, err = client.ServerInstanceResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "loadbalancer.Client", "ServerInstance", resp, "Failure responding to request") } return } // ServerInstancePreparer prepares the ServerInstance request. func (client Client) ServerInstancePreparer(ctx context.Context, vpcNo string, layerTypeCode string) (*http.Request, error) { queryParameters := map[string]interface{}{ "layerTypeCode": autorest.Encode("query", layerTypeCode), "vpcNo": autorest.Encode("query", vpcNo), } preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), autorest.WithPath("/vpc-network/api/network/v1/load-balancers/server-instances"), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // ServerInstanceSender sends the ServerInstance request. The method will close the // http.Response Body if it receives an error. func (client Client) ServerInstanceSender(req *http.Request) (*http.Response, error) { sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) return autorest.SendWithSender(client, req, sd...) } // ServerInstanceResponder handles the response to the ServerInstance request. The method always // closes the http.Response Body.
func (client Client) ServerInstanceResponder(resp *http.Response) (result ServerInstanceListParameter, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } // Update changes a load balancer's settings. // Parameters: // parameters - load balancer settings data func (client Client) Update(ctx context.Context, parameters SettingParameter) (result autorest.Response, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/Client.Update") defer func() { sc := -1 if result.Response != nil { sc = result.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } req, err := client.UpdatePreparer(ctx, parameters) if err != nil { err = autorest.NewErrorWithError(err, "loadbalancer.Client", "Update", nil, "Failure preparing request") return } resp, err := client.UpdateSender(req) if err != nil { result.Response = resp err = autorest.NewErrorWithError(err, "loadbalancer.Client", "Update", resp, "Failure sending request") return } result, err = client.UpdateResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "loadbalancer.Client", "Update", resp, "Failure responding to request") } return } // UpdatePreparer prepares the Update request. func (client Client) UpdatePreparer(ctx context.Context, parameters SettingParameter) (*http.Request, error) { preparer := autorest.CreatePreparer( autorest.AsContentType("application/json;charset=UTF-8"), autorest.AsPatch(), autorest.WithBaseURL(client.BaseURI), autorest.WithPath("/vpc-network/api/network/v1/load-balancers/instances/{lbInstanceNo}"), autorest.WithJSON(parameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // UpdateSender sends the Update request. The method will close the // http.Response Body if it receives an error. func (client Client) UpdateSender(req *http.Request) (*http.Response, error) { sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) return autorest.SendWithSender(client, req, sd...) } // UpdateResponder handles the response to the Update request. The method always // closes the http.Response Body. func (client Client) UpdateResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByClosing()) result.Response = resp return } // ZoneSubnet gets the load balancer financial-zone subnets. // Parameters: // vpcNo - VPC number func (client Client) ZoneSubnet(ctx context.Context, vpcNo string) (result ZoneSubnetParameter, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/Client.ZoneSubnet") defer func() { sc := -1 if result.Response.Response != nil { sc = result.Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } req, err := client.ZoneSubnetPreparer(ctx, vpcNo) if err != nil { err = autorest.NewErrorWithError(err, "loadbalancer.Client", "ZoneSubnet", nil, "Failure preparing request") return } resp, err := client.ZoneSubnetSender(req) if err != nil { result.Response = autorest.Response{Resp
return } result, err = client.ZoneSubnetResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "loadbalancer.Client", "ZoneSubnet", resp, "Failure responding to request") } return } // ZoneSubnetPreparer prepares the ZoneSubnet request. func (client Client) ZoneSubnetPreparer(ctx context.Context, vpcNo string) (*http.Request, error) { queryParameters := map[string]interface{}{ "vpcNo": autorest.Encode("query", vpcNo), } preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), autorest.WithPath("/vpc-network/api/network/v1/load-balancers/zone-subnets"), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // ZoneSubnetSender sends the ZoneSubnet request. The method will close the // http.Response Body if it receives an error. func (client Client) ZoneSubnetSender(req *http.Request) (*http.Response, error) { sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) return autorest.SendWithSender(client, req, sd...) } // ZoneSubnetResponder handles the response to the ZoneSubnet request. The method always // closes the http.Response Body. func (client Client) ZoneSubnetResponder(resp *http.Response) (result ZoneSubnetParameter, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return }
onse: resp} err = autorest.NewErrorWithError(err, "loadbalancer.Client", "ZoneSubnet", resp, "Failure sending request")
widget-b-first-route.component.spec.ts
import { ComponentFixture, TestBed, waitForAsync } from '@angular/core/testing'; import { WidgetBFirstRouteComponent } from './widget-b-first-route.component'; describe('WidgetBFirstRouteComponent', () => { let component: WidgetBFirstRouteComponent; let fixture: ComponentFixture<WidgetBFirstRouteComponent>; beforeEach( waitForAsync(() => { TestBed.configureTestingModule({ declarations: [WidgetBFirstRouteComponent], }).compileComponents(); }) );
fixture = TestBed.createComponent(WidgetBFirstRouteComponent); component = fixture.componentInstance; fixture.detectChanges(); }); it('should create', () => { expect(component).toBeTruthy(); }); });
beforeEach(() => {
actions.directive.spec.js
/* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ (function () { 'use strict'; describe('actions directive', function () { var $scope, $compile, $q, $templateCache, basePath; var rowItem = {id: 'row'}; var customItem = {id: 'custom'}; var callback = jasmine.createSpy('callback'); beforeEach(module('templates')); beforeEach(module('horizon.framework')); beforeEach(inject(function ($injector) { $compile = $injector.get('$compile'); basePath = $injector.get('horizon.framework.widgets.basePath'); $scope = $injector.get('$rootScope').$new(); $q = $injector.get('$q'); $templateCache = $injector.get('$templateCache'); })); it('should have no buttons if there are no actions', function () { var element = batchElementFor([]); expect(element.children().length).toBe(0); }); it('should allow for specifying by url for batch', function () { $scope.customItem = customItem; var element = batchElementFor([permittedActionWithUrl('custom')]); expect(element.children().length).toBe(1); var actionList = element.find('action-list'); expect(actionList.length).toBe(1); expect(actionList.attr('class').indexOf('btn-addon')).toBeGreaterThan(-1); expect(actionList.find('button').attr('class')).toEqual('btn-custom'); expect(actionList.find('button').attr('ng-click')).toEqual('disabled || callback(item)'); expect(actionList.text().trim()).toEqual('Custom Button'); actionList.find('button').click(); expect(callback).toHaveBeenCalledWith(customItem); }); it('should allow for specifying by url for row', function () { $scope.customItem = customItem; var element = rowElementFor([ permittedActionWithUrl('custom2'), permittedActionWithUrl('custom') ]); expect(element.children().length).toBe(1); var actionList = element.find('action-list'); expect(actionList.length).toBe(1); expect(actionList.attr('class').indexOf('btn-addon')).toEqual(-1); expect(actionList.find('button.btn-custom-2.split-button').text().trim()) .toEqual('Custom Button 2'); expect(actionList.find('li .btn-custom').text().trim()).toEqual('Custom Button'); actionList.find('button.btn-custom-2.split-button').click(); expect(callback).toHaveBeenCalledWith(undefined); actionList.find('li .btn-custom').click(); expect(callback).toHaveBeenCalledWith(customItem); }); it('should allow for specifying action text', function () { var element = batchElementFor([permittedActionWithText('Create Image')]); expect(element.children().length).toBe(1); var actionList = element.find('action-list'); expect(actionList.length).toBe(1); expect(actionList.attr('class').indexOf('btn-addon')).toBeGreaterThan(-1); expect(actionList.find('button').attr('ng-click')).toEqual('disabled || callback(item)'); expect(actionList.text().trim()).toEqual('Create Image'); actionList.find('button').click(); expect(callback).toHaveBeenCalled(); }); it('should allow for specifying by template for create', function () { var element = batchElementFor([permittedActionWithType('create', 'Create Image')]); expect(element.children().length).toBe(1); var actionList = element.find('action-list'); expect(actionList.length).toBe(1); expect(actionList.attr('class').indexOf('btn-addon')).toBeGreaterThan(-1); expect(actionList.find('button').attr('ng-click')).toEqual('disabled || callback(item)'); 
expect(actionList.text().trim()).toEqual('Create Image'); }); it('should allow for specifying by template for delete-selected', function () { var element = batchElementFor([permittedActionWithType('delete-selected', 'Delete Images')]); expect(element.children().length).toBe(1); var actionList = element.find('action-list'); expect(actionList.length).toBe(1); expect(actionList.attr('class').indexOf('btn-addon')).toBeGreaterThan(-1); expect(actionList.find('button').attr('ng-click')).toEqual('disabled || callback(item)'); expect(actionList.text().trim()).toEqual('Delete Images'); }); it('should allow for specifying by template for delete', function () { var element = rowElementFor([permittedActionWithType('delete', 'Delete Image')]); expect(element.children().length).toBe(1); var actionList = element.find('action-list'); expect(actionList.length).toBe(1); expect(actionList.attr('class').indexOf('btn-addon')).toBeGreaterThan(-1); expect(actionList.find('button').attr('ng-click')).toEqual('disabled || callback(item)'); expect(actionList.text().trim()).toEqual('Delete Image'); actionList.find('button').click(); expect(callback).toHaveBeenCalledWith(rowItem); }); it('should allow for specifying by template for danger', function () { var element = rowElementFor([permittedActionWithType('danger', 'Shutdown Instance')]); expect(element.children().length).toBe(1); var actionList = element.find('action-list'); expect(actionList.length).toBe(1); expect(actionList.attr('class').indexOf('btn-addon')).toBeGreaterThan(-1); expect(actionList.find('button').attr('ng-click')).toEqual('disabled || callback(item)'); expect(actionList.text().trim()).toEqual('Shutdown Instance'); actionList.find('button').click(); expect(callback).toHaveBeenCalledWith(rowItem); }); it('should have one button if there is one action', function () { var element = batchElementFor([ permittedActionWithType('create', 'Create Image') ]); expect(element.children().length).toBe(1); var actionList = element.find('action-list'); expect(actionList.length).toBe(1); expect(actionList.attr('class').indexOf('btn-addon')).toBeGreaterThan(-1); expect(actionList.find('button').attr('ng-click')).toEqual('disabled || callback(item)'); expect(actionList.text().trim()).toEqual('Create Image'); }); it('should have no buttons if not permitted', function () { var element = batchElementFor([notPermittedAction()]); expect(element.children().length).toBe(0); }); it('should have multiple buttons for multiple actions as a list', function () { var element = batchElementFor([ permittedActionWithType('create', 'Create Image'), permittedActionWithType('delete-selected', 'Delete Images') ]); expect(element.children().length).toBe(2); var actionList = element.find('action-list'); expect(actionList.length).toBe(2); expect(actionList.attr('class').indexOf('btn-addon')).toBeGreaterThan(-1); expect(actionList.find('button.btn-default').text().trim()).toEqual('Create Image'); expect(actionList.find('button.btn-danger').text().trim()).toEqual('Delete Images'); }); it('should bind multiple callbacks for multiple buttons in a batch', function () { var callback1 = jasmine.createSpy('callback1'); var callback2 = jasmine.createSpy('callback2'); var element = batchElementFor([ permittedActionWithText('Action 1', 'btn-1', callback1), permittedActionWithText('Action 2', 'btn-2', callback2) ]); expect(element.children().length).toBe(2); var actionList = element.find('action-list'); expect(actionList.length).toBe(2); actionList.find('button.btn-1').click(); 
expect(callback1).toHaveBeenCalled(); actionList.find('button.btn-2').click(); expect(callback2).toHaveBeenCalled(); }); it('should have as many buttons as permitted', function () { var element = batchElementFor([ permittedActionWithType('create', 'Create Image'), notPermittedAction() ]); expect(element.children().length).toBe(1); var actionList = element.find('action-list'); expect(actionList.length).toBe(1); expect(actionList.attr('class').indexOf('btn-addon')).toBeGreaterThan(-1); expect(actionList.find('button.btn-default').text().trim()).toEqual('Create Image'); }); it('should have multiple buttons as a dropdown with correct styling', function () { var element = rowElementFor([ permittedActionWithText('Edit Instance', 'btn-custom'), permittedActionWithType('danger', 'Shutdown Instance') ]); expect(element.children().length).toBe(1); var actionList = element.find('action-list'); expect(actionList.length).toBe(1); expect(actionList.attr('class').indexOf('btn-addon')).toEqual(-1); expect(actionList.find('button.split-button.btn-custom').text().trim()) .toEqual('Edit Instance'); expect(actionList.find('li a.text-danger').text().trim()).toEqual('Shutdown Instance'); }); it('should style danger type button as button in a dropdown', function () { var element = rowElementFor([ permittedActionWithType('danger', 'Shutdown Instance'), permittedActionWithText('Edit Instance', 'btn-custom') ]); expect(element.children().length).toBe(1); var actionList = element.find('action-list'); expect(actionList.length).toBe(1); expect(actionList.attr('class').indexOf('btn-addon')).toEqual(-1); expect(actionList.find('button.split-button.btn-danger').text().trim()) .toEqual('Shutdown Instance'); expect(actionList.find('li a.btn-custom').text().trim()).toEqual('Edit Instance'); }); it('should have multiple buttons as a dropdown for actions text', function () { var element = rowElementFor([ permittedActionWithText('Create Image'), permittedActionWithText('Delete Image', 'text-danger') ]); expect(element.children().length).toBe(1); var actionList = element.find('action-list'); expect(actionList.length).toBe(1); expect(actionList.attr('class').indexOf('btn-addon')).toEqual(-1); expect(actionList.find('button').first().text().trim()).toEqual('Create Image'); expect(actionList.find('li a.text-danger').text().trim()).toEqual('Delete Image'); }); it('should bind callbacks per button for dropdowns', function () { var callback1 = jasmine.createSpy('callback1'); var callback2 = jasmine.createSpy('callback2'); var element = rowElementFor([ permittedActionWithText('Action 1', 'btn-1', callback1), permittedActionWithText('Action 2', 'btn-2', callback2) ]); expect(element.children().length).toBe(1); var actionList = element.find('action-list'); expect(actionList.length).toBe(1); actionList.find('.btn-1').click(); expect(callback1).toHaveBeenCalledWith(rowItem); actionList.find('li .btn-2').click(); expect(callback2).toHaveBeenCalledWith(rowItem); }); it('should have one button if only one permitted for dropdown', function () { var element = rowElementFor([ permittedActionWithText('Single Action', 'btn-custom'), notPermittedAction() ]); expect(element.children().length).toBe(1); var actionList = element.find('action-list'); expect(actionList.length).toBe(1); expect(actionList.attr('class').indexOf('btn-addon')).toBeGreaterThan(-1); expect(actionList.find('button.btn-custom').text().trim()).toEqual('Single Action'); }); function permittedActionWithUrl(templateName) { return { template: { url: getTemplatePath("actions." 
+ templateName) }, service: getService(getPermission(true), callback) }; } function permittedActionWithText(text, actionClasses, actionCallback) { return { template: { text: text, actionClasses: actionClasses }, service: getService(getPermission(true), actionCallback || callback) }; } function permittedActionWithType(templateType, text, actionCallback) { return { template: { type: templateType, text: text }, service: getService(getPermission(true), actionCallback || callback) }; } function notPermittedAction() { return { template: 'dummy', service: getService(getPermission(false), callback) }; } function getService(permissions, callback) { return { allowed: function(args) { if (args) { expect(args).toEqual(rowItem); } return permissions; }, perform: function(args) { callback(args); } }; } function getPermission(allowed) { var deferred = $q.defer(); if (allowed) { deferred.resolve(); } else { deferred.reject(); } return deferred.promise; } function batchElementFor(actions) { $scope.actions = function() { return actions; }; var element = angular.element(getTemplate('actions.batch')); $compile(element)($scope); $scope.$apply(); return element; } function rowElementFor(actions) { $scope.rowItem = rowItem; $scope.actions = function() { return actions; }; var element = angular.element(getTemplate('actions.row')); $compile(element)($scope); $scope.$apply(); return element; } function getTemplate(templateName) { return $templateCache.get(getTemplatePath(templateName)); } function getTemplatePath(templateName) { return basePath + 'action-list/' + templateName + '.mock.html'; } }); })();
*
transactions.js
"use strict"; Object.defineProperty(exports, "__esModule", { value: true }); var _createClass = function () { function
(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }(); function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } } var OrderTransactions = function () { function OrderTransactions(client) { _classCallCheck(this, OrderTransactions); this.client = client; } _createClass(OrderTransactions, [{ key: "create", value: function create(orderId, data) { return this.client.post("/orders/" + orderId + "/transactions", data); } }, { key: "update", value: function update(orderId, transactionId, data) { return this.client.put("/orders/" + orderId + "/transactions/" + transactionId, data); } }, { key: "delete", value: function _delete(orderId, transactionId) { return this.client.delete("/orders/" + orderId + "/transactions/" + transactionId); } }]); return OrderTransactions; }(); exports.default = OrderTransactions;
defineProperties
fibonacci-fibonacci-join-2-1.rs
extern crate rayon_1_0_0; extern crate lolbench_support; use lolbench_support::{criterion_from_env, init_logging}; fn
() { init_logging(); let mut crit = criterion_from_env(); rayon_1_0_0::fibonacci::fibonacci_join_2_1(&mut crit); }
main
math.py
import tensorflow as tf def cov(x):
def inv_cov(x): return tf.linalg.inv(cov(x))
mean_x = tf.reduce_mean(x, axis=0, keepdims=True) mx = tf.matmul(tf.transpose(mean_x), mean_x) vx = tf.matmul(tf.transpose(x), x) / tf.cast(tf.shape(x)[0], tf.float64) cov_xx = vx - mx return cov_xx
models.rs
#![doc = "generated by AutoRust"] #![allow(non_camel_case_types)] #![allow(unused_imports)] use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CheckNameAvailabilityInput { pub name: String, #[serde(rename = "type")] pub type_: ResourceType, } impl CheckNameAvailabilityInput { pub fn new(name: String, type_: ResourceType) -> Self { Self { name, type_ } } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)] pub struct CheckNameAvailabilityOutput { #[serde(rename = "NameAvailable", default, skip_serializing_if = "Option::is_none")] pub name_available: Option<bool>, #[serde(rename = "Reason", default, skip_serializing_if = "Option::is_none")] pub reason: Option<String>, #[serde(rename = "Message", default, skip_serializing_if = "Option::is_none")] pub message: Option<String>, } impl CheckNameAvailabilityOutput { pub fn new() -> Self { Self::default() } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)] pub struct CustomDomain { #[serde(flatten)] pub resource: Resource, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<CustomDomainProperties>, } impl CustomDomain { pub fn new() -> Self { Self::default() } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)] pub struct CustomDomainListResult { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<CustomDomain>, } impl CustomDomainListResult { pub fn new() -> Self { Self::default() } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)] pub struct CustomDomainParameters { #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<CustomDomainPropertiesParameters>, } impl CustomDomainParameters { pub fn new() -> Self { Self::default() } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CustomDomainProperties { #[serde(rename = "hostName")] pub host_name: String, #[serde(rename = "resourceState", default, skip_serializing_if = "Option::is_none")] pub resource_state: Option<custom_domain_properties::ResourceState>, #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")] pub provisioning_state: Option<ProvisioningState>, } impl CustomDomainProperties { pub fn new(host_name: String) -> Self { Self { host_name, resource_state: None, provisioning_state: None, } } } pub mod custom_domain_properties { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum ResourceState { Creating, Active, Deleting, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct CustomDomainPropertiesParameters { #[serde(rename = "hostName")] pub host_name: String, } impl CustomDomainPropertiesParameters { pub fn new(host_name: String) -> Self { Self { host_name } } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DeepCreatedOrigin { pub name: String, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<DeepCreatedOriginProperties>, } impl DeepCreatedOrigin { pub fn new(name: String) -> Self { Self { name, properties: None } } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DeepCreatedOriginProperties { #[serde(rename = "hostName")] pub host_name: String, #[serde(rename = "httpPort", default, skip_serializing_if = "Option::is_none")] pub http_port: Option<i64>, #[serde(rename = "httpsPort", default, skip_serializing_if = "Option::is_none")] pub https_port: Option<i64>, } impl DeepCreatedOriginProperties { pub fn
(host_name: String) -> Self { Self { host_name, http_port: None, https_port: None, } } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Endpoint { #[serde(flatten)] pub tracked_resource: TrackedResource, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<EndpointProperties>, } impl Endpoint { pub fn new(tracked_resource: TrackedResource) -> Self { Self { tracked_resource, properties: None, } } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EndpointCreateParameters { pub location: String, #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<EndpointPropertiesCreateParameters>, } impl EndpointCreateParameters { pub fn new(location: String) -> Self { Self { location, tags: None, properties: None, } } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)] pub struct EndpointListResult { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<Endpoint>, } impl EndpointListResult { pub fn new() -> Self { Self::default() } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)] pub struct EndpointProperties { #[serde(rename = "hostName", default, skip_serializing_if = "Option::is_none")] pub host_name: Option<String>, #[serde(rename = "originHostHeader", default, skip_serializing_if = "Option::is_none")] pub origin_host_header: Option<String>, #[serde(rename = "originPath", default, skip_serializing_if = "Option::is_none")] pub origin_path: Option<String>, #[serde(rename = "contentTypesToCompress", default, skip_serializing_if = "Vec::is_empty")] pub content_types_to_compress: Vec<String>, #[serde(rename = "isCompressionEnabled", default, skip_serializing_if = "Option::is_none")] pub is_compression_enabled: Option<bool>, #[serde(rename = "isHttpAllowed", default, skip_serializing_if = "Option::is_none")] pub is_http_allowed: Option<bool>, #[serde(rename = "isHttpsAllowed", default, skip_serializing_if = "Option::is_none")] pub is_https_allowed: Option<bool>, #[serde(rename = "queryStringCachingBehavior", default, skip_serializing_if = "Option::is_none")] pub query_string_caching_behavior: Option<QueryStringCachingBehavior>, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub origins: Vec<DeepCreatedOrigin>, #[serde(rename = "resourceState", default, skip_serializing_if = "Option::is_none")] pub resource_state: Option<endpoint_properties::ResourceState>, #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")] pub provisioning_state: Option<ProvisioningState>, } impl EndpointProperties { pub fn new() -> Self { Self::default() } } pub mod endpoint_properties { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum ResourceState { Creating, Deleting, Running, Starting, Stopped, Stopping, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EndpointPropertiesCreateParameters { #[serde(rename = "originHostHeader", default, skip_serializing_if = "Option::is_none")] pub origin_host_header: Option<String>, #[serde(rename = "originPath", default, skip_serializing_if = "Option::is_none")] pub origin_path: Option<String>, #[serde(rename = "contentTypesToCompress", default, skip_serializing_if = "Vec::is_empty")] pub content_types_to_compress: Vec<String>, #[serde(rename = "isCompressionEnabled", default, skip_serializing_if = "Option::is_none")] pub is_compression_enabled: 
Option<bool>, #[serde(rename = "isHttpAllowed", default, skip_serializing_if = "Option::is_none")] pub is_http_allowed: Option<bool>, #[serde(rename = "isHttpsAllowed", default, skip_serializing_if = "Option::is_none")] pub is_https_allowed: Option<bool>, #[serde(rename = "queryStringCachingBehavior", default, skip_serializing_if = "Option::is_none")] pub query_string_caching_behavior: Option<QueryStringCachingBehavior>, pub origins: Vec<DeepCreatedOrigin>, } impl EndpointPropertiesCreateParameters { pub fn new(origins: Vec<DeepCreatedOrigin>) -> Self { Self { origin_host_header: None, origin_path: None, content_types_to_compress: Vec::new(), is_compression_enabled: None, is_http_allowed: None, is_https_allowed: None, query_string_caching_behavior: None, origins, } } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)] pub struct EndpointPropertiesUpdateParameters { #[serde(rename = "originHostHeader", default, skip_serializing_if = "Option::is_none")] pub origin_host_header: Option<String>, #[serde(rename = "originPath", default, skip_serializing_if = "Option::is_none")] pub origin_path: Option<String>, #[serde(rename = "contentTypesToCompress", default, skip_serializing_if = "Vec::is_empty")] pub content_types_to_compress: Vec<String>, #[serde(rename = "isCompressionEnabled", default, skip_serializing_if = "Option::is_none")] pub is_compression_enabled: Option<bool>, #[serde(rename = "isHttpAllowed", default, skip_serializing_if = "Option::is_none")] pub is_http_allowed: Option<bool>, #[serde(rename = "isHttpsAllowed", default, skip_serializing_if = "Option::is_none")] pub is_https_allowed: Option<bool>, #[serde(rename = "queryStringCachingBehavior", default, skip_serializing_if = "Option::is_none")] pub query_string_caching_behavior: Option<QueryStringCachingBehavior>, } impl EndpointPropertiesUpdateParameters { pub fn new() -> Self { Self::default() } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)] pub struct EndpointUpdateParameters { #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<EndpointPropertiesUpdateParameters>, } impl EndpointUpdateParameters { pub fn new() -> Self { Self::default() } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)] pub struct ErrorResponse { #[serde(default, skip_serializing_if = "Option::is_none")] pub code: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub message: Option<String>, } impl ErrorResponse { pub fn new() -> Self { Self::default() } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct LoadParameters { #[serde(rename = "contentPaths")] pub content_paths: Vec<String>, } impl LoadParameters { pub fn new(content_paths: Vec<String>) -> Self { Self { content_paths } } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)] pub struct Operation { #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub display: Option<operation::Display>, } impl Operation { pub fn new() -> Self { Self::default() } } pub mod operation { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)] pub struct Display { #[serde(default, skip_serializing_if = "Option::is_none")] pub provider: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub resource: Option<String>, #[serde(default, 
skip_serializing_if = "Option::is_none")] pub operation: Option<String>, } impl Display { pub fn new() -> Self { Self::default() } } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)] pub struct OperationListResult { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<Operation>, } impl OperationListResult { pub fn new() -> Self { Self::default() } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)] pub struct Origin { #[serde(flatten)] pub resource: Resource, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<OriginProperties>, } impl Origin { pub fn new() -> Self { Self::default() } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)] pub struct OriginListResult { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<Origin>, } impl OriginListResult { pub fn new() -> Self { Self::default() } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)] pub struct OriginParameters { #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<OriginPropertiesParameters>, } impl OriginParameters { pub fn new() -> Self { Self::default() } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct OriginProperties { #[serde(rename = "hostName")] pub host_name: String, #[serde(rename = "httpPort", default, skip_serializing_if = "Option::is_none")] pub http_port: Option<i64>, #[serde(rename = "httpsPort", default, skip_serializing_if = "Option::is_none")] pub https_port: Option<i64>, #[serde(rename = "resourceState", default, skip_serializing_if = "Option::is_none")] pub resource_state: Option<origin_properties::ResourceState>, #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")] pub provisioning_state: Option<ProvisioningState>, } impl OriginProperties { pub fn new(host_name: String) -> Self { Self { host_name, http_port: None, https_port: None, resource_state: None, provisioning_state: None, } } } pub mod origin_properties { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum ResourceState { Creating, Active, Deleting, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct OriginPropertiesParameters { #[serde(rename = "hostName")] pub host_name: String, #[serde(rename = "httpPort", default, skip_serializing_if = "Option::is_none")] pub http_port: Option<i64>, #[serde(rename = "httpsPort", default, skip_serializing_if = "Option::is_none")] pub https_port: Option<i64>, } impl OriginPropertiesParameters { pub fn new(host_name: String) -> Self { Self { host_name, http_port: None, https_port: None, } } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Profile { #[serde(flatten)] pub tracked_resource: TrackedResource, #[serde(default, skip_serializing_if = "Option::is_none")] pub sku: Option<Sku>, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<ProfileProperties>, } impl Profile { pub fn new(tracked_resource: TrackedResource) -> Self { Self { tracked_resource, sku: None, properties: None, } } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ProfileCreateParameters { pub location: String, #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, pub sku: Sku, } impl ProfileCreateParameters { pub fn new(location: String, sku: Sku) -> Self { Self { location, tags: None, sku } } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, 
Default)] pub struct ProfileListResult { #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<Profile>, } impl ProfileListResult { pub fn new() -> Self { Self::default() } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)] pub struct ProfileProperties { #[serde(rename = "resourceState", default, skip_serializing_if = "Option::is_none")] pub resource_state: Option<profile_properties::ResourceState>, #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")] pub provisioning_state: Option<ProvisioningState>, } impl ProfileProperties { pub fn new() -> Self { Self::default() } } pub mod profile_properties { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum ResourceState { Creating, Active, Deleting, Disabled, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ProfileUpdateParameters { pub tags: serde_json::Value, } impl ProfileUpdateParameters { pub fn new(tags: serde_json::Value) -> Self { Self { tags } } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum ProvisioningState { Creating, Succeeded, Failed, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct PurgeParameters { #[serde(rename = "contentPaths")] pub content_paths: Vec<String>, } impl PurgeParameters { pub fn new(content_paths: Vec<String>) -> Self { Self { content_paths } } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum QueryStringCachingBehavior { IgnoreQueryString, BypassCaching, UseQueryString, NotSet, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)] pub struct Resource { #[serde(default, skip_serializing_if = "Option::is_none")] pub id: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<String>, #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")] pub type_: Option<String>, } impl Resource { pub fn new() -> Self { Self::default() } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum ResourceType { #[serde(rename = "Microsoft.Cdn/Profiles/Endpoints")] MicrosoftCdnProfilesEndpoints, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)] pub struct Sku { #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<sku::Name>, } impl Sku { pub fn new() -> Self { Self::default() } } pub mod sku { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Name { #[serde(rename = "Standard_Verizon")] StandardVerizon, #[serde(rename = "Premium_Verizon")] PremiumVerizon, #[serde(rename = "Custom_Verizon")] CustomVerizon, #[serde(rename = "Standard_Akamai")] StandardAkamai, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)] pub struct SsoUri { #[serde(rename = "ssoUriValue", default, skip_serializing_if = "Option::is_none")] pub sso_uri_value: Option<String>, } impl SsoUri { pub fn new() -> Self { Self::default() } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct TrackedResource { #[serde(flatten)] pub resource: Resource, pub location: String, pub tags: serde_json::Value, } impl TrackedResource { pub fn new(location: String, tags: serde_json::Value) -> Self { Self { resource: Resource::default(), location, tags, } } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ValidateCustomDomainInput { #[serde(rename = "hostName")] pub host_name: String, } impl ValidateCustomDomainInput { pub fn new(host_name: String) -> Self { Self { host_name } } 
} #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)] pub struct ValidateCustomDomainOutput { #[serde(rename = "customDomainValidated", default, skip_serializing_if = "Option::is_none")] pub custom_domain_validated: Option<bool>, #[serde(default, skip_serializing_if = "Option::is_none")] pub reason: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub message: Option<String>, } impl ValidateCustomDomainOutput { pub fn new() -> Self { Self::default() } }
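Since every model above derives `Serialize`/`Deserialize`, the serde attributes define the wire format; a minimal round-trip sketch, assuming `serde_json` is available as a dependency (it already appears in the `tags: Option<serde_json::Value>` fields):

fn main() -> Result<(), serde_json::Error> {
    // `type_` is emitted as "type" thanks to #[serde(rename = "type")], and the
    // enum variant serializes as its renamed string.
    let input = CheckNameAvailabilityInput::new(
        "my-endpoint".to_string(),
        ResourceType::MicrosoftCdnProfilesEndpoints,
    );
    let json = serde_json::to_string(&input)?;
    // Should print roughly: {"name":"my-endpoint","type":"Microsoft.Cdn/Profiles/Endpoints"}
    println!("{}", json);

    // Fields marked `default, skip_serializing_if = "Option::is_none"` are
    // optional in both directions, so an empty object deserializes cleanly.
    let output: CheckNameAvailabilityOutput = serde_json::from_str("{}")?;
    assert_eq!(output.name_available, None);
    Ok(())
}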
monitor.rs
use std::fmt; use std::ptr; use std::ffi::OsStr; use std::io::Result; use std::ops::Deref; use std::os::unix::io::{AsRawFd, RawFd}; #[cfg(feature = "mio06")] use mio06::{event::Evented, unix::EventedFd, Poll, PollOpt, Ready, Token as Token06}; #[cfg(feature = "mio07")] use mio07::{event::Source, unix::SourceFd, Interest, Registry, Token as Token07}; use Udev; use {ffi, util}; use {AsRaw, AsRawWithContext, Device, FromRaw}; /// Monitors for device events. /// /// A monitor communicates with the kernel over a socket. Filtering events is performed efficiently /// in the kernel, and only events that match the filters are received by the socket. Filters must /// be setup before listening for events. pub struct Builder { udev: Udev, monitor: *mut ffi::udev_monitor, } impl Clone for Builder { fn clone(&self) -> Self { Self { udev: self.udev.clone(), monitor: unsafe { ffi::udev_monitor_ref(self.monitor) }, } } } impl Drop for Builder { fn drop(&mut self) { unsafe { ffi::udev_monitor_unref(self.monitor); } } } as_ffi_with_context!(Builder, monitor, ffi::udev_monitor, ffi::udev_monitor_ref); impl Builder { /// Creates a new `Monitor`. pub fn new() -> Result<Self> { // Create a new Udev context for this monitor // It would be more efficient to allow callers to create just one context and use multiple // monitors, however that would be an API-breaking change. Self::with_udev(Udev::new()?) } /// Creates a new `Monitor` using an existing `Udev` instance pub(crate) fn with_udev(udev: Udev) -> Result<Self> { let name = b"udev\0".as_ptr() as *const libc::c_char; let ptr = try_alloc!(unsafe { ffi::udev_monitor_new_from_netlink(udev.as_raw(), name) }); Ok(Self { udev, monitor: ptr }) } /// Adds a filter that matches events for devices with the given subsystem. pub fn match_subsystem<T: AsRef<OsStr>>(self, subsystem: T) -> Result<Self> { let subsystem = util::os_str_to_cstring(subsystem)?; util::errno_to_result(unsafe { ffi::udev_monitor_filter_add_match_subsystem_devtype( self.monitor, subsystem.as_ptr(), ptr::null(), ) }) .and(Ok(self)) } /// Adds a filter that matches events for devices with the given subsystem and device type. pub fn match_subsystem_devtype<T: AsRef<OsStr>, U: AsRef<OsStr>>( self, subsystem: T, devtype: U, ) -> Result<Self> { let subsystem = util::os_str_to_cstring(subsystem)?; let devtype = util::os_str_to_cstring(devtype)?; util::errno_to_result(unsafe { ffi::udev_monitor_filter_add_match_subsystem_devtype( self.monitor, subsystem.as_ptr(), devtype.as_ptr(), ) }) .and(Ok(self)) } /// Adds a filter that matches events for devices with the given tag. pub fn match_tag<T: AsRef<OsStr>>(self, tag: T) -> Result<Self> { let tag = util::os_str_to_cstring(tag)?; util::errno_to_result(unsafe { ffi::udev_monitor_filter_add_match_tag(self.monitor, tag.as_ptr()) }) .and(Ok(self)) } /// Removes all filters currently set on the monitor. pub fn clear_filters(self) -> Result<Self> { util::errno_to_result(unsafe { ffi::udev_monitor_filter_remove(self.monitor) }) .and(Ok(self)) } /// Listens for events matching the current filters. /// /// This method consumes the `Monitor`. pub fn listen(self) -> Result<Socket> { util::errno_to_result(unsafe { ffi::udev_monitor_enable_receiving(self.monitor) })?; Ok(Socket { inner: self }) } } /// An active monitor that can receive events. /// /// The events received by a `Socket` match the filters setup by the `Monitor` that created /// the socket. /// /// Monitors are initially setup to receive events from the kernel via a nonblocking socket. 
A /// variant of `poll()` should be used on the file descriptor returned by the `AsRawFd` trait to /// wait for new events. #[derive(Clone)] pub struct Socket { inner: Builder, } impl AsRaw<ffi::udev_monitor> for Socket { fn as_raw(&self) -> *mut ffi::udev_monitor { self.inner.monitor } fn into_raw(self) -> *mut ffi::udev_monitor { self.inner.monitor } } /// Provides raw access to the monitor's socket. impl AsRawFd for Socket { /// Returns the file descriptor of the monitor's socket. fn as_raw_fd(&self) -> RawFd { unsafe { ffi::udev_monitor_get_fd(self.inner.monitor) } } } impl Iterator for Socket { type Item = Event; fn next(&mut self) -> Option<Event> { let ptr = unsafe { ffi::udev_monitor_receive_device(self.inner.monitor) }; if ptr.is_null() { None } else { let device = Device::from_raw(self.inner.udev.clone(), ptr); Some(Event { device }) } } } /// Types of events that can be received from udev. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum EventType { /// A device was added. Add, /// A device changed. Change, /// A device was removed. Remove, /// A device was bound to driver. Bind, /// A device was unbound to driver. Unbind, /// An unknown event occurred. Unknown, } impl Default for EventType { fn default() -> Self { EventType::Unknown } } impl fmt::Display for EventType { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.write_str(match *self { EventType::Add => "add", EventType::Change => "change", EventType::Remove => "remove", EventType::Bind => "bind", EventType::Unbind => "unbind", EventType::Unknown => "unknown", }) } } /// An event that indicates a change in device state. pub struct Event { device: Device, } /// Provides access to the device associated with the event. impl Deref for Event { type Target = Device; fn deref(&self) -> &Device { &self.device } } impl Event { /// Returns the `EventType` corresponding to this event. pub fn
event_type
(&self) -> EventType { let value = match self.device.property_value("ACTION") { Some(s) => s.to_str(), None => None, }; match value { Some("add") => EventType::Add, Some("change") => EventType::Change, Some("remove") => EventType::Remove, Some("bind") => EventType::Bind, Some("unbind") => EventType::Unbind, _ => EventType::Unknown, } } /// Returns the event's sequence number. pub fn sequence_number(&self) -> u64 { unsafe { ffi::udev_device_get_seqnum(self.device.as_raw()) as u64 } } /// Returns the device associated with this event. pub fn device(&self) -> Device { self.device.clone() } } #[cfg(feature = "mio06")] impl Evented for Socket { fn register( &self, poll: &Poll, token: Token06, interest: Ready, opts: PollOpt, ) -> std::io::Result<()> { EventedFd(&self.as_raw_fd()).register(poll, token, interest, opts) } fn reregister( &self, poll: &Poll, token: Token06, interest: Ready, opts: PollOpt, ) -> std::io::Result<()> { EventedFd(&self.as_raw_fd()).reregister(poll, token, interest, opts) } fn deregister(&self, poll: &Poll) -> std::io::Result<()> { EventedFd(&self.as_raw_fd()).deregister(poll) } } #[cfg(feature = "mio07")] impl Source for Socket { fn register( &mut self, registry: &Registry, token: Token07, interest: Interest, ) -> std::io::Result<()> { SourceFd(&self.as_raw_fd()).register(registry, token, interest) } fn reregister( &mut self, registry: &Registry, token: Token07, interest: Interest, ) -> std::io::Result<()> { SourceFd(&self.as_raw_fd()).reregister(registry, token, interest) } fn deregister(&mut self, registry: &Registry) -> std::io::Result<()> { SourceFd(&self.as_raw_fd()).deregister(registry) } }
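A hedged usage sketch of the API above — the function name and the sleep-based drain loop are illustrative, not part of the crate; real consumers should poll the fd exposed via `AsRawFd` (or use the mio integrations above) instead of sleeping:

fn watch_usb_events() -> std::io::Result<()> {
    // Build a monitor, filter in the kernel, then switch to receiving events.
    let mut socket = Builder::new()?
        .match_subsystem("usb")?
        .listen()?;

    loop {
        // The underlying socket is nonblocking, so `next()` yields None as soon
        // as the queue is empty; here we simply drain whatever is pending.
        while let Some(event) = socket.next() {
            println!("seqnum {}: {}", event.sequence_number(), event.event_type());
        }
        std::thread::sleep(std::time::Duration::from_millis(100));
    }
}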
db_test.go
package db

import (
	"errors"
	"reflect"
	"testing"

	"github.com/dynatrace-sockshop/user/users"
)

var (
	TestDB       = fake{}
	ErrFakeError = errors.New("Fake error")
	TestAddress  = users.Address{
		Street:  "street",
		Number:  "51b",
		Country: "Netherlands",
		City:    "Amsterdam",
		ID:      "000056",
	}
)

func TestInit(t *testing.T) {
	err := Init()
	if err == nil {
		t.Error("Expected no registered db error")
	}
	Register("test", TestDB)
	database = "test"
	err = Init()
	if err != ErrFakeError {
		t.Error("expected fake db error from init")
	}
	TestAddress.AddLinks()
}

func TestSet(t *testing.T) {
	database = "nodb"
	err := Set()
	if err == nil {
		t.Error("Expecting error for no database found")
	}
	Register("nodb2", TestDB)
	database = "nodb2"
	err = Set()
	if err != nil {
		t.Error(err)
	}
}

func TestRegister(t *testing.T) {
	l := len(DBTypes)
	Register("test2", TestDB)
	if len(DBTypes) != l+1 {
		t.Errorf("Expecting %v DB types, received %v", l+1, len(DBTypes))
	}
	l = len(DBTypes)
	Register("test2", TestDB)
	if len(DBTypes) != l {
		t.Errorf("Expecting %v DB types, received %v (duplicate names)", l, len(DBTypes))
	}
}

func TestCreateUser(t *testing.T) {
	err := CreateUser(&users.User{})
	if err != ErrFakeError {
		t.Error("expected fake db error from create")
	}
}

func TestGetUser(t *testing.T) {
	_, err := GetUser("test")
	if err != ErrFakeError {
		t.Error("expected fake db error from get")
	}
}

func TestGetUserByName(t *testing.T) {
	_, err := GetUserByName("test")
	if err != ErrFakeError {
		t.Error("expected fake db error from get")
	}
}

func TestGetUserAttributes(t *testing.T) {
	u := users.New()
	GetUserAttributes(&u)
	if len(u.Addresses) != 1 {
		t.Error("expected one address added for GetUserAttributes")
	}
	if !reflect.DeepEqual(u.Addresses[0], TestAddress) {
		t.Error("expected matching addresses")
	}
}

func TestPing(t *testing.T) {
	err := Ping()
	if err != ErrFakeError {
		t.Error("expected fake db error from ping")
	}
}

type fake struct{}

func (f fake) Init() error { return ErrFakeError }
func (f fake) GetUserByName(name string) (users.User, error) { return users.User{}, ErrFakeError }
func (f fake) GetUser(id string) (users.User, error) { return users.User{}, ErrFakeError }
func (f fake) GetUsers() ([]users.User, error) { return make([]users.User, 0), ErrFakeError }
func (f fake) CreateUser(*users.User) error { return ErrFakeError }
func (f fake) GetUserAttributes(u *users.User) error {
	u.Addresses = append(u.Addresses, TestAddress)
	return nil
}
func (f fake) GetCard(id string) (users.Card, error) { return users.Card{}, ErrFakeError }
func (f fake) GetCards() ([]users.Card, error) { return make([]users.Card, 0), ErrFakeError }
func (f fake) CreateCard(c *users.Card, id string) error { return ErrFakeError }
func (f fake) GetAddress(id string) (users.Address, error) { return users.Address{}, ErrFakeError }
func (f fake) GetAddresses() ([]users.Address, error) { return make([]users.Address, 0), ErrFakeError }
func (f fake) CreateAddress(u *users.Address, id string) error { return ErrFakeError }
func (f fake) Delete(entity, id string) error { return ErrFakeError }
func (f fake) Ping() error { return ErrFakeError }
output.rs
// https://github.com/sharkdp/bat a1b9334a44a2c652f52dddaa83dbacba57372468 // src/output.rs // See src/bat_utils/LICENSE use std::env; use std::ffi::OsString; use std::io::{self, Write}; use std::path::PathBuf; use std::process::{Child, Command, Stdio}; use super::less::retrieve_less_version; use crate::config; use crate::features::navigate; #[derive(Debug, Clone, Copy, PartialEq)] #[allow(dead_code)] pub enum PagingMode { Always, QuitIfOneScreen, Never, } use crate::errors::*; pub enum OutputType { Pager(Child), Stdout(io::Stdout), } impl OutputType { pub fn from_mode( mode: PagingMode, pager: Option<&str>, config: &config::Config, ) -> Result<Self> { use self::PagingMode::*; Ok(match mode { Always => OutputType::try_pager(false, pager, config)?, QuitIfOneScreen => OutputType::try_pager(true, pager, config)?, _ => OutputType::stdout(), }) } /// Try to launch the pager. Fall back to stdout in case of errors. fn try_pager( quit_if_one_screen: bool, pager_from_config: Option<&str>, config: &config::Config, ) -> Result<Self> { let mut replace_arguments_to_less = false; let pager_from_env = match ( env::var("DELTA_PAGER"), env::var("BAT_PAGER"), env::var("PAGER"), ) { (Ok(delta_pager), _, _) => Some(delta_pager), (_, Ok(bat_pager), _) => Some(bat_pager), (_, _, Ok(pager)) =>
{ // less needs to be called with the '-R' option in order to properly interpret ANSI // color sequences. If someone has set PAGER="less -F", we therefore need to // overwrite the arguments and add '-R'. // We only do this for PAGER, since it is used in other contexts. replace_arguments_to_less = true; Some(pager) }
_ => None, }; let pager_from_config = pager_from_config.map(|p| p.to_string()); if pager_from_config.is_some() { replace_arguments_to_less = false; } let pager = pager_from_config .or(pager_from_env) .unwrap_or_else(|| String::from("less")); let pagerflags = shell_words::split(&pager).chain_err(|| "Could not parse pager command.")?; match pagerflags.split_first() { Some((pager_name, args)) => { let pager_path = PathBuf::from(pager_name); let is_less = pager_path.file_stem() == Some(&OsString::from("less")); let mut process = if is_less { let mut p = Command::new(&pager_path); if args.is_empty() || replace_arguments_to_less { p.args(vec!["--RAW-CONTROL-CHARS"]); // Passing '--no-init' fixes a bug with '--quit-if-one-screen' in older // versions of 'less'. Unfortunately, it also breaks mouse-wheel support. // // See: http://www.greenwoodsoftware.com/less/news.530.html // // For newer versions (530 or 558 on Windows), we omit '--no-init' as it // is not needed anymore. match retrieve_less_version() { None => { p.arg("--no-init"); } Some(version) if (version < 530 || (cfg!(windows) && version < 558)) => { p.arg("--no-init"); } _ => {} } if quit_if_one_screen { p.arg("--quit-if-one-screen"); } } else { p.args(args); } p.env("LESSCHARSET", "UTF-8"); p } else { if pager_path.file_stem() == Some(&OsString::from("delta")) { eprintln!( "\ It looks like you have set delta as the value of $PAGER. \ This would result in a non-terminating recursion. \ delta is not an appropriate value for $PAGER \ (but it is an appropriate value for $GIT_PAGER)." ); std::process::exit(1); } let mut p = Command::new(&pager_path); p.args(args); p }; if config.navigate { process.args(&["--pattern", &navigate::make_navigate_regexp(&config)]); } Ok(process .env("LESSANSIENDCHARS", "mK") .stdin(Stdio::piped()) .spawn() .map(OutputType::Pager) .unwrap_or_else(|_| OutputType::stdout())) } None => Ok(OutputType::stdout()), } } fn stdout() -> Self { OutputType::Stdout(io::stdout()) } pub fn handle(&mut self) -> Result<&mut dyn Write> { Ok(match *self { OutputType::Pager(ref mut command) => command .stdin .as_mut() .chain_err(|| "Could not open stdin for pager")?, OutputType::Stdout(ref mut handle) => handle, }) } } impl Drop for OutputType { fn drop(&mut self) { if let OutputType::Pager(ref mut command) = *self { let _ = command.wait(); } } }
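The environment fallback above resolves `DELTA_PAGER`, then `BAT_PAGER`, then `PAGER`; a minimal standalone sketch of that precedence (the function names are illustrative, and the sketch deliberately omits the `replace_arguments_to_less` bookkeeping that applies only when the value came from `PAGER`):

use std::env;

// Returns the first pager set in the environment, mirroring the precedence
// used by `try_pager` above: DELTA_PAGER, then BAT_PAGER, then PAGER.
fn pager_from_env() -> Option<String> {
    ["DELTA_PAGER", "BAT_PAGER", "PAGER"]
        .iter()
        .find_map(|var| env::var(var).ok())
}

// An explicit config value wins over the environment; `less` is the default.
fn effective_pager(from_config: Option<&str>) -> String {
    from_config
        .map(str::to_string)
        .or_else(pager_from_env)
        .unwrap_or_else(|| "less".to_string())
}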
psmdb.go
package psmdb import ( "fmt" "os" "strings" v110 "github.com/Percona-Lab/percona-dbaas-cli/dbaas-lib/engines/k8s-psmdb/types/v110" v120 "github.com/Percona-Lab/percona-dbaas-cli/dbaas-lib/engines/k8s-psmdb/types/v120" v130 "github.com/Percona-Lab/percona-dbaas-cli/dbaas-lib/engines/k8s-psmdb/types/v130" "github.com/Percona-Lab/percona-dbaas-cli/dbaas-lib/k8s" "github.com/Percona-Lab/percona-dbaas-cli/dbaas-lib/pdl" "github.com/pkg/errors" ) const ( provider = "k8s" engine = "psmdb" defaultVersion Version = "1.3.0" ) var objects map[Version]VersionObject func
init
() { // Register psmdb engine in dbaas psmdb, err := NewPSMDBController("", "k8s") if err != nil { fmt.Println("Cant start. Setup your kubectl") os.Exit(1) } pdl.RegisterEngine(provider, engine, psmdb) // Register psmdb versions objects = make(map[Version]VersionObject) objects["1.1.0"] = VersionObject{ k8s: k8s.Objects{ Bundle: v110.Bundle, }, psmdb: &v110.PerconaServerMongoDB{}, } objects["1.2.0"] = VersionObject{ k8s: k8s.Objects{ Bundle: v120.Bundle, }, psmdb: &v120.PerconaServerMongoDB{}, } objects["1.3.0"] = VersionObject{ k8s: k8s.Objects{ Bundle: v130.Bundle, }, psmdb: &v130.PerconaServerMongoDB{}, } } // PSMDB represents PSMDB Operator controller type PSMDB struct { cmd *k8s.Cmd conf PSMDBCluster platformType k8s.PlatformType } type Version string type PSMDBMeta struct { Name string `json:"name"` Namespace string `json:"namespace"` } type AppState string const ( AppStateUnknown AppState = "unknown" AppStateInit = "initializing" AppStateReady = "ready" AppStateError = "error" ) type PSMDBClusterStatus struct { Messages []string `json:"message,omitempty"` Status AppState `json:"state,omitempty"` } type AppStatus struct { Size int32 `json:"size,omitempty"` Ready int32 `json:"ready,omitempty"` Status AppState `json:"status,omitempty"` Message string `json:"message,omitempty"` } type PSMDBResource struct { Meta PSMDBMeta `json:"metadata"` Status PSMDBClusterStatus } type k8sCluster struct { Items []PSMDBResource `json:"items"` } type k8sStatus struct { Status PSMDBClusterStatus } type PVCMeta struct { Name string `json:"name"` Namespace string `json:"namespace"` SelfLink string `json:"selflink"` UID string `json:"uid"` } type k8sPVC struct { Meta PVCMeta `json:"metadata"` } type VersionObject struct { k8s k8s.Objects psmdb PSMDBCluster } // NewPSMDBController returns new PSMDBOperator Controller func NewPSMDBController(envCrt, provider string) (*PSMDB, error) { var psmdb PSMDB if len(provider) == 0 || provider == "k8s" { k8sCmd, err := k8s.New(envCrt) if err != nil { return nil, errors.Wrap(err, "new Cmd") } psmdb.cmd = k8sCmd psmdb.platformType = k8sCmd.GetPlatformType() } return &psmdb, nil } func (p PSMDB) bundle(v map[Version]VersionObject, operatorVersion string) []k8s.BundleObject { if operatorVersion == "" { operatorVersion = v[defaultVersion].psmdb.GetOperatorImage() } for i, o := range v[defaultVersion].k8s.Bundle { if o.Kind == "Deployment" && o.Name == p.operatorName() { v[defaultVersion].k8s.Bundle[i].Data = strings.Replace(o.Data, "{{image}}", operatorVersion, -1) } } return v[defaultVersion].k8s.Bundle } func (p PSMDB) getCR(cluster PSMDBCluster) (string, error) { return cluster.GetCR() } func (p *PSMDB) operatorName() string { return "percona-server-mongodb-operator" }
generate_folder_directory.py
import os
import sys
from datetime import datetime


class GenerateStructure:
    def __init__(self, number_of_lectures, number_of_labs, number_of_homework, number_of_sections,
                 number_of_advanced_sections, folders, default_directory, default_directory_lectures,
                 default_directory_lecture_playground, default_directory_labs, default_directory_labs_playground,
                 default_directory_homework, default_directory_homework_playground,
                 default_directory_advanced_sections, default_directory_advanced_sections_playground,
                 default_directory_sections, default_directory_sections_playground):
        self.number_of_lectures = number_of_lectures
        self.number_of_labs = number_of_labs
        self.number_of_homework = number_of_homework
        self.number_of_sections = number_of_sections
        self.number_of_advanced_sections = number_of_advanced_sections
        self.folders = folders
        self.default_directory = default_directory
        self.default_directory_lectures = default_directory_lectures
        self.default_directory_lecture_playground = default_directory_lecture_playground
        self.default_directory_labs = default_directory_labs
        self.default_directory_labs_playground = default_directory_labs_playground
        self.default_directory_homework = default_directory_homework
        self.default_directory_homework_playground = default_directory_homework_playground
        self.default_directory_advanced_sections = default_directory_advanced_sections
        self.default_directory_advanced_sections_playground = default_directory_advanced_sections_playground
        self.default_directory_sections = default_directory_sections
        self.default_directory_sections_playground = default_directory_sections_playground

    @staticmethod
    def create_directory(directory, fold):
        os.makedirs(directory + fold)
        open(directory + fold + '/.placeholder', 'w').close()

    @staticmethod
    def create_index(directory, title, category, slug, i):
        with open(directory + "index.md", 'a') as index:
            index.write("Title: " + title + str(i) + ':\n' +
                        "Category: " + category + '\n' +
                        "Date: " + datetime.today().strftime('%Y-%m-%d') + '\n' +
                        "Author: " + '\n' +
                        "Slug: " + slug + str(i) + '\n' +
                        "Tags: ADD TAGS HERE" + '\n\n\n' +
                        "## Slides")

    # Create Lectures folders
    def create_lectures(self, directory, number_of_lectures, folders):
        for i in range(1, number_of_lectures + 1):
            if not os.path.exists(directory):
                os.makedirs(directory)
            if not os.path.exists(directory + "lecture" + str(i)):
                os.makedirs(directory + "lecture" + str(i))
                directory_lectures = directory + "lecture" + str(i) + '/'
                for fold in folders:
                    self.create_directory(directory_lectures, fold)
                self.create_index(directory_lectures, "Lecture ", "lectures", "lecture", i)
            else:
                print("The directory : '", directory + "lecture" + str(i), "' already exists.")

    # Create Lectures playground folders
    def create_lecture_playground(self, directory, number_of_lectures, folders):
        for i in range(1, number_of_lectures + 1):
            if not os.path.exists(directory):
                os.makedirs(directory)
            if not os.path.exists(directory + "lecture" + str(i)):
                os.makedirs(directory + "lecture" + str(i))
                directory_lectures = directory + "lecture" + str(i) + '/'
                for fold in folders:
                    self.create_directory(directory_lectures, fold)
            else:
                print("The directory : '", directory + "lecture" + str(i), "' already exists.")

    # Create Labs folders
    def create_labs(self, directory, number_of_labs, folders):
        for i in range(1, number_of_labs + 1):
            if not os.path.exists(directory):
                os.makedirs(directory)
            if not os.path.exists(directory + "lab" + str(i)):
                os.makedirs(directory + "lab" + str(i))
                directory_lab = directory + "lab" + str(i) + '/'
                for fold in folders:
                    self.create_directory(directory_lab, fold)
                self.create_index(directory_lab, "Lab ", "labs", "lab", i)
            else:
                print("The directory : '", directory + "lab" + str(i), "' already exists.")

    # Create Homework folders
    def create_homework(self, directory, number_of_homework, folders):
        for i in range(0, number_of_homework + 1):
            if not os.path.exists(directory):
                os.makedirs(directory)
            if not os.path.exists(directory + "homework" + str(i)):
                os.makedirs(directory + "homework" + str(i))
                directory_homework = directory + "homework" + str(i) + '/'
                for fold in folders:
                    self.create_directory(directory_homework, fold)
            else:
                print("The directory : '", directory + "homework" + str(i), "' already exists.")

    # Create Sections folders
    def create_section(self, directory, number_of_sections, folders):
        for i in range(1, number_of_sections + 1):
            if not os.path.exists(directory):
                os.makedirs(directory)
            if not os.path.exists(directory + "section" + str(i)):
                os.makedirs(directory + "section" + str(i))
                directory_section = directory + "section" + str(i) + '/'
                for fold in folders:
                    self.create_directory(directory_section, fold)
                self.create_index(directory_section, "Section ", "section", "section", i)
            else:
                print("The directory : '", directory + "section" + str(i), "' already exists.")

    # Create Advanced Sections folders
    def create_a_section(self, directory, number_of_advanced_sections, folders):
        for i in range(1, number_of_advanced_sections + 1):
            if not os.path.exists(directory):
                os.makedirs(directory)
            if not os.path.exists(directory + "a-sec" + str(i)):
                os.makedirs(directory + "a-sec" + str(i))
                directory_a_section = directory + "a-sec" + str(i) + '/'
                for fold in folders:
                    self.create_directory(directory_a_section, fold)
                self.create_index(directory_a_section, "Advanced Sections ", "a-sections", "a-sections", i)
            else:
                print("The directory : '", directory + "a-sec" + str(i), "' already exists.")


if __name__ == "__main__":
    number_of_lectures = 24
    number_of_labs = 13
    number_of_homework = 8
    number_of_advanced_sections = 6
    number_of_sections = 13
    folders = ["data", "fig", "notes", "presentation"]
    default_directory = os.path.dirname(os.path.realpath(__file__)) + '/'
    default_directory_lectures = default_directory + "lectures/"
    default_directory_lecture_playground = default_directory + "lectures_playground/"
    default_directory_labs = default_directory + "labs/"
    default_directory_labs_playground = default_directory + "labs_playground/"
    default_directory_homework = default_directory + "homeworks/"
    default_directory_homework_playground = default_directory + "homeworks_playground/"
    default_directory_advanced_sections = default_directory + "a-sections/"
    default_directory_advanced_sections_playground = default_directory + "a_sections_playground/"
    default_directory_sections = default_directory + "sections/"
    default_directory_sections_playground = default_directory + "sections_playground/"

    try:
        print("The default values are :")
        print("Number of lectures: ", number_of_lectures)
        print("Number of labs: ", number_of_labs)
        print("Number of sections: ", number_of_sections)
        print("Number of a-sections: ", number_of_advanced_sections)
        print("Default directory: ", default_directory, '\n')
        change = input("Do you want to change it ? Please press 'y' if you want or 'n' if you do not change it: ")
        if change == '':
            change = 'n'
        while change not in ('y', 'n'):
            change = input("Do you want to change it ? Please press 'y' if you want or 'n' if you do not change it: ")
        if change == 'y':
            default_directory = input("Please enter the default directory: ")
            print("Default directory: ", default_directory, '\n')
            # Rebuild every derived path from the new base directory so no
            # stale paths built from the defaults above are left behind.
            default_directory_lectures = default_directory + "lectures/"
            default_directory_lecture_playground = default_directory + "lectures_playground/"
            default_directory_labs = default_directory + "labs/"
            default_directory_labs_playground = default_directory + "labs_playground/"
            default_directory_homework = default_directory + "homeworks/"
            default_directory_homework_playground = default_directory + "homeworks_playground/"
            default_directory_advanced_sections = default_directory + "a-sections/"
            default_directory_advanced_sections_playground = default_directory + "a_sections_playground/"
            default_directory_sections = default_directory + "sections/"
            default_directory_sections_playground = default_directory + "sections_playground/"
            number_of_lectures = int(input("Please enter the number of lectures: "))
            print("Number of lectures: ", number_of_lectures, '\n')
            number_of_labs = int(input("Please enter the number of labs: "))
            print("Number of labs: ", number_of_labs, '\n')
            number_of_homework = int(input("Please enter the number of homework: "))
            print("Number of homework: ", number_of_homework, '\n')
            number_of_advanced_sections = int(input("Please enter the number of advanced sections: "))
            print("Number of advanced sections: ", number_of_advanced_sections, '\n')
        ge = GenerateStructure(number_of_lectures, number_of_labs, number_of_homework, number_of_sections,
                               number_of_advanced_sections, folders, default_directory, default_directory_lectures,
                               default_directory_lecture_playground, default_directory_labs,
                               default_directory_labs_playground, default_directory_homework,
                               default_directory_homework_playground, default_directory_advanced_sections,
                               default_directory_advanced_sections_playground, default_directory_sections,
                               default_directory_sections_playground)
        ge.create_lectures(default_directory_lectures, number_of_lectures, folders)  # Create Lectures folders
        ge.create_lecture_playground(default_directory_lecture_playground, number_of_lectures, folders)  # Create Lectures playground folders
        ge.create_labs(default_directory_labs, number_of_labs, folders)  # Create Labs folders
        ge.create_labs(default_directory_labs_playground, number_of_labs, folders)  # Create Labs playground folders
        ge.create_homework(default_directory_homework, number_of_homework, folders)  # Create Homework folders
        ge.create_homework(default_directory_homework_playground, number_of_homework, folders)  # Create Homework playground folders
        ge.create_section(default_directory_sections, number_of_sections, folders)  # Create sections folders
        ge.create_section(default_directory_sections_playground, number_of_sections, folders)  # Create sections playground folders
        ge.create_a_section(default_directory_advanced_sections, number_of_advanced_sections, folders)  # Create advanced sections folders
        ge.create_a_section(default_directory_advanced_sections_playground, number_of_advanced_sections, folders)  # Create advanced sections playground folders
    except OSError as err:
        print("OS error: {0}".format(err))
    except ValueError:
        print("Could not convert data to an integer.")
    except:
        print("Unexpected error:", sys.exc_info()[0])
        raise
dumb.rs
//! A really dumb PPU benchmark #![feature(test)] extern crate breeze_core; extern crate breeze_backends; extern crate breeze_backend; extern crate test; use breeze_core::snes::Emulator; use breeze_core::rom::Rom; use breeze_backend::Renderer; use breeze_backend::dummy::{DummyRenderer, DummySink}; use test::Bencher; use std::iter; fn build_rom() -> Vec<u8> { let code = [ 0xA9, 0x00, // lda #0 0xA2, 0x00, // ldx #0 0xA0, 0x00, // ldy #0 0x9A, // txs // Let the PPU do some work // Disable forced blank and set brightness to max 0xA9, 0x0F, // lda #$0F 0x8D, 0x00, 0x21, // sta $2100 // Enable all layers on the main screen 0xA9, 0x1F, // lda #$1F 0x8D, 0x2C, 0x21, // sta $212C // Enter endless loop 0xA9, 0x00, // lda #0 0xF0, 0xFE, // beq -2 (self) ]; // Build the header let mut header = Vec::with_capacity(32); // First 21 Bytes: Title (ASCII) let name = b"BENCHROM"; header.extend(name.into_iter() .chain(iter::repeat(&b' ')) .take(21)); header.push(0); // ROM makeup Byte - LoROM, no FastROM header.push(0); // Chipset (none/don't care) header.push(6); // ROM size - $400<<6 = 64K bytes header.push(0); // Cart. RAM size - $400 bytes header.push(0); // Vendor code header.push(0); header.push(0); // Version header.push(0x55); // Checksum (invalid) header.push(0x55); header.push(0xAA); // Checksum complement header.push(0xAA); // Extended header (ignored) assert_eq!(header.len(), 32); assert!(code.len() < 0x8000 - 64, "code size too high"); // Now we can put the image together // The header is located (for LoROM) at `0x8000 - 64`, preceded by code that will be mapped to // 0x8000+, followed by the extended header, the interrupt vectors, and the data section(s) // (in our case) let mut rom = code.iter() .cloned() .chain(iter::repeat(0)) .take(0x8000 - 64) .chain(header.into_iter()) .chain(iter::repeat(0)) .take(0x8000 * 2) .collect::<Vec<_>>(); // Set the correct vectors (emulation mode) // RESET @ 0x8000 rom[0x7ffc] = 0x00; rom[0x7ffd] = 0x80; // This should now be a valid, runnable 64K ROM image (minus the checksum) rom } #[bench] fn dumb(b: &mut Bencher)
{ let rom = build_rom(); let rom = Rom::from_bytes(&rom).unwrap(); let mut emu = Emulator::new(rom, DummyRenderer::create().unwrap(), DummySink); b.iter(|| { emu.snes.render_frame(|_| Ok(vec![])).unwrap(); }); }
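The two bytes patched at `0x7ffc`/`0x7ffd` are the little-endian emulation-mode RESET vector; an illustrative sanity test (not part of the original benchmark) confirming the vector points at `0x8000` and the image has the expected 64K size:

#[test]
fn reset_vector_points_at_code() {
    let rom = build_rom();
    // The RESET vector is stored little-endian and must point at 0x8000,
    // where LoROM maps the first code byte of the image.
    let reset = u16::from_le_bytes([rom[0x7ffc], rom[0x7ffd]]);
    assert_eq!(reset, 0x8000);
    // 64K image in total, matching the "ROM size" header byte ($400 << 6).
    assert_eq!(rom.len(), 0x8000 * 2);
}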
redolog_test.go
/* Copyright IBM Corp. All Rights Reserved. SPDX-License-Identifier: Apache-2.0 */ package statecouchdb import ( "fmt" "io/ioutil" "os" "testing" "github.com/davecgh/go-spew/spew" "github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/statedb" "github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/version" "github.com/hyperledger/fabric/core/ledger/ledgerconfig" "github.com/stretchr/testify/assert" ) func TestRedoLogger(t *testing.T)
{ provider, cleanup := redologTestSetup(t) defer cleanup() loggers := []*redoLogger{} records := []*redoRecord{} verifyLogRecords := func() { for i := 0; i < len(loggers); i++ { retrievedRec, err := loggers[i].load() assert.NoError(t, err) assert.Equal(t, records[i], retrievedRec) } } // write log records for multiple channels for i := 0; i < 10; i++ { logger := provider.newRedoLogger(fmt.Sprintf("channel-%d", i)) rec, err := logger.load() assert.NoError(t, err) assert.Nil(t, rec) loggers = append(loggers, logger) batch := statedb.NewUpdateBatch() blkNum := uint64(i) batch.Put("ns1", "key1", []byte("value1"), version.NewHeight(blkNum, 1)) batch.Put("ns2", string([]byte{0x00, 0xff}), []byte("value3"), version.NewHeight(blkNum, 3)) batch.PutValAndMetadata("ns2", string([]byte{0x00, 0xff}), []byte("value3"), []byte("metadata"), version.NewHeight(blkNum, 4)) batch.Delete("ns2", string([]byte{0xff, 0xff}), version.NewHeight(blkNum, 5)) rec = &redoRecord{ UpdateBatch: batch, Version: version.NewHeight(blkNum, 10), } records = append(records, rec) assert.NoError(t, logger.persist(rec)) } verifyLogRecords() // overwrite logrecord for one channel records[5].UpdateBatch = statedb.NewUpdateBatch() records[5].Version = version.NewHeight(5, 5) assert.NoError(t, loggers[5].persist(records[5])) verifyLogRecords() }
func TestCouchdbRedoLogger(t *testing.T) { testEnv := NewTestVDBEnv(t) defer testEnv.Cleanup() // commitToRedologAndRestart - a helper function that commits directly to redologs and restart the statedb commitToRedologAndRestart := func(newVal string, version *version.Height) { batch := statedb.NewUpdateBatch() batch.Put("ns1", "key1", []byte(newVal), version) db, err := testEnv.DBProvider.GetDBHandle("testcouchdbredologger") assert.NoError(t, err) vdb := db.(*VersionedDB) assert.NoError(t, vdb.redoLogger.persist( &redoRecord{ UpdateBatch: batch, Version: version, }, ), ) testEnv.CloseAndReopen() } // verifyExpectedVal - a helper function that verifies the statedb contents verifyExpectedVal := func(expectedVal string, expectedSavepoint *version.Height) { db, err := testEnv.DBProvider.GetDBHandle("testcouchdbredologger") assert.NoError(t, err) vdb := db.(*VersionedDB) vv, err := vdb.GetState("ns1", "key1") assert.NoError(t, err) assert.Equal(t, expectedVal, string(vv.Value)) savepoint, err := vdb.GetLatestSavePoint() assert.NoError(t, err) assert.Equal(t, expectedSavepoint, savepoint) } // initialize statedb with initial set of writes db, err := testEnv.DBProvider.GetDBHandle("testcouchdbredologger") if err != nil { t.Fatalf("Failed to get database handle: %s", err) } vdb := db.(*VersionedDB) batch1 := statedb.NewUpdateBatch() batch1.Put("ns1", "key1", []byte("value1"), version.NewHeight(1, 1)) vdb.ApplyUpdates(batch1, version.NewHeight(1, 1)) // make redolog one block ahead than statedb - upon restart the redolog should get applied commitToRedologAndRestart("value2", version.NewHeight(2, 1)) verifyExpectedVal("value2", version.NewHeight(2, 1)) // make redolog two blocks ahead than statedb - upon restart the redolog should be ignored commitToRedologAndRestart("value3", version.NewHeight(4, 1)) verifyExpectedVal("value2", version.NewHeight(2, 1)) // make redolog one block behind than statedb - upon restart the redolog should be ignored commitToRedologAndRestart("value3", version.NewHeight(1, 5)) verifyExpectedVal("value2", version.NewHeight(2, 1)) // A nil height should cause skipping the writing of redo-record db, _ = testEnv.DBProvider.GetDBHandle("testcouchdbredologger") vdb = db.(*VersionedDB) vdb.ApplyUpdates(batch1, nil) record, err := vdb.redoLogger.load() assert.NoError(t, err) assert.Equal(t, version.NewHeight(1, 5), record.Version) assert.Equal(t, []byte("value3"), record.UpdateBatch.Get("ns1", "key1").Value) // A batch that does not contain PostOrderWrites should cause skipping the writing of redo-record db, _ = testEnv.DBProvider.GetDBHandle("testcouchdbredologger") vdb = db.(*VersionedDB) batchWithNoGeneratedWrites := batch1 batchWithNoGeneratedWrites.ContainsPostOrderWrites = false vdb.ApplyUpdates(batchWithNoGeneratedWrites, version.NewHeight(2, 5)) record, err = vdb.redoLogger.load() assert.NoError(t, err) assert.Equal(t, version.NewHeight(1, 5), record.Version) assert.Equal(t, []byte("value3"), record.UpdateBatch.Get("ns1", "key1").Value) // A batch that contains PostOrderWrites should cause writing of redo-record db, _ = testEnv.DBProvider.GetDBHandle("testcouchdbredologger") vdb = db.(*VersionedDB) batchWithGeneratedWrites := batch1 batchWithGeneratedWrites.ContainsPostOrderWrites = true vdb.ApplyUpdates(batchWithNoGeneratedWrites, version.NewHeight(3, 4)) record, err = vdb.redoLogger.load() assert.NoError(t, err) assert.Equal(t, version.NewHeight(3, 4), record.Version) assert.Equal(t, []byte("value1"), record.UpdateBatch.Get("ns1", "key1").Value) } func redologTestSetup(t 
*testing.T) (p *redoLoggerProvider, cleanup func()) { dbPath := ledgerconfig.GetCouchdbRedologsPath() assert.NoError(t, os.RemoveAll(dbPath)) p = newRedoLoggerProvider(dbPath) cleanup = func() { p.close() assert.NoError(t, os.RemoveAll(dbPath)) } return } // testGenerareRedoRecord is the code that generates a serialized redo record into a // file based on the current version of the code, so that the file with serialized data // can get checked into source control. The following test function // 'TestReadExistingRedoRecord' verifies data compatibility in later builds/releases. // Specifically, it verifies that the changes in the struct statedb.NewUpdateBatch // are compatible such that the redo records persisted from the earlier commit/release // can still be deserialized on later commits/releases. // In order to generate this serialized record, change this function name to start with // uppercase "T" so that execution of go test will generate the test file. func testGenerareRedoRecord(t *testing.T) { val, err := encodeRedologVal(constructSampleRedoRecord()) assert.NoError(t, err) assert.NoError(t, ioutil.WriteFile("testdata/persisted_redo_record", val, 0644)) } func TestReadExistingRedoRecord(t *testing.T) { b, err := ioutil.ReadFile("testdata/persisted_redo_record") assert.NoError(t, err) rec, err := decodeRedologVal(b) assert.NoError(t, err) t.Logf("rec = %s", spew.Sdump(rec)) assert.Equal(t, constructSampleRedoRecord(), rec) } func constructSampleRedoRecord() *redoRecord { batch := statedb.NewUpdateBatch() batch.Put("ns1", "key1", []byte("value1"), version.NewHeight(1, 1)) batch.Put("ns2", string([]byte{0x00, 0xff}), []byte("value3"), version.NewHeight(3, 3)) batch.PutValAndMetadata("ns2", string([]byte{0x00, 0xff}), []byte("value3"), []byte("metadata"), version.NewHeight(4, 4)) batch.Delete("ns2", string([]byte{0xff, 0xff}), version.NewHeight(5, 5)) return &redoRecord{ UpdateBatch: batch, Version: version.NewHeight(10, 10), } }
Gruntfile.js
"use strict"; module.exports = function (grunt) { // A temporary directory used by amdserialize to output the processed modules. var tmpdir = "./tmp/"; // The final output directory. var outdir = "./build/"; // The grunt.config property populated by amdserialize, containing the // list of files to include in the layer. var outprop = "amdoutput"; var decorPatterns = [ // Include "decor/*.js", "requirejs-dplugins/has.js", "requirejs-dplugins/i18n.js", "requirejs-dplugins/css.js", "requirejs-dplugins/jquery.js", "lie/dist/lie.js", // Exclude "!decor/Gruntfile.js" ]; var delitePatterns = [ // Include "delite/**/*.js", //"dojo/dom-geometry.js", // For dtreemap "requirejs-text/text.js", // Exclude "!delite/Gruntfile.js", "!delite/node_modules/**", "!delite/nls/**", "!delite/samples/**", "!delite/tests/**", "!delite/TooltipDialog.js", "!delite/Tooltip.js", "!delite/Overlay.js", "!delite/Opener.js", "!delite/DialogLevelManager.js", "!delite/DialogBase.js", "!delite/Dialog.js" ]; var delitefulPatterns = [ // Include "deliteful/**/*.js", // Exclude "!deliteful/tests/**", "!deliteful/samples/**", "!deliteful/docs/**", "!deliteful/**/holodark/**", "!deliteful/**/ios/**", "!deliteful/Gruntfile.js" ]; var dtreemapPatterns = [ // Include "dtreemap/**/*.js", // Exclude "!dtreemap/tests/**", "!dtreemap/demos/**", "!dtreemap/docs/**", "!dtreemap/Gruntfile.js" ]; var expandFiles = { filter: "isFile" }; function trimExt(path) { return path.slice(0, -3); } grunt.initConfig({ // The loader config should go here. amdloader: { baseUrl: "./", // Enable build of requirejs-text/text inlineText: true, map: { jquery: { "jquery/src/selector": "jquery/src/selector-native" // don't pull in sizzle } } }, amdbuild: { buildPlugin: true, // dir is the output directory. dir: tmpdir, runtimePlugins: [], // List of layers to build. layers: [{ name: "decor/layer", include: grunt.file.expand(expandFiles, decorPatterns).map(trimExt) }, { name: "dpointer/layer", includeFiles: ["dpointer/events.js", "dpointer/handlers/*.js"] }, { name: "ecma402/layer", include: ["ecma402/IntlShim"], exclude: ["requirejs-dplugins/has", "requirejs-text/text"] }, { name: "delite/layer", include: grunt.file.expand(expandFiles, delitePatterns).map(trimExt) .concat(["delite/theme!delite/themes/{{theme}}/global.css"]), excludeLayers: ["decor/layer", "dpointer/layer", "ecma402/layer"] }, { name: "deliteful/layer", include: grunt.file.expand(expandFiles, delitefulPatterns).map(trimExt), exclude: ["dstore/Memory", "dstore/Trackable", "dstore/Filter"], excludeLayers: ["decor/layer", "dpointer/layer", "ecma402/layer", "delite/layer"] }/*, { name: "dtreemap/layer", include: grunt.file.expand(expandFiles, dtreemapPatterns).map(trimExt), exclude: ["dstore/Memory", "dstore/Trackable", "dstore/Filter"], excludeLayers: ["decor/layer", "dpointer/layer", "delite/layer"] }, { name: "dcolor/layer", includeFiles: ["dcolor/*.js"],
name: "liaison/layer", includeFiles: ["liaison/**//*.js"], excludeFiles: ["liaison/delite/**", "liaison/polymer/**", "liaison/tests/**", "liaison/samples/**", "liaison/docs/**", "liaison/node_modules/**", "liaison/Gruntfile.js"] }, { name: "liaison/delite/layer", includeFiles: ["liaison/delite/**//*.js"], excludeFiles: ["liaison/delite/widgets/StarRating.js"] }*/] }, updateSamples: { liaison: { samples: { src: [ "samples/*", "samples/css/*", "!samples/loan.html" ] } }, "liaison/delite": { samples: { src: [ "../samples/delite/*", "!**/samples/delite/widgetskitchensink.html", "!**/loan.html" ]/*, deps: ["delite", "liaison"]*/ } } }, // Config to allow uglify to generate the layer. uglify: { options: { banner: "<%= " + outprop + ".header%>", sourceMap: true }, dist: { src: "<%= " + outprop + ".modules.abs %>", dest: outdir + "<%= " + outprop + ".layerPath %>" } }, // Copy the plugin files to the real output directory. copy: { plugins: { expand: true, cwd: tmpdir, src: "<%= " + outprop + ".plugins.rel %>", dest: outdir, dot: true } }, // Erase temp directory and previous build clean: { erase: [outdir],//.concat(libDirsBuild), finish: [tmpdir] } }); // The main build task. grunt.registerTask("amdbuild", function (amdloader) { function useAmdDepsScan(name) { var layerToGetDeps = ["delite/layer", "decor/layer", "deliteful/layer", "ecma402/layer", "dtreemap/layer"]; return layerToGetDeps.indexOf(name) >= 0; } // Create tasks list var tasksList = []; var name = this.name; var layers = grunt.config(name).layers; layers.forEach(function (layer) { if (useAmdDepsScan(layer.name)) { tasksList.push("amddepsscan:" + layer.name + ":" + name + ":" + amdloader); } else { tasksList.push("amddirscan:" + layer.name + ":" + name + ":" + amdloader); } tasksList.push("amdserialize:" + layer.name + ":" + name + ":" + amdloader + ":" + outprop); tasksList.push("uglify"); tasksList.push("correctSourceMap:" + layer.name + ":" + name + ":" + outdir); // Remove references to useless html template before copying plugins files. tasksList.push("filterPluginFiles:\\.(html|json)\\.js$:" + outprop); tasksList.push("copy:plugins"); }); tasksList.push("updateBowers:" + name + ":" + outdir); tasksList.push("addBoot:" + name + ":" + outdir); tasksList.push("copyMetaFiles:" + name + ":" + outdir); tasksList.push("updateSamples:" + name + ":" + outdir); tasksList.push("copyBuildResults:" + name + ":" + outdir); grunt.task.run(tasksList); }); // Load the plugin that provides the "amd" task. grunt.loadNpmTasks("grunt-amd-build"); grunt.loadTasks("./tasks/"); // Load vendor plugins. grunt.loadNpmTasks("grunt-contrib-uglify"); grunt.loadNpmTasks("grunt-contrib-concat"); grunt.loadNpmTasks('grunt-contrib-copy'); grunt.loadNpmTasks('grunt-contrib-clean'); // Default task. grunt.registerTask("default", ["clean:erase", "amdbuild:amdloader", "amdreportjson:amdbuild", "clean:finish"]); };
excludeFiles: ["dcolor/Gruntfile.js"] }, {
wru.console.max.js
/*! (C) Andrea Giammarchi, @WebReflection - MIT Style License */ /**@license (C) Andrea Giammarchi, @WebReflection - MIT Style License */ // revisited by Andrea Giammarchi, @WebReflection // compatible with both Rhino and Node // now it is possible to include this file in the server console without rhinoTimers dependencies // @link http://stackoverflow.com/questions/2261705/how-to-run-a-javascript-function-asynchronously-without-using-settimeout // glory and fortune to Weston C for the initial hint // but it's also RIDICULOUS Rhino does not implement in-core timers properly! // condition to avoid problems with jsc if (typeof global != "undefined") { var setTimeout = global.setTimeout, setInterval = global.setInterval, clearInterval = global.clearInterval, clearTimeout = global.clearTimeout ; setTimeout || (function (timer, ids, slice, counter) { // did you know? // all browsers but IE accept one or more arguments // to pass to the callback after the timer/delay number // ... so does Rhino now! setInterval = global.setInterval = function setInterval(fn, delay) { return schedule(fn, delay, slice.call(arguments, 2), 1); }; setTimeout = global.setTimeout = function setTimeout(fn, delay) { return schedule(fn, delay, slice.call(arguments, 2)); }; clearInterval = global.clearInterval = clearTimeout = global.clearTimeout = function clearInterval(id) { ids[id].cancel(); timer.purge(); delete ids[id]; }; function schedule(fn, delay, args, interval) { var id = ++counter; ids[id] = new JavaAdapter(java.util.TimerTask,{run: function () { fn.apply(null, args); }}); interval ? timer.schedule(ids[id], delay, delay) : timer.schedule(ids[id], delay) ; return id; } })(new java.util.Timer(), {}, [].slice, 0); } else { // jsc specific hack !function (global, i, cbs, slice) { function setTimeout(cb, delay) { var t = new Date; while (new Date - t < delay); cb.apply(null, slice.call(arguments, 2)); } slice = cbs.slice; global.setTimeout = global.setInterval = setTimeout; global.clearInterval = global.clearTimeout = function () {}; }(this, 0, []); } var wru = function (window) {"use strict"; /** * Copyright (C) 2011 by Andrea Giammarchi, @WebReflection * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE.
*/ // console specific version function isGonnaBeLegen() { current = shift.call(queue); if (current) { if (typeof current == "function") { current = {name: current[NAME] || "anonymous", test: current}; } log(OUTPUT_SEPARATOR); log( (iHasIt(current, NAME) && current[NAME]) || (iHasIt(current, DESCRIPTION) && current[DESCRIPTION]) || UNKNOWN ); pass = []; fail = []; fatal = []; tmp = {}; giveItATry("setup"); fatal[LENGTH] || giveItATry("test"); waitForIt || Dary(); } else { showSummary(); } } function log(info, avoidNewLine) { info = info + (avoidNewLine ? "" : "\n"); try { // node 0.11+ alternative ... process.stdout.write(info); } catch(up) { try { // node 0.6 require("util").print(info); } catch(up) { try { // node 0.4 require("sys").print(info); } catch(up) { try { // hello Rhino // print uses println ... while we need print without \n java.lang.System.out.print(info); } catch(up) { try { // phantomjs or default fallback console.log(info); } catch(up) { // jsc and others print(info); } } } } } } function showSummary() { var code = 0, status; log(EMPTY); log(OUTPUT_SEPARATOR); switch (true) { case !!overallFatal: code++; status = "error"; log(ERROR + " " + overallFatal + " Errors"); break; case !!overallFail: code++; status = "fail"; log(FAILURE + EMPTY + overallFail + " Failures"); break; default: status = "pass"; log(OK + " " + overallPass + " Passes"); } wru.status = status; log(OUTPUT_SEPARATOR); log(EMPTY); wru.after(); try { // node.js process.exit(code); } catch(up) { // rhino quit(); } } function writeItOrdered(fail) { for (var i = 0, length = fail[LENGTH]; i < length; log(" " + (++i) + ". " + fail[i - 1]) ); } function Dary() { clearDaryTimeou(); overallPass += pass[LENGTH]; overallFail += fail[LENGTH]; overallFatal += fatal[LENGTH]; if (fatal[LENGTH]) { prefix = ERROR; writeItOrdered(fatal); } else if(fail[LENGTH]) { prefix = FAILURE; writeItOrdered(fail); } else { prefix = OK; } log(prefix + " passes: " + pass[LENGTH] + ", fails: " + fail[LENGTH] + ", errors: " + fatal[LENGTH]); ci = 0; prefix = EMPTY; isGonnaBeLegen(); } // common functions for all versions function giveItATry(name) { if (iHasIt(current, name)) { try { current[name](tmp); } catch(doooodeThisIsBAD) { push.call(fatal, EMPTY + doooodeThisIsBAD); } } } function iHasIt(object, name) { return hasOwnProperty.call(object, name); } function messItUp() { return random() < .5 ? -1 : 1; } function
() { if (daryTimeout) { clearTimeout(daryTimeout); daryTimeout = 0; } giveItATry("teardown"); } var // wru library core wru = { timeout: TIMEOUT, assert: function assert(description, result) { // if no description provided, variables are shifted // these are both valid wru.assert calls indeed // wru.assert(truishValue); // wru.assert("test description", truishValue); if (arguments[LENGTH] == 1) { result = description; description = UNKNOWN; } // flag used in wru.async to verify at least // one assertion was performed called = TRUE; // store the result in the right collection push.call(result ? pass : fail, prefix + description); // just to add a bit of sugar return result; }, async: function async(description, callback, timeout, p) { var delay = timeout || wru.timeout || (wru.timeout = TIMEOUT); // p is used as a sentinel // it defines the anonymous name // if necessary and it's used to flag the timeout p = ++waitForIt; // if no description provided, variables are shifted // these are all valid wru.async calls indeed, timeout is optional // wru.async(function () { ... }) // wru.async("test description", function () { ... }) // wru.async(function () { ... }, timeout) // wru.async("test description", function () { ... }, timeout) if (typeof description == "function") { delay = callback || wru.timeout; callback = description; description = "asynchronous test #" + p; } // if in *TIMEOUT* time nothing happens ... timeout = setTimeout(function () { // p is flagged as 0 p = 0; // timeout is handled as failure, not error (could be the server) push.call(fail, description); // if there is no reason to waitForIt then it's time to call Dary() --waitForIt || (daryTimeout = setTimeout(Dary, 0)); }, // timeout can be specified // this procedure ensures that it's // a number and it's greater than 0 abs(delay) || wru.timeout ); // the async function is a wrapper around the passed callback return function async() { // if it's executed after the timeout nothing happens // since the failure has already been notified if (!p) return; // called is always set as *TRUE* during any assertion // this indicates if the callback made at least one assertion // for example, in this case the callback could be called many times // with different readyState ... however, only on readyState 4 // there is the assertion we are interested in, e.g.
// // xhr.onreadystatechange = wru.async(function (){ // if (this.readyState == 4) // wru.assert("content", this.responseText.length) // ; // }); // // in the above example called will be flagged as true // only during the last readyState call called = FALSE; // simply recycled "string" variable // prefix will be internally used by assert during function execution prefix = description + ": "; // the original callback is called with proper *this* if specified try { callback.apply(this, arguments); } catch(doooodeThisIsBAD) { // if there is an Error // the test is screwed up // called has to be set as *TRUE* to invalidate the test called = TRUE; // message is "casted" to avoid IE host object errors // (or any other possible edge case) push.call(fatal, prefix + doooodeThisIsBAD); } // prefix can be *EMPTY* string again now prefix = EMPTY; // a failure or at least an assertion if (called) { // timeout not necessary anymore clearTimeout(timeout); // if there is no reason to waitForIt then it's time to call Dary() --waitForIt || (daryTimeout = setTimeout(Dary, 0)); } }; }, // wru.test({...test...}) // wru.test([{...test...}, {...test...}, ...]) // the {...test...} object should have a string name and a function test property // optionally a function setup and a function teardown too test: function test(list, after) { // in case you need to do something after wru.after = after || function () {}; // test may be called multiple times // queue should simply concatenate other calls queue = concat.apply(queue, [list]); // if wru.random is true, the queue is randomized // this is to make tests independent from each other wru.random && sort.call(queue, messItUp); // if there is no test to waitForIt // Dary() has been called already // we can proceed with next test // invoking isGonnaBeLegen() waitForIt || isGonnaBeLegen(); } }, // common private variables / constants / shortcuts TRUE = true, FALSE = !TRUE, TIMEOUT = 100, EMPTY = " ", UNKNOWN = "unknown", LENGTH = "length", NAME = "name", DESCRIPTION = "description", LISTART = "<li>", LIEND = "</li>", cursor = "\\|/-", hasOwnProperty = wru.hasOwnProperty, prefix = EMPTY, charAt = prefix.charAt, slice = prefix.slice, queue = [], concat = queue.concat, join = queue.join, push = queue.push, shift = queue.shift, sort = queue.sort, waitForIt = 0, ci = 0, overallPass = 0, overallFail = 0, overallFatal = 0, daryTimeout = 0, // these variables are used on console version only ERROR = "\x1B[1;31mERROR\x1B[0m", FAILURE = "\x1B[0;31mFAILURE\x1B[0m", OK = "\x1B[0;32mOK\x1B[0m", OUTPUT_SEPARATOR = "------------------------------", // shared across the whole private scope Math, abs, random, setTimeout, clearTimeout, current, node, pass, fail, fatal, tmp, called ; wru.log = function (obj, printOnly) { try { if (printOnly) { throw new Error; } console.log(obj); } catch(o_O) { log(obj, 0); } }; // node.js exports if (typeof __dirname != "undefined") { window.wru = wru; window.assert = wru.assert; window.async = wru.async; window.test = wru.test; window.log = wru.log; window.random = false; Object.defineProperty(window, "status", {get: function () { return wru.status; }}); Object.defineProperty(window, "timeout", { get: function () { return wru.timeout; }, set: function (value) { wru.timeout = parseInt(value, 10) || wru.timeout; } }); // re-assign window to make it global window = global; } // these are window/global object dependent // must be eventually defined after wru.export.js, if used Math = window.Math; abs = Math.abs; random = Math.random; setTimeout =
window.setTimeout; clearTimeout = window.clearTimeout; // "THE CURSOR" http://3site.eu/cursor window.setInterval(function () { waitForIt && log(EMPTY + charAt.call(cursor, ci++%4) + "\b\b", true); }, TIMEOUT); //^ this is useful to test internals on the non-minified version wru.debug = function (O_o) { return eval("(" + O_o + ")"); }; //$ and this block is removed at build time TIMEOUT *= TIMEOUT; // by default, timeout is 10000 (10 seconds) // this is the place you can set it, e.g. // TIMEOUT = 2000; // 2 seconds wru.random = FALSE; // by default test order is preserved // set wru.random = TRUE to randomly sort them return wru; }(this);
clearDaryTimeou
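For orientation, a minimal usage sketch of the API documented above (name/setup/test/teardown members, wru.assert, and a wru.async-wrapped callback); the test content itself is illustrative only, not part of wru:

wru.test([{
  name: "synchronous example",
  setup: function (tmp) { tmp.value = 1; },          // runs before the test
  test: function (tmp) { wru.assert("value set", tmp.value === 1); },
  teardown: function (tmp) { delete tmp.value; }     // runs after the test
}, {
  name: "asynchronous example",
  test: function () {
    // wru.async wraps the callback; the test fails if it never fires
    setTimeout(wru.async(function () {
      wru.assert("callback fired", true);
    }), 50);
  }
}]);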
handler.go
package runtimemapping import ( "context" "encoding/json" "fmt" "net/http" "github.com/form3tech-oss/jwt-go" "github.com/kyma-incubator/compass/components/director/pkg/log" "github.com/kyma-incubator/compass/components/director/internal/model" "github.com/kyma-incubator/compass/components/director/internal/oathkeeper" "github.com/kyma-incubator/compass/components/director/pkg/persistence" "github.com/pkg/errors" ) //go:generate mockery --name=TokenVerifier --output=automock --outpkg=automock --case=underscore type TokenVerifier interface { Verify(ctx context.Context, token string) (*jwt.MapClaims, error) } //go:generate mockery --name=RuntimeService --output=automock --outpkg=automock --case=underscore type RuntimeService interface { GetByTokenIssuer(ctx context.Context, issuer string) (*model.Runtime, error) } //go:generate mockery --name=TenantService --output=automock --outpkg=automock --case=underscore type TenantService interface { GetExternalTenant(ctx context.Context, id string) (string, error) } //go:generate mockery --name=ReqDataParser --output=automock --outpkg=automock --case=underscore type ReqDataParser interface { Parse(req *http.Request) (oathkeeper.ReqData, error) } type Handler struct { reqDataParser ReqDataParser transact persistence.Transactioner tokenVerifier TokenVerifier runtimeSvc RuntimeService tenantSvc TenantService } func NewHandler( reqDataParser ReqDataParser, transact persistence.Transactioner, tokenVerifier TokenVerifier, runtimeSvc RuntimeService, tenantSvc TenantService) *Handler { return &Handler{ reqDataParser: reqDataParser, transact: transact, tokenVerifier: tokenVerifier, runtimeSvc: runtimeSvc, tenantSvc: tenantSvc, } } func (h *Handler) ServeHTTP(writer http.ResponseWriter, req *http.Request) { if req.Method != http.MethodPost { http.Error(writer, fmt.Sprintf("Bad request method. Got %s, expected POST", req.Method), http.StatusBadRequest) return } ctx := req.Context() reqData, err := h.reqDataParser.Parse(req) if err != nil { h.logError(ctx, err, "An error has occurred while parsing the request.") h.respond(ctx, writer, oathkeeper.ReqBody{}) return } tx, err := h.transact.Begin() if err != nil { h.logError(ctx, err, "An error has occurred while opening the db transaction.") h.respond(ctx, writer, reqData.Body) return } defer h.transact.RollbackUnlessCommitted(ctx, tx) ctx = persistence.SaveToContext(req.Context(), tx) err = h.processRequest(ctx, &reqData) if err != nil { h.logError(ctx, err, "An error has occurred while processing the request.") h.respond(ctx, writer, reqData.Body) return } if err = tx.Commit(); err != nil { h.logError(ctx, err, "An error has occurred while committing the transaction.") h.respond(ctx, writer, reqData.Body) return } h.respond(ctx, writer, reqData.Body)
claims, err := h.tokenVerifier.Verify(ctx, reqData.Header.Get("Authorization"))
	if err != nil {
		return errors.Wrap(err, "while verifying the token")
	}

	issuer, err := getTokenIssuer(*claims)
	if err != nil {
		return errors.Wrap(err, "unable to get the issuer")
	}

	runtime, err := h.runtimeSvc.GetByTokenIssuer(ctx, issuer)
	if err != nil {
		return errors.Wrap(err, "while getting the runtime")
	}

	extTenantID, err := h.tenantSvc.GetExternalTenant(ctx, runtime.Tenant)
	if err != nil {
		return errors.Wrap(err, "unable to fetch external tenant based on runtime tenant")
	}

	reqData.SetExternalTenantID(extTenantID)
	reqData.SetExtraFromClaims(*claims)
	return nil
}

func (h *Handler) logError(ctx context.Context, err error, message string) {
	log.C(ctx).WithError(err).Error(message)
}

func (h *Handler) respond(ctx context.Context, writer http.ResponseWriter, body oathkeeper.ReqBody) {
	writer.Header().Set("Content-Type", "application/json")
	err := json.NewEncoder(writer).Encode(body)
	if err != nil {
		h.logError(ctx, err, "An error has occurred while encoding data.")
	}
}
} func (h *Handler) processRequest(ctx context.Context, reqData *oathkeeper.ReqData) error {
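A hedged wiring sketch for the handler above; the dependency values are assumed to be constructed elsewhere, and the route is a placeholder rather than one taken from this repository:

// hypothetical composition root (sketch only; deps constructed elsewhere)
handler := runtimemapping.NewHandler(reqDataParser, transact, tokenVerifier, runtimeSvc, tenantSvc)
mux := http.NewServeMux()
mux.Handle("/v1/runtime-mapping", handler) // placeholder route; the handler accepts POST only
if err := http.ListenAndServe(":8080", mux); err != nil {
	panic(err)
}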
page-switch.ts
import { attribute, classNames } from '@ember-decorators/component'; import { action } from '@ember-decorators/object'; import Component from '@ember/component'; import layout from '../templates/components/page-switch'; /** * The page switch component is an easy switch button to change the page of a page layout. * * ```hbs * <PageSwitch @page={{activePage}} @pageChanged={{action 'changePage'}} as |switch|> * <switch.button @value='rocket'><i class="fa fa-rocket"></i></switch.button> * <switch.button @value='jet'><i class="fa fa-jet"></i></switch.button> * </PageSwitch> * ``` *
@classNames('btn-group') export default class PageSwitch extends Component { /** @hidden */ layout = layout; // attributes /** @hidden */ @attribute role: string = 'group'; // arguments /** * The active page; relates to the `name` argument of each page. * @argument */ page: string = this.page || ''; /** * Whether a page is required at all, or whether it may be empty to display no page. * @argument */ required: boolean = true; @action changePage(key: string) { key = !this.required && key === this.page ? '' : key; this.trigger('pageChanged', key); } }
* @yield {Object} switch - The yielded API hash * @yield {Button} switch.button - Button component */
util.go
package util import ( "reflect" ) func MapCopy(dst, src interface{}) { dv, sv := reflect.ValueOf(dst), reflect.ValueOf(src) for _, k := range sv.MapKeys() { dv.SetMapIndex(k, sv.MapIndex(k)) } } // Subset returns whether m1 is a subset of m2 func
(m1 map[string]string, m2 map[string]string) bool {
	// by this function's convention, the empty set is not a subset of any set
	if m1 == nil || m2 == nil || len(m1) == 0 || len(m2) < len(m1) {
		return false
	}
	for k, v := range m1 {
		// a key missing from m2, or mapped to a different value,
		// means m1 is not a subset of m2
		if val, ok := m2[k]; !ok || !reflect.DeepEqual(val, v) {
			return false
		}
	}
	return true
}

func Contains(vs []string, t string) bool {
	for _, v := range vs {
		if v == t {
			return true
		}
	}
	return false
}
Subset
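A small usage sketch for the helpers above, assuming it lives in the same package (note that MapCopy needs a non-nil destination map):

// requires "fmt" in the import list
func exampleUsage() {
	src := map[string]string{"a": "1", "b": "2"}
	dst := map[string]string{} // must be non-nil: MapCopy writes into it in place
	MapCopy(dst, src)

	fmt.Println(Subset(map[string]string{"a": "1"}, src)) // true
	fmt.Println(Subset(map[string]string{"a": "9"}, src)) // false: value differs
	fmt.Println(Contains([]string{"x", "y"}, "y"))        // true
}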
test_celery_command.py
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import unittest from argparse import Namespace from tempfile import NamedTemporaryFile from unittest import mock import pytest import sqlalchemy import airflow from airflow.cli import cli_parser from airflow.cli.commands import celery_command from airflow.configuration import conf from tests.test_utils.config import conf_vars class TestWorkerPrecheck(unittest.TestCase): @mock.patch('airflow.settings.validate_session')
def test_error(self, mock_validate_session): """ Test to verify the exit mechanism of airflow-worker cli by mocking validate_session method """ mock_validate_session.return_value = False with self.assertRaises(SystemExit) as cm: celery_command.worker(Namespace(queues=1, concurrency=1)) self.assertEqual(cm.exception.code, 1) @conf_vars({('core', 'worker_precheck'): 'False'}) def test_worker_precheck_exception(self): """ Test to check the behaviour of validate_session method when worker_precheck is absent in airflow configuration """ self.assertTrue(airflow.settings.validate_session()) @mock.patch('sqlalchemy.orm.session.Session.execute') @conf_vars({('core', 'worker_precheck'): 'True'}) def test_validate_session_dbapi_exception(self, mock_session): """ Test to validate connection failure scenario on SELECT 1 query """ mock_session.side_effect = sqlalchemy.exc.OperationalError("m1", "m2", "m3", "m4") self.assertEqual(airflow.settings.validate_session(), False) @pytest.mark.integration("redis") @pytest.mark.integration("rabbitmq") @pytest.mark.backend("mysql", "postgres") class TestWorkerServeLogs(unittest.TestCase): @classmethod def setUpClass(cls): cls.parser = cli_parser.get_parser() @mock.patch('airflow.cli.commands.celery_command.worker_bin') @conf_vars({("core", "executor"): "CeleryExecutor"}) def test_serve_logs_on_worker_start(self, mock_worker): with mock.patch('airflow.cli.commands.celery_command.Process') as mock_process: args = self.parser.parse_args(['celery', 'worker', '--concurrency', '1']) with mock.patch('celery.platforms.check_privileges') as mock_privil: mock_privil.return_value = 0 celery_command.worker(args) mock_process.assert_called() @mock.patch('airflow.cli.commands.celery_command.worker_bin') @conf_vars({("core", "executor"): "CeleryExecutor"}) def test_skip_serve_logs_on_worker_start(self, mock_worker): with mock.patch('airflow.cli.commands.celery_command.Process') as mock_popen: args = self.parser.parse_args(['celery', 'worker', '--concurrency', '1', '--skip-serve-logs']) with mock.patch('celery.platforms.check_privileges') as mock_privil: mock_privil.return_value = 0 celery_command.worker(args) mock_popen.assert_not_called() @pytest.mark.backend("mysql", "postgres") class TestCeleryStopCommand(unittest.TestCase): @classmethod def setUpClass(cls): cls.parser = cli_parser.get_parser() @mock.patch("airflow.cli.commands.celery_command.setup_locations") @mock.patch("airflow.cli.commands.celery_command.psutil.Process") @conf_vars({("core", "executor"): "CeleryExecutor"}) def test_if_right_pid_is_read(self, mock_process, mock_setup_locations): args = self.parser.parse_args(['celery', 'stop']) pid = "123" # Calling stop_worker should delete the temporary pid file with self.assertRaises(FileNotFoundError): with NamedTemporaryFile("w+") as f: # Create pid file f.write(pid) f.flush() # Setup mock mock_setup_locations.return_value = (f.name, None, None, None) # Check if works as expected celery_command.stop_worker(args) mock_process.assert_called_once_with(int(pid)) mock_process.return_value.terminate.assert_called_once_with() @mock.patch("airflow.cli.commands.celery_command.read_pid_from_pidfile") @mock.patch("airflow.cli.commands.celery_command.worker_bin.worker") @mock.patch("airflow.cli.commands.celery_command.setup_locations") @conf_vars({("core", "executor"): "CeleryExecutor"}) def test_same_pid_file_is_used_in_start_and_stop( self, mock_setup_locations, mock_celery_worker, mock_read_pid_from_pidfile ): pid_file = "test_pid_file" mock_setup_locations.return_value = 
(pid_file, None, None, None) mock_read_pid_from_pidfile.return_value = None # Call worker worker_args = self.parser.parse_args(['celery', 'worker', '--skip-serve-logs']) celery_command.worker(worker_args) run_mock = mock_celery_worker.return_value.run assert run_mock.call_args _, kwargs = run_mock.call_args assert 'pidfile' in kwargs assert kwargs['pidfile'] == pid_file # Call stop stop_args = self.parser.parse_args(['celery', 'stop']) celery_command.stop_worker(stop_args) mock_read_pid_from_pidfile.assert_called_once_with(pid_file) @pytest.mark.backend("mysql", "postgres") class TestWorkerStart(unittest.TestCase): @classmethod def setUpClass(cls): cls.parser = cli_parser.get_parser() @mock.patch("airflow.cli.commands.celery_command.setup_locations") @mock.patch('airflow.cli.commands.celery_command.Process') @mock.patch('airflow.cli.commands.celery_command.worker_bin') @conf_vars({("core", "executor"): "CeleryExecutor"}) def test_worker_started_with_required_arguments(self, mock_worker, mock_popen, mock_locations): pid_file = "pid_file" mock_locations.return_value = (pid_file, None, None, None) concurrency = '1' celery_hostname = "celery_hostname" queues = "queue" autoscale = "2,5" args = self.parser.parse_args([ 'celery', 'worker', '--autoscale', autoscale, '--concurrency', concurrency, '--celery-hostname', celery_hostname, '--queues', queues ]) with mock.patch('celery.platforms.check_privileges') as mock_privil: mock_privil.return_value = 0 celery_command.worker(args) mock_worker.worker.return_value.run.assert_called_once_with( pool='prefork', optimization='fair', O='fair', # noqa queues=queues, pidfile=pid_file, concurrency=int(concurrency), autoscale=autoscale, hostname=celery_hostname, loglevel=conf.get('logging', 'LOGGING_LEVEL'), )
plots.rs
use structopt::StructOpt; use indicatif::{ProgressBar, ProgressStyle}; use swapct::account::{Account, OTAccount}; use swapct::commitment::{TypeCommitment, Type}; use rand::random; use curve25519_dalek::scalar::Scalar; use std::time::Instant; use swapct::offer::{Offer, get_test_ring}; use std::fs::File; use std::io::Write; use std::collections::HashMap; #[derive(StructOpt, Debug, Clone)] #[structopt(name = "SwapCT", about = "Runs the performance tests for SwapCT")] struct Opt { #[structopt( short = "s", long = "statistics", help = "How many repetitions", default_value = "3" )] statistics: u64, #[structopt( short = "o", long = "max-outputs", help = "Maximum outputs", default_value = "2" )] outputs: u64, #[structopt( short = "r", long = "ring-size", help = "Anonymity ring size: must be 2^n-5, e.g. 11,27,59,123,...", default_value = "11" )] ring: usize, } fn
() -> Result<(),std::io::Error> { let opt = Opt::from_args(); println!("using {:?}", opt); let acct = Account::new(); let typ = TypeCommitment::type_gen(&String::from("mytype")); let bar = ProgressBar::new(opt.statistics*opt.outputs); bar.set_style(ProgressStyle::default_bar() .template("[{elapsed_precise}] [{bar:40.cyan/blue}] {pos}/{len} ({eta})") .progress_chars("#>-")); let mut time: HashMap<&str,Vec<Vec<u128>>> = vec!["gen","merge","seal","vfo","ver"].iter().map(|t|(t.clone(), (0..opt.outputs).map(|_| Vec::new()).collect())).collect(); for _ in 0..opt.statistics { for inouts in 1..opt.outputs+1 { bar.inc(1); let mut otas = Vec::<OTAccount>::new(); let mut inamt = 0u64; for _ in 0..inouts { let r = random::<u64>() % (2u64.pow(40)); otas.push(acct.derive_ot(&typ, &Scalar::from(r))); inamt += r; } let totalin = inamt; let mut outs = Vec::<(Account, Type, Scalar)>::new(); let mut inamt = totalin; for _ in 0..(inouts - 1) { let r = random::<u64>() % inamt; outs.push((acct, typ, Scalar::from(r))); inamt -= r; } outs.push((acct, typ, Scalar::from(inamt))); let start = Instant::now(); let off = Offer::offer(&otas, &outs.iter().map(|(a,t,v)|(a,t,v)).collect(), &vec![get_test_ring(opt.ring); otas.len()]); let sealstart = Instant::now(); let tx = off.seal(None); let gen = start.elapsed().as_micros(); let seal = sealstart.elapsed().as_micros(); time.get_mut("gen").unwrap()[inouts as usize-1].push(gen); time.get_mut("seal").unwrap()[inouts as usize-1].push(seal); let start = Instant::now(); let vseal = tx.verify_seal(); let voffer = start.elapsed().as_micros(); time.get_mut("vfo").unwrap()[inouts as usize-1].push(voffer); assert!(vseal.is_ok()); let off2 = Offer::offer(&otas, &outs.iter().map(|(a,t,v)|(a,t,v)).collect(), &vec![get_test_ring(opt.ring); otas.len()]); let start = Instant::now(); let moff = off+off2; let merge = start.elapsed().as_micros(); time.get_mut("merge").unwrap()[inouts as usize-1].push(merge); assert!(moff.verify().is_ok()); let start = Instant::now(); let v = tx.verify(); let verify = start.elapsed().as_micros(); time.get_mut("ver").unwrap()[inouts as usize-1].push(verify); assert!(v.is_ok()); } } bar.finish(); let seconds = vec!["gen","seal"]; let stat: HashMap<&str, Vec<(usize,f32,f32,f32)> > = time.iter().map(|(typ,v)|(typ.clone(),{ v.iter().enumerate().map(|(i,t)| { let mut times = t.clone(); times.sort(); let median = times[times.len()/2]; if seconds.contains(typ) { (i+1,(median as f32/1000000f32), (median-times[0])as f32/1000000f32, (times[times.len()-1]-median) as f32/1000000f32) } else { (i+1,(median as f32/1000f32), (median-times[0])as f32/1000f32, (times[times.len()-1]-median) as f32/1000f32) } }).collect() }) ).collect(); let genname = String::from("plots/generation.tex"); let mut genfile = File::create(genname).expect("file not writable"); let offset = 0.0; writeln!(&mut genfile,"\\addplot[only marks,mark=square, red,mark options={{solid}},error bars/.cd,y dir=both,y explicit] coordinates {{")?; for g in stat.get("merge").unwrap() { writeln!(&mut genfile,"({},{}) -= (0,{}) += (0,{}) ",(g.0 as f32)+offset,g.1,g.2,g.3)?; } writeln!(&mut genfile,"}}; \\addlegendentry{{merge, ${}$}};", opt.ring)?; writeln!(&mut genfile,"\\addplot[only marks,mark=triangle*, red,mark options={{solid}},error bars/.cd,y dir=both,y explicit] coordinates {{")?; for g in stat.get("gen").unwrap().iter().zip(stat.get("seal").unwrap()) { writeln!(&mut genfile,"({},{}) -= (0,{}) += (0,{}) ",(g.0.0 as f32)+offset,g.0.1+g.1.1,g.0.2+g.1.2,g.0.3+g.1.3)?; } writeln!(&mut genfile,"}}; 
\\addlegendentry{{offer+seal, ${}$, you}};", opt.ring)?; writeln!(&mut genfile,"\\addplot[only marks,mark=o, red,mark options={{solid}},error bars/.cd,y dir=both,y explicit] coordinates {{")?; for g in stat.get("seal").unwrap() { writeln!(&mut genfile,"({},{}) -= (0,{}) += (0,{}) ",(g.0 as f32)+offset,g.1,g.2,g.3)?; } writeln!(&mut genfile,"}}; \\addlegendentry{{seal, ${}$}};", opt.ring)?; let vername = String::from("plots/verification.tex"); let mut verfile = File::create(vername).expect("file not writable"); writeln!(&mut verfile,"\\addplot[only marks,mark=triangle*, blue,mark options={{solid}},error bars/.cd,y dir=both,y explicit] coordinates {{")?; for g in stat.get("vfo").unwrap() { writeln!(&mut verfile,"({},{}) -= (0,{}) += (0,{}) ",(g.0 as f32)+offset,g.1,g.2,g.3)?; } writeln!(&mut verfile,"}}; \\addlegendentry{{vf seal, ${}$}};", opt.ring)?; writeln!(&mut verfile,"\\addplot[only marks,mark=triangle*, red,mark options={{solid}},error bars/.cd,y dir=both,y explicit] coordinates {{")?; for g in stat.get("ver").unwrap() { writeln!(&mut verfile,"({},{}) -= (0,{}) += (0,{}) ",(g.0 as f32)+offset,g.1,g.2,g.3)?; } writeln!(&mut verfile,"}}; \\addlegendentry{{SwapCT, ${}$, you}};", opt.ring)?; Ok(()) }
main
test_utils.py
import pytest from telebot import types from tululbot.utils import TululBot, lookup_kamusslang, lookup_urbandictionary, lookup_slang from tululbot.types import Message class TestTululBot: def test_create_bot(self): bot = TululBot('TOKEN') assert bot._telebot is not None assert bot._user is None def test_get_me(self, mocker): bot = TululBot('TOKEN') return_value = 'askldfjlkf' mock_get_me = mocker.patch.object(bot._telebot, 'get_me', autospec=True, return_value=return_value) rv = bot.get_me() assert rv == return_value mock_get_me.assert_called_once_with() def test_send_message(self, mocker): bot = TululBot('TOKEN') return_value = 'some return value' mock_send_message = mocker.patch.object(bot._telebot, 'send_message', return_value=return_value, autospec=True) chat_id = 12345 text = 'Hello world' rv = bot.send_message(chat_id, text) assert rv == return_value mock_send_message.assert_called_once_with(chat_id, text) def test_set_webhook(self, mocker): bot = TululBot('TOKEN') return_value = 'some return value' webhook_url = 'some url' mock_set_webhook = mocker.patch.object(bot._telebot, 'set_webhook', return_value=return_value, autospec=True) rv = bot.set_webhook(webhook_url) assert rv == return_value mock_set_webhook.assert_called_once_with(webhook_url) def test_reply_to(self, mocker, fake_message): bot = TululBot('TOKEN') return_value = 'some return value' mock_reply_to = mocker.patch.object(bot._telebot, 'reply_to', return_value=return_value, autospec=True) text = 'Hello world' rv = bot.reply_to(fake_message, text) assert rv == return_value mock_reply_to.assert_called_once_with(fake_message, text, disable_web_page_preview=False, reply_markup=None) def test_reply_to_with_preview_disabled(self, mocker, fake_message): bot = TululBot('TOKEN') mock_reply_to = mocker.patch.object(bot._telebot, 'reply_to', autospec=True) text = 'Hello world' bot.reply_to(fake_message, text, disable_preview=True) mock_reply_to.assert_called_once_with(fake_message, text, disable_web_page_preview=True, reply_markup=None) def test_reply_to_with_force_reply(self, mocker, fake_message): bot = TululBot('TOKEN') mock_reply_to = mocker.patch.object(bot._telebot, 'reply_to', autospec=True) text = 'dummy text' bot.reply_to(fake_message, text, force_reply=True) args, kwargs = mock_reply_to.call_args assert args == (fake_message, text) assert len(kwargs) == 2 assert 'disable_web_page_preview' in kwargs assert not kwargs['disable_web_page_preview'] assert 'reply_markup' in kwargs assert isinstance(kwargs['reply_markup'], types.ForceReply) def test_forward_message(self, mocker): bot = TululBot('TOKEN') return_value = 'some return value' mock_forward_message = mocker.patch.object(bot._telebot, 'forward_message', return_value=return_value, autospec=True) chat_id = 12345 from_chat_id = 67890 message_id = 42 rv = bot.forward_message(chat_id, from_chat_id, message_id) assert rv == return_value mock_forward_message.assert_called_once_with(chat_id, from_chat_id, message_id) def test_message_handler_with_no_argument(self): bot = TululBot('TOKEN') with pytest.raises(ValueError): @bot.message_handler() def handle(message): pass def test_equals_message_handler(self, mocker, fake_message): bot = TululBot('TOKEN') mock_message_handler = mocker.patch.object(bot._telebot, 'message_handler', autospec=True) @bot.message_handler(equals='/hello') def handle(message): pass args, kwargs = mock_message_handler.call_args assert len(args) == 0 assert len(kwargs) == 1 assert 'func' in kwargs func = kwargs['func'] fake_message.text = '/hello' assert 
func(fake_message) fake_message.text = '/hello world' assert not func(fake_message) def test_is_reply_to_bot_message_handler(self, mocker, fake_message_dict, fake_user_dict): fake_reply_message_dict = fake_message_dict.copy() bot = TululBot('TOKEN') bot_id = 12345 class FakeUser: def __init__(self, id): self.id = id bot.user = FakeUser(bot_id) mock_message_handler = mocker.patch.object(bot._telebot, 'message_handler', autospec=True) fake_user_dict['id'] = bot_id bot_message = 'Hah?' fake_message_dict['text'] = bot_message fake_message_dict['from'] = fake_user_dict fake_reply_message_dict['reply_to_message'] = fake_message_dict fake_reply_message = Message.from_dict(fake_reply_message_dict) @bot.message_handler(is_reply_to_bot=bot_message) def handle(message): pass args, kwargs = mock_message_handler.call_args assert len(args) == 0 assert len(kwargs) == 1 assert 'func' in kwargs func = kwargs['func'] assert func(fake_reply_message) def test_handle_new_message(self, mocker, fake_message): bot = TululBot('TOKEN') mock_process_new_messages = mocker.patch.object(bot._telebot, 'process_new_messages', autospec=True) bot.handle_new_message(fake_message) mock_process_new_messages.assert_called_once_with([fake_message]) def test_commands_message_handler(self, mocker): bot = TululBot('TOKEN') mock_message_handler = mocker.patch.object(bot._telebot, 'message_handler', autospec=True) @bot.message_handler(commands=['hello']) def handle(message): pass args, kwargs = mock_message_handler.call_args assert len(args) == 0 assert len(kwargs) == 1 assert 'commands' in kwargs assert kwargs['commands'] == ['hello'] def test_user_property(self, mocker, fake_user): bot = TululBot('TOKEN') mock_get_me = mocker.patch.object(bot, 'get_me', autospec=True, return_value=fake_user) rv = bot.user assert rv == fake_user mock_get_me.assert_called_once_with() def test_lookup_kamusslang(mocker): class FakeParagraph: def __init__(self, strings): self.strings = strings strings = ['asdf', 'alsjdf', 'kfdg'] side_effect_pair = { 'close-word-suggestion-text': None, 'term-def': FakeParagraph(strings) } class FakeSoup: def find(self, class_): return side_effect_pair[class_] mocker.patch('tululbot.utils.requests.get') mocker.patch('tululbot.utils.BeautifulSoup', return_value=FakeSoup()) rv = lookup_kamusslang('jdflafj') assert rv == ''.join(strings) def test_lookup_kamusslang_no_definition_found(mocker): side_effect_pair = { 'close-word-suggestion-text': None, 'term-def': None } class FakeSoup: def find(self, class_): return side_effect_pair[class_] mocker.patch('tululbot.utils.requests.get') mocker.patch('tululbot.utils.BeautifulSoup', return_value=FakeSoup()) rv = lookup_kamusslang('jdflafj') assert rv is None def test_lookup_kamusslang_close_word_suggestion(mocker): class FakeParagraph: def __init__(self, strings): self.strings = strings strings = ['asdf', 'alsjdf', 'kfdg'] side_effect_pair = { 'close-word-suggestion-text': 'Apalah', 'term-def': FakeParagraph(strings) } class FakeSoup: def find(self, class_): return side_effect_pair[class_] mocker.patch('tululbot.utils.requests.get') mocker.patch('tululbot.utils.BeautifulSoup', return_value=FakeSoup()) rv = lookup_kamusslang('jdflafj') assert rv is None def test_lookup_kamusslang_conn_error(mocker): class FakeResponse: def __init__(self): self.ok = False mocker.patch('tululbot.utils.requests.get', return_value=FakeResponse()) rv = lookup_kamusslang('asdf jku') assert rv == "Koneksi lagi bapuk nih :'(" def test_lookup_urbandictionary(mocker): fake_definition = [ { 'def': 
'mmeeeeooowww', 'example': 'guk guk guk',
'word': 'kaing kaing' }, { 'def': 'grrrrrrrr', 'example': 'tokkeeeeekk tokkeeeekk', 'word': 'aaauuuuuuuu' } ] mocker.patch('tululbot.utils.ud.define', return_value=fake_definition) rv = lookup_urbandictionary('eemmbeekk') assert rv == fake_definition[0]['def'] def test_lookup_urbandictionary_no_definition_found(mocker): fake_no_definition = [ { 'def': "\nThere aren't any definitions for kimcil yet.\nCan you define it?\n", 'example': '', 'word': '¯\\_(ツ)_/¯\n' } ] mocker.patch('tululbot.utils.ud.define', return_value=fake_no_definition) rv = lookup_urbandictionary('eemmbeekk') assert rv is None def test_lookup_slang_when_urbandictionary_has_definition(mocker): fake_definition = 'soba ni itai yo' mocker.patch('tululbot.utils.lookup_urbandictionary', return_value=fake_definition) rv = lookup_slang('kimi no tame ni dekiru koto ga, boku ni aru kana?') assert rv == fake_definition def test_lookup_slang_when_urbandictionary_has_no_definition_but_kamusslang_does(mocker): fake_definition = 'nagareru kisetsu no mannaka de' mocker.patch('tululbot.utils.lookup_urbandictionary', return_value=None) mocker.patch('tululbot.utils.lookup_kamusslang', return_value=fake_definition) rv = lookup_slang('futohi no nagasa wo kanjimasu') assert rv == fake_definition def test_lookup_slang_when_both_urbandictionary_and_kamusslang_has_no_definition(mocker): mocker.patch('tululbot.utils.lookup_urbandictionary', return_value=None) mocker.patch('tululbot.utils.lookup_kamusslang', return_value=None) rv = lookup_slang('hitomi wo tojireba anata ga') assert rv == 'Gak nemu cuy'
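The last three tests pin down lookup_slang's fallback chain; a hedged sketch of the behaviour they encode (the real implementation lives in tululbot.utils):

def lookup_slang(word):
    # try urbandictionary first, then kamusslang, then give up
    return (
        lookup_urbandictionary(word)
        or lookup_kamusslang(word)
        or 'Gak nemu cuy'
    )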
cast.rs
fn main()
{
    let temp = -3i16;
    // `as` casts between integers wrap around: -3i16 becomes 65533u16
    let temp2 = temp as u16;
    println!("{}", temp2);

    let msg = "12";
    // a &str cannot be cast to u32 with `as` (that is a compile error);
    // parse the string instead
    let msg2: u32 = msg.parse().unwrap();
    println!("{}", msg2);
}
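Since `as` wraps silently, a checked conversion with u16::try_from is worth contrasting; this is an illustrative addition, not part of the original file:

use std::convert::TryFrom;

fn main() {
    let temp = -3i16;
    // try_from reports the out-of-range value instead of wrapping to 65533
    match u16::try_from(temp) {
        Ok(v) => println!("fits: {}", v),
        Err(e) => println!("out of range: {}", e),
    }
}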
knative_platform_test.go
//go:build integration // +build integration // To enable compilation of this file in Goland, go to "Settings -> Go -> Vendoring & Build Tags -> Custom Tags" and add "integration" /* Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package knative import ( "strings" "testing" . "github.com/onsi/gomega" "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" . "github.com/apache/camel-k/e2e/support" "github.com/apache/camel-k/pkg/apis/camel/v1" "github.com/apache/camel-k/pkg/util/dsl" "github.com/apache/camel-k/pkg/util/knative" ) func
(t *testing.T) { WithNewTestNamespace(t, func(ns string) { if !knative.IsEnabledInNamespace(TestContext, TestClient(), ns) { t.Error("Knative not installed in the cluster") t.FailNow() } Expect(Kamel("install", "-n", ns).Execute()).To(Succeed()) Eventually(PlatformPhase(ns), TestTimeoutMedium).Should(Equal(v1.IntegrationPlatformPhaseReady)) Eventually(PlatformProfile(ns), TestTimeoutShort).Should(Equal(v1.TraitProfile(""))) cluster := Platform(ns)().Status.Cluster t.Run("run yaml on cluster profile", func(t *testing.T) { Expect(Kamel("run", "-n", ns, "files/yaml.yaml", "--profile", string(cluster)).Execute()).To(Succeed()) Eventually(IntegrationPodPhase(ns, "yaml"), TestTimeoutMedium).Should(Equal(corev1.PodRunning)) Eventually(IntegrationLogs(ns, "yaml"), TestTimeoutShort).Should(ContainSubstring("Magicstring!")) Eventually(IntegrationProfile(ns, "yaml"), TestTimeoutShort).Should(Equal(v1.TraitProfile(string(cluster)))) // Change something in the integration to produce a redeploy Expect(UpdateIntegration(ns, "yaml", func(it *v1.Integration) { it.Spec.Profile = "" content, err := dsl.ToYamlDSL(it.Spec.Flows) assert.NoError(t, err) newData := strings.ReplaceAll(string(content), "string!", "string!!!") newFlows, err := dsl.FromYamlDSLString(newData) assert.NoError(t, err) it.Spec.Flows = newFlows })).To(Succeed()) // Spec profile should be reset by "kamel run" Eventually(IntegrationSpecProfile(ns, "yaml")).Should(Equal(v1.TraitProfile(""))) // When integration is running again ... Eventually(IntegrationPhase(ns, "yaml")).Should(Equal(v1.IntegrationPhaseRunning)) Eventually(IntegrationLogs(ns, "yaml"), TestTimeoutShort).Should(ContainSubstring("Magicstring!!!")) // It should keep the old profile saved in status Eventually(IntegrationProfile(ns, "yaml"), TestTimeoutMedium).Should(Equal(v1.TraitProfile(cluster))) Expect(Kamel("delete", "--all", "-n", ns).Execute()).To(Succeed()) }) t.Run("run yaml on automatic profile", func(t *testing.T) { Expect(Kamel("run", "-n", ns, "files/yaml.yaml").Execute()).To(Succeed()) Eventually(IntegrationPodPhase(ns, "yaml"), TestTimeoutMedium).Should(Equal(corev1.PodRunning)) Eventually(IntegrationProfile(ns, "yaml"), TestTimeoutShort).Should(Equal(v1.TraitProfileKnative)) Expect(Kamel("delete", "--all", "-n", ns).Execute()).To(Succeed()) }) }) }
TestKnativePlatform
18_greatest.py
num1 = float(input("Enter the first number:\n "))
num2 = float(input("Enter the second number:\n "))
num3 = float(input("Enter the third number:\n "))
num4 = float(input("Enter the fourth number:\n "))

# input() returns strings, so convert to float first to avoid lexicographic
# comparison, and compare each candidate against all the others
if (num1 >= num2) and (num1 >= num3) and (num1 >= num4):
    print("The greatest number is:", num1)
elif (num2 >= num1) and (num2 >= num3) and (num2 >= num4):
    print("The greatest number is:", num2)
elif (num3 >= num1) and (num3 >= num2) and (num3 >= num4):
    print("The greatest number is:", num3)
else:
    print("The greatest number is:", num4)
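An equivalent, more compact version using the built-in max() (same behaviour, shown for contrast):

nums = [float(input(f"Enter number {i + 1}:\n ")) for i in range(4)]
print("The greatest number is:", max(nums))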
cpplint.py
#!/usr/bin/env python # # Copyright (c) 2009 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Does google-lint on c++ files. The goal of this script is to identify places in the code that *may* be in non-compliance with google style. It does not attempt to fix up these problems -- the point is to educate. It does also not attempt to find all problems, or to ensure that everything it does find is legitimately a problem. In particular, we can get very confused by /* and // inside strings! We do a small hack, which is to ignore //'s with "'s after them on the same line, but it is far from perfect (in either direction). """ import codecs import copy import getopt import math # for log import os import re import sre_compile import string import sys import unicodedata _USAGE = """ Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...] [--counting=total|toplevel|detailed] [--root=subdir] [--linelength=digits] <file> [file] ... The style guidelines this tries to follow are those in https://google-styleguide.googlecode.com/svn/trunk/cppguide.xml Every problem is given a confidence score from 1-5, with 5 meaning we are certain of the problem, and 1 meaning it could be a legitimate construct. This will miss some errors, and is not a substitute for a code review. To suppress false-positive errors of a certain category, add a 'NOLINT(category)' comment to the line. NOLINT or NOLINT(*) suppresses errors of all categories on that line. The files passed in will be linted; at least one file must be provided. Default linted extensions are .cc, .cpp, .cu, .cuh and .h. Change the extensions with the --extensions flag. Flags: output=vs7 By default, the output is formatted to ease emacs parsing. Visual Studio compatible output (vs7) may also be used. Other formats are unsupported. verbose=# Specify a number 0-5 to restrict errors to certain verbosity levels. filter=-x,+y,... Specify a comma-separated list of category-filters to apply: only error messages whose category names pass the filters will be printed. 
(Category names are printed with the message and look like "[whitespace/indent]".) Filters are evaluated left to right. "-FOO" and "FOO" mean "do not print categories that start with FOO". "+FOO" means "do print categories that start with FOO". Examples: --filter=-whitespace,+whitespace/braces --filter=whitespace,runtime/printf,+runtime/printf_format --filter=-,+build/include_what_you_use To see a list of all the categories used in cpplint, pass no arg: --filter= counting=total|toplevel|detailed The total number of errors found is always printed. If 'toplevel' is provided, then the count of errors in each of the top-level categories like 'build' and 'whitespace' will also be printed. If 'detailed' is provided, then a count is provided for each category like 'build/class'. root=subdir The root directory used for deriving the header guard CPP variable. By default, the header guard CPP variable is calculated as the relative path to the directory that contains .git, .hg, or .svn. When this flag is specified, the relative path is calculated from the specified directory. If the specified directory does not exist, this flag is ignored. Examples: Assuming that src/.git exists, the header guard CPP variables for src/chrome/browser/ui/browser.h are: No flag => CHROME_BROWSER_UI_BROWSER_H_ --root=chrome => BROWSER_UI_BROWSER_H_ --root=chrome/browser => UI_BROWSER_H_ linelength=digits This is the allowed line length for the project. The default value is 80 characters. Examples: --linelength=120 extensions=extension,extension,... The allowed file extensions that cpplint will check. Examples: --extensions=hpp,cpp cpplint.py supports per-directory configurations specified in CPPLINT.cfg files. A CPPLINT.cfg file can contain a number of key=value pairs. Currently the following options are supported: set noparent filter=+filter1,-filter2,... exclude_files=regex linelength=80 The "set noparent" option prevents cpplint from traversing the directory tree upwards looking for more .cfg files in parent directories. This option is usually placed in the top-level project directory. The "filter" option is similar in function to the --filter flag. It specifies message filters in addition to the |_DEFAULT_FILTERS| and those specified through the --filter command-line flag. "exclude_files" allows you to specify a regular expression to be matched against a file name. If the expression matches, the file is skipped and not run through the linter. "linelength" allows you to specify the allowed line length for the project. CPPLINT.cfg has an effect on files in the same directory and all sub-directories, unless overridden by a nested configuration file. Example file: filter=-build/include_order,+build/include_alpha exclude_files=.*\.cc The above example disables the build/include_order warning and enables build/include_alpha, and excludes all .cc files from being processed by the linter, in the current directory (where the .cfg file is located) and all sub-directories.
_ERROR_CATEGORIES = [ 'build/class', 'build/c++11', 'build/c++14', 'build/c++tr1', 'build/deprecated', 'build/endif_comment', 'build/explicit_make_pair', 'build/forward_decl', 'build/header_guard', 'build/include', 'build/include_alpha', 'build/include_order', 'build/include_what_you_use', 'build/namespaces', 'build/printf_format', 'build/storage_class', 'legal/copyright', 'readability/alt_tokens', 'readability/braces', 'readability/casting', 'readability/check', 'readability/constructors', 'readability/fn_size', 'readability/inheritance', 'readability/multiline_comment', 'readability/multiline_string', 'readability/namespace', 'readability/nolint', 'readability/nul', 'readability/strings', 'readability/todo', 'readability/utf8', 'runtime/arrays', 'runtime/casting', 'runtime/explicit', 'runtime/int', 'runtime/init', 'runtime/invalid_increment', 'runtime/member_string_references', 'runtime/memset', 'runtime/indentation_namespace', 'runtime/operator', 'runtime/printf', 'runtime/printf_format', 'runtime/references', 'runtime/string', 'runtime/threadsafe_fn', 'runtime/vlog', 'whitespace/blank_line', 'whitespace/braces', 'whitespace/comma', 'whitespace/comments', 'whitespace/empty_conditional_body', 'whitespace/empty_if_body', 'whitespace/empty_loop_body', 'whitespace/end_of_line', 'whitespace/ending_newline', 'whitespace/forcolon', 'whitespace/indent', 'whitespace/line_length', 'whitespace/newline', 'whitespace/operators', 'whitespace/parens', 'whitespace/semicolon', 'whitespace/tab', 'whitespace/todo', ] # These error categories are no longer enforced by cpplint, but for backwards- # compatibility they may still appear in NOLINT comments. _LEGACY_ERROR_CATEGORIES = [ 'readability/streams', 'readability/function', ] # The default state of the category filter. This is overridden by the --filter= # flag. By default all errors are on, so only add here categories that should be # off by default (i.e., categories that must be enabled by the --filter= flags). # All entries here should start with a '-' or '+', as in the --filter= flag. _DEFAULT_FILTERS = ['-build/include_alpha'] # The default list of categories suppressed for C (not C++) files. _DEFAULT_C_SUPPRESSED_CATEGORIES = [ 'readability/casting', ] # The default list of categories suppressed for Linux Kernel files. _DEFAULT_KERNEL_SUPPRESSED_CATEGORIES = [ 'whitespace/tab', ] # We used to check for high-bit characters, but after much discussion we # decided those were OK, as long as they were in UTF-8 and didn't represent # hard-coded international strings, which belong in a separate i18n file. 
# C++ headers _CPP_HEADERS = frozenset([ # Legacy 'algobase.h', 'algo.h', 'alloc.h', 'builtinbuf.h', 'bvector.h', 'complex.h', 'defalloc.h', 'deque.h', 'editbuf.h', 'fstream.h', 'function.h', 'hash_map', 'hash_map.h', 'hash_set', 'hash_set.h', 'hashtable.h', 'heap.h', 'indstream.h', 'iomanip.h', 'iostream.h', 'istream.h', 'iterator.h', 'list.h', 'map.h', 'multimap.h', 'multiset.h', 'ostream.h', 'pair.h', 'parsestream.h', 'pfstream.h', 'procbuf.h', 'pthread_alloc', 'pthread_alloc.h', 'rope', 'rope.h', 'ropeimpl.h', 'set.h', 'slist', 'slist.h', 'stack.h', 'stdiostream.h', 'stl_alloc.h', 'stl_relops.h', 'streambuf.h', 'stream.h', 'strfile.h', 'strstream.h', 'tempbuf.h', 'tree.h', 'type_traits.h', 'vector.h', # 17.6.1.2 C++ library headers 'algorithm', 'array', 'atomic', 'bitset', 'chrono', 'codecvt', 'complex', 'condition_variable', 'deque', 'exception', 'forward_list', 'fstream', 'functional', 'future', 'initializer_list', 'iomanip', 'ios', 'iosfwd', 'iostream', 'istream', 'iterator', 'limits', 'list', 'locale', 'map', 'memory', 'mutex', 'new', 'numeric', 'ostream', 'queue', 'random', 'ratio', 'regex', 'scoped_allocator', 'set', 'sstream', 'stack', 'stdexcept', 'streambuf', 'string', 'strstream', 'system_error', 'thread', 'tuple', 'typeindex', 'typeinfo', 'type_traits', 'unordered_map', 'unordered_set', 'utility', 'valarray', 'vector', # 17.6.1.2 C++ headers for C library facilities 'cassert', 'ccomplex', 'cctype', 'cerrno', 'cfenv', 'cfloat', 'cinttypes', 'ciso646', 'climits', 'clocale', 'cmath', 'csetjmp', 'csignal', 'cstdalign', 'cstdarg', 'cstdbool', 'cstddef', 'cstdint', 'cstdio', 'cstdlib', 'cstring', 'ctgmath', 'ctime', 'cuchar', 'cwchar', 'cwctype', ]) # Type names _TYPES = re.compile( r'^(?:' # [dcl.type.simple] r'(char(16_t|32_t)?)|wchar_t|' r'bool|short|int|long|signed|unsigned|float|double|' # [support.types] r'(ptrdiff_t|size_t|max_align_t|nullptr_t)|' # [cstdint.syn] r'(u?int(_fast|_least)?(8|16|32|64)_t)|' r'(u?int(max|ptr)_t)|' r')$') # These headers are excluded from [build/include] and [build/include_order] # checks: # - Anything not following google file name conventions (containing an # uppercase character, such as Python.h or nsStringAPI.h, for example). # - Lua headers. _THIRD_PARTY_HEADERS_PATTERN = re.compile( r'^(?:[^/]*[A-Z][^/]*\.h|lua\.h|lauxlib\.h|lualib\.h)$') # Pattern for matching FileInfo.BaseName() against test file name _TEST_FILE_SUFFIX = r'(_test|_unittest|_regtest)$' # Pattern that matches only complete whitespace, possibly across multiple lines. _EMPTY_CONDITIONAL_BODY_PATTERN = re.compile(r'^\s*$', re.DOTALL) # Assertion macros. These are defined in base/logging.h and # testing/base/public/gunit.h. 
_CHECK_MACROS = [ 'DCHECK', 'CHECK', 'EXPECT_TRUE', 'ASSERT_TRUE', 'EXPECT_FALSE', 'ASSERT_FALSE', ] # Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE _CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS]) for op, replacement in [('==', 'EQ'), ('!=', 'NE'), ('>=', 'GE'), ('>', 'GT'), ('<=', 'LE'), ('<', 'LT')]: _CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement _CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement _CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement _CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'), ('>=', 'LT'), ('>', 'LE'), ('<=', 'GT'), ('<', 'GE')]: _CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement _CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement # Alternative tokens and their replacements. For full list, see section 2.5 # Alternative tokens [lex.digraph] in the C++ standard. # # Digraphs (such as '%:') are not included here since it's a mess to # match those on a word boundary. _ALT_TOKEN_REPLACEMENT = { 'and': '&&', 'bitor': '|', 'or': '||', 'xor': '^', 'compl': '~', 'bitand': '&', 'and_eq': '&=', 'or_eq': '|=', 'xor_eq': '^=', 'not': '!', 'not_eq': '!=' } # Compile regular expression that matches all the above keywords. The "[ =()]" # bit is meant to avoid matching these keywords outside of boolean expressions. # # False positives include C-style multi-line comments and multi-line strings # but those have always been troublesome for cpplint. _ALT_TOKEN_REPLACEMENT_PATTERN = re.compile( r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)') # These constants define types of headers for use with # _IncludeState.CheckNextIncludeOrder(). _C_SYS_HEADER = 1 _CPP_SYS_HEADER = 2 _LIKELY_MY_HEADER = 3 _POSSIBLE_MY_HEADER = 4 _OTHER_HEADER = 5 # These constants define the current inline assembly state _NO_ASM = 0 # Outside of inline assembly block _INSIDE_ASM = 1 # Inside inline assembly block _END_ASM = 2 # Last line of inline assembly block _BLOCK_ASM = 3 # The whole block is an inline assembly block # Match start of assembly blocks _MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)' r'(?:\s+(volatile|__volatile__))?' r'\s*[{(]') # Match strings that indicate we're working on a C (not C++) file. _SEARCH_C_FILE = re.compile(r'\b(?:LINT_C_FILE|' r'vim?:\s*.*(\s*|:)filetype=c(\s*|:|$))') # Match string that indicates we're working on a Linux Kernel file. _SEARCH_KERNEL_FILE = re.compile(r'\b(?:LINT_KERNEL_FILE)') _regexp_compile_cache = {} # {str, set(int)}: a map from error categories to sets of linenumbers # on which those errors are expected and should be suppressed. _error_suppressions = {} # The root directory used for deriving header guard CPP variable. # This is set by --root flag. _root = None # The project root directory. Used for deriving header guard CPP variable. # This is set by --project_root flag. Must be an absolute path. _project_root = None # The allowed line length of files. # This is set by --linelength flag. _line_length = 80 # The allowed extensions for file names # This is set by --extensions flag. _valid_extensions = set(['cc', 'h', 'cpp', 'cu', 'cuh']) # {str, bool}: a map from error categories to booleans which indicate if the # category should be suppressed for every line. _global_error_suppressions = {} def ParseNolintSuppressions(filename, raw_line, linenum, error): """Updates the global list of line error-suppressions. 
Parses any NOLINT comments on the current line, updating the global error_suppressions store. Reports an error if the NOLINT comment was malformed. Args: filename: str, the name of the input file. raw_line: str, the line of input text, with comments. linenum: int, the number of the current line. error: function, an error handler. """ matched = Search(r'\bNOLINT(NEXTLINE)?\b(\([^)]+\))?', raw_line) if matched: if matched.group(1): suppressed_line = linenum + 1 else: suppressed_line = linenum category = matched.group(2) if category in (None, '(*)'): # => "suppress all" _error_suppressions.setdefault(None, set()).add(suppressed_line) else: if category.startswith('(') and category.endswith(')'): category = category[1:-1] if category in _ERROR_CATEGORIES: _error_suppressions.setdefault(category, set()).add(suppressed_line) elif category not in _LEGACY_ERROR_CATEGORIES: error(filename, linenum, 'readability/nolint', 5, 'Unknown NOLINT error category: %s' % category) def ProcessGlobalSuppresions(lines): """Updates the list of global error suppressions. Parses any lint directives in the file that have global effect. Args: lines: An array of strings, each representing a line of the file, with the last element being empty if the file is terminated with a newline. """ for line in lines: if _SEARCH_C_FILE.search(line): for category in _DEFAULT_C_SUPPRESSED_CATEGORIES: _global_error_suppressions[category] = True if _SEARCH_KERNEL_FILE.search(line): for category in _DEFAULT_KERNEL_SUPPRESSED_CATEGORIES: _global_error_suppressions[category] = True def ResetNolintSuppressions(): """Resets the set of NOLINT suppressions to empty.""" _error_suppressions.clear() _global_error_suppressions.clear() def IsErrorSuppressedByNolint(category, linenum): """Returns true if the specified error category is suppressed on this line. Consults the global error_suppressions map populated by ParseNolintSuppressions/ProcessGlobalSuppresions/ResetNolintSuppressions. Args: category: str, the category of the error. linenum: int, the current line number. Returns: bool, True iff the error should be suppressed due to a NOLINT comment or global suppression. """ return (_global_error_suppressions.get(category, False) or linenum in _error_suppressions.get(category, set()) or linenum in _error_suppressions.get(None, set())) def Match(pattern, s): """Matches the string with the pattern, caching the compiled regexp.""" # The regexp compilation caching is inlined in both Match and Search for # performance reasons; factoring it out into a separate function turns out # to be noticeably expensive. if pattern not in _regexp_compile_cache: _regexp_compile_cache[pattern] = sre_compile.compile(pattern) return _regexp_compile_cache[pattern].match(s) def ReplaceAll(pattern, rep, s): """Replaces instances of pattern in a string with a replacement. The compiled regex is kept in a cache shared by Match and Search. 
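
  Example (illustrative):
    ReplaceAll(r'\s+', ' ', 'a   b')  returns  'a b'
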
  Args:
    pattern: regex pattern
    rep: replacement text
    s: search string

  Returns:
    string with replacements made (or original string if no replacements)
  """
  if pattern not in _regexp_compile_cache:
    _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
  return _regexp_compile_cache[pattern].sub(rep, s)


def Search(pattern, s):
  """Searches the string for the pattern, caching the compiled regexp."""
  if pattern not in _regexp_compile_cache:
    _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
  return _regexp_compile_cache[pattern].search(s)


def _IsSourceExtension(s):
  """File extension (excluding dot) matches a source file extension."""
  return s in ('c', 'cc', 'cpp', 'cxx')


class _IncludeState(object):
  """Tracks line numbers for includes, and the order in which includes appear.

  include_list contains a list of lists of (header, line number) pairs.
  It's a list of lists rather than just one flat list to make it easier to
  update across preprocessor boundaries.

  Call CheckNextIncludeOrder() once for each header in the file, passing
  in the type constants defined above. A call in an illegal order returns
  a non-empty error message describing the problem; calls in the right
  order return the empty string.
  """
  # self._section will move monotonically through this set. If it ever
  # needs to move backwards, CheckNextIncludeOrder will report an error.
  _INITIAL_SECTION = 0
  _MY_H_SECTION = 1
  _C_SECTION = 2
  _CPP_SECTION = 3
  _OTHER_H_SECTION = 4

  _TYPE_NAMES = {
      _C_SYS_HEADER: 'C system header',
      _CPP_SYS_HEADER: 'C++ system header',
      _LIKELY_MY_HEADER: 'header this file implements',
      _POSSIBLE_MY_HEADER: 'header this file may implement',
      _OTHER_HEADER: 'other header',
      }
  _SECTION_NAMES = {
      _INITIAL_SECTION: "... nothing. (This can't be an error.)",
      _MY_H_SECTION: 'a header this file implements',
      _C_SECTION: 'C system header',
      _CPP_SECTION: 'C++ system header',
      _OTHER_H_SECTION: 'other header',
      }

  def __init__(self):
    self.include_list = [[]]
    self.ResetSection('')

  def FindHeader(self, header):
    """Check if a header has already been included.

    Args:
      header: header to check.

    Returns:
      Line number of previous occurrence, or -1 if the header has not
      been seen before.
    """
    for section_list in self.include_list:
      for f in section_list:
        if f[0] == header:
          return f[1]
    return -1

  def ResetSection(self, directive):
    """Reset section checking for preprocessor directive.

    Args:
      directive: preprocessor directive (e.g. "if", "else").
    """
    # The name of the current section.
    self._section = self._INITIAL_SECTION
    # The path of last found header.
    self._last_header = ''

    # Update list of includes. Note that we never pop from the
    # include list.
    if directive in ('if', 'ifdef', 'ifndef'):
      self.include_list.append([])
    elif directive in ('else', 'elif'):
      self.include_list[-1] = []

  def SetLastHeader(self, header_path):
    self._last_header = header_path

  def CanonicalizeAlphabeticalOrder(self, header_path):
    """Returns a path canonicalized for alphabetical comparison.

    - replaces "-" with "_" so they both compare the same.
    - removes '-inl' since we don't require them to be after the main header.
    - lowercase everything, just in case.

    Args:
      header_path: Path to be canonicalized.

    Returns:
      Canonicalized path.
    """
    return header_path.replace('-inl.h', '.h').replace('-', '_').lower()

  def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path):
    """Check if a header is in alphabetical order with the previous header.

    Args:
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      header_path: Canonicalized header to be checked.
Returns: Returns true if the header is in alphabetical order. """ # If previous section is different from current section, _last_header will # be reset to empty string, so it's always less than current header. # # If previous line was a blank line, assume that the headers are # intentionally sorted the way they are. if (self._last_header > header_path and Match(r'^\s*#\s*include\b', clean_lines.elided[linenum - 1])): return False return True def CheckNextIncludeOrder(self, header_type): """Returns a non-empty error message if the next header is out of order. This function also updates the internal state to be ready to check the next include. Args: header_type: One of the _XXX_HEADER constants defined above. Returns: The empty string if the header is in the right order, or an error message describing what's wrong. """ error_message = ('Found %s after %s' % (self._TYPE_NAMES[header_type], self._SECTION_NAMES[self._section])) last_section = self._section if header_type == _C_SYS_HEADER: if self._section <= self._C_SECTION: self._section = self._C_SECTION else: self._last_header = '' return error_message elif header_type == _CPP_SYS_HEADER: if self._section <= self._CPP_SECTION: self._section = self._CPP_SECTION else: self._last_header = '' return error_message elif header_type == _LIKELY_MY_HEADER: if self._section <= self._MY_H_SECTION: self._section = self._MY_H_SECTION else: self._section = self._OTHER_H_SECTION elif header_type == _POSSIBLE_MY_HEADER: if self._section <= self._MY_H_SECTION: self._section = self._MY_H_SECTION else: # This will always be the fallback because we're not sure # enough that the header is associated with this file. self._section = self._OTHER_H_SECTION else: assert header_type == _OTHER_HEADER self._section = self._OTHER_H_SECTION if last_section != self._section: self._last_header = '' return '' class _CppLintState(object): """Maintains module-wide state..""" def __init__(self): self.verbose_level = 1 # global setting. self.error_count = 0 # global count of reported errors # filters to apply when emitting error messages self.filters = _DEFAULT_FILTERS[:] # backup of filter list. Used to restore the state after each file. self._filters_backup = self.filters[:] self.counting = 'total' # In what way are we counting errors? self.errors_by_category = {} # string to int dict storing error counts # output format: # "emacs" - format that emacs can parse (default) # "vs7" - format that Microsoft Visual Studio 7 can parse self.output_format = 'emacs' def SetOutputFormat(self, output_format): """Sets the output format for errors.""" self.output_format = output_format def SetVerboseLevel(self, level): """Sets the module's verbosity, and returns the previous setting.""" last_verbose_level = self.verbose_level self.verbose_level = level return last_verbose_level def SetCountingStyle(self, counting_style): """Sets the module's counting options.""" self.counting = counting_style def SetFilters(self, filters): """Sets the error-message filters. These filters are applied when deciding whether to emit a given error message. Args: filters: A string of comma-separated filters (eg "+whitespace/indent"). Each filter should start with + or -; else we die. Raises: ValueError: The comma-separated filters did not all start with '+' or '-'. E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter" """ # Default filters always have less priority than the flag ones. 
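    # E.g. filters='-whitespace,+whitespace/braces' suppresses every
    # whitespace category except whitespace/braces; when several filters
    # match a category, the last one in the list wins (see _ShouldPrintError).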
self.filters = _DEFAULT_FILTERS[:] self.AddFilters(filters) def AddFilters(self, filters): """ Adds more filters to the existing list of error-message filters. """ for filt in filters.split(','): clean_filt = filt.strip() if clean_filt: self.filters.append(clean_filt) for filt in self.filters: if not (filt.startswith('+') or filt.startswith('-')): raise ValueError('Every filter in --filters must start with + or -' ' (%s does not)' % filt) def BackupFilters(self): """ Saves the current filter list to backup storage.""" self._filters_backup = self.filters[:] def RestoreFilters(self): """ Restores filters previously backed up.""" self.filters = self._filters_backup[:] def ResetErrorCounts(self): """Sets the module's error statistic back to zero.""" self.error_count = 0 self.errors_by_category = {} def IncrementErrorCount(self, category): """Bumps the module's error statistic.""" self.error_count += 1 if self.counting in ('toplevel', 'detailed'): if self.counting != 'detailed': category = category.split('/')[0] if category not in self.errors_by_category: self.errors_by_category[category] = 0 self.errors_by_category[category] += 1 def PrintErrorCounts(self): """Print a summary of errors by category, and the total.""" for category, count in self.errors_by_category.iteritems(): sys.stderr.write('Category \'%s\' errors found: %d\n' % (category, count)) sys.stderr.write('Total errors found: %d\n' % self.error_count) _cpplint_state = _CppLintState() def _OutputFormat(): """Gets the module's output format.""" return _cpplint_state.output_format def _SetOutputFormat(output_format): """Sets the module's output format.""" _cpplint_state.SetOutputFormat(output_format) def _VerboseLevel(): """Returns the module's verbosity setting.""" return _cpplint_state.verbose_level def _SetVerboseLevel(level): """Sets the module's verbosity, and returns the previous setting.""" return _cpplint_state.SetVerboseLevel(level) def _SetCountingStyle(level): """Sets the module's counting options.""" _cpplint_state.SetCountingStyle(level) def _Filters(): """Returns the module's list of output filters, as a list.""" return _cpplint_state.filters def _SetFilters(filters): """Sets the module's error-message filters. These filters are applied when deciding whether to emit a given error message. Args: filters: A string of comma-separated filters (eg "whitespace/indent"). Each filter should start with + or -; else we die. """ _cpplint_state.SetFilters(filters) def _AddFilters(filters): """Adds more filter overrides. Unlike _SetFilters, this function does not reset the current list of filters available. Args: filters: A string of comma-separated filters (eg "whitespace/indent"). Each filter should start with + or -; else we die. """ _cpplint_state.AddFilters(filters) def _BackupFilters(): """ Saves the current filter list to backup storage.""" _cpplint_state.BackupFilters() def _RestoreFilters(): """ Restores filters previously backed up.""" _cpplint_state.RestoreFilters() class _FunctionState(object): """Tracks current function name and the number of lines in its body.""" _NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc. _TEST_TRIGGER = 400 # about 50% more than _NORMAL_TRIGGER. def __init__(self): self.in_a_function = False self.lines_in_function = 0 self.current_function = '' def Begin(self, function_name): """Start analyzing function body. Args: function_name: The name of the function being tracked. 
""" self.in_a_function = True self.lines_in_function = 0 self.current_function = function_name def Count(self): """Count line in current function body.""" if self.in_a_function: self.lines_in_function += 1 def Check(self, error, filename, linenum): """Report if too many lines in function body. Args: error: The function to call with any errors found. filename: The name of the current file. linenum: The number of the line to check. """ if not self.in_a_function: return if Match(r'T(EST|est)', self.current_function): base_trigger = self._TEST_TRIGGER else: base_trigger = self._NORMAL_TRIGGER trigger = base_trigger * 2**_VerboseLevel() if self.lines_in_function > trigger: error_level = int(math.log(self.lines_in_function / base_trigger, 2)) # 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ... if error_level > 5: error_level = 5 error(filename, linenum, 'readability/fn_size', error_level, 'Small and focused functions are preferred:' ' %s has %d non-comment lines' ' (error triggered by exceeding %d lines).' % ( self.current_function, self.lines_in_function, trigger)) def End(self): """Stop analyzing function body.""" self.in_a_function = False class _IncludeError(Exception): """Indicates a problem with the include order in a file.""" pass class FileInfo(object): """Provides utility functions for filenames. FileInfo provides easy access to the components of a file's path relative to the project root. """ def __init__(self, filename): self._filename = filename def FullName(self): """Make Windows paths like Unix.""" return os.path.abspath(self._filename).replace('\\', '/') def RepositoryName(self): """FullName after removing the local path to the repository. If we have a real absolute path name here we can try to do something smart: detecting the root of the checkout and truncating /path/to/checkout from the name so that we get header guards that don't include things like "C:\Documents and Settings\..." or "/home/username/..." in them and thus people on different computers who have checked the source out to different locations won't see bogus errors. """ fullname = self.FullName() if os.path.exists(fullname): project_dir = os.path.dirname(fullname) if _project_root: prefix = os.path.commonprefix([_project_root, project_dir]) return fullname[len(prefix) + 1:] if os.path.exists(os.path.join(project_dir, ".svn")): # If there's a .svn file in the current directory, we recursively look # up the directory tree for the top of the SVN checkout root_dir = project_dir one_up_dir = os.path.dirname(root_dir) while os.path.exists(os.path.join(one_up_dir, ".svn")): root_dir = os.path.dirname(root_dir) one_up_dir = os.path.dirname(one_up_dir) prefix = os.path.commonprefix([root_dir, project_dir]) return fullname[len(prefix) + 1:] # Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by # searching up from the current path. root_dir = os.path.dirname(fullname) while (root_dir != os.path.dirname(root_dir) and not os.path.exists(os.path.join(root_dir, ".git")) and not os.path.exists(os.path.join(root_dir, ".hg")) and not os.path.exists(os.path.join(root_dir, ".svn"))): root_dir = os.path.dirname(root_dir) if (os.path.exists(os.path.join(root_dir, ".git")) or os.path.exists(os.path.join(root_dir, ".hg")) or os.path.exists(os.path.join(root_dir, ".svn"))): prefix = os.path.commonprefix([root_dir, project_dir]) return fullname[len(prefix) + 1:] # Don't know what to do; header guard warnings may be wrong... 
    return fullname

  def Split(self):
    """Splits the file into the directory, basename, and extension.

    For 'chrome/browser/browser.cc', Split() would
    return ('chrome/browser', 'browser', '.cc')

    Returns:
      A tuple of (directory, basename, extension).
    """

    googlename = self.RepositoryName()
    project, rest = os.path.split(googlename)
    return (project,) + os.path.splitext(rest)

  def BaseName(self):
    """File base name - text after the final slash, before the final period."""
    return self.Split()[1]

  def Extension(self):
    """File extension - text following the final period."""
    return self.Split()[2]

  def NoExtension(self):
    """File name with the extension removed (directory + base name)."""
    return '/'.join(self.Split()[0:2])

  def IsSource(self):
    """File has a source file extension."""
    return _IsSourceExtension(self.Extension()[1:])


def _ShouldPrintError(category, confidence, linenum):
  """If confidence >= verbose, category passes filter and is not suppressed."""

  # There are three ways we might decide not to print an error message:
  # a "NOLINT(category)" comment appears in the source,
  # the verbosity level isn't high enough, or the filters filter it out.
  if IsErrorSuppressedByNolint(category, linenum):
    return False

  if confidence < _cpplint_state.verbose_level:
    return False

  is_filtered = False
  for one_filter in _Filters():
    if one_filter.startswith('-'):
      if category.startswith(one_filter[1:]):
        is_filtered = True
    elif one_filter.startswith('+'):
      if category.startswith(one_filter[1:]):
        is_filtered = False
    else:
      assert False  # should have been checked for in SetFilters.
  if is_filtered:
    return False

  return True


def Error(filename, linenum, category, confidence, message):
  """Logs the fact we've found a lint error.

  We log where the error was found, and also our confidence in the error,
  that is, how certain we are this is a legitimate style regression, and
  not a misidentification or a use that's sometimes justified.

  False positives can be suppressed by the use of "NOLINT(category)"
  comments on the offending line. These are parsed into _error_suppressions.

  Args:
    filename: The name of the file containing the error.
    linenum: The number of the line containing the error.
    category: A string used to describe the "category" this bug
      falls under: "whitespace", say, or "runtime". Categories
      may have a hierarchy separated by slashes: "whitespace/indent".
    confidence: A number from 1-5 representing a confidence score for
      the error, with 5 meaning that we are certain of the problem,
      and 1 meaning that it could be a legitimate construct.
    message: The error message.
  """
  if _ShouldPrintError(category, confidence, linenum):
    _cpplint_state.IncrementErrorCount(category)
    if _cpplint_state.output_format == 'vs7':
      sys.stderr.write('%s(%s): %s [%s] [%d]\n' % (
          filename, linenum, message, category, confidence))
    elif _cpplint_state.output_format == 'eclipse':
      sys.stderr.write('%s:%s: warning: %s [%s] [%d]\n' % (
          filename, linenum, message, category, confidence))
    else:
      sys.stderr.write('%s:%s: %s [%s] [%d]\n' % (
          filename, linenum, message, category, confidence))


# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
    r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
# Match a single C style comment on the same line.
_RE_PATTERN_C_COMMENTS = r'/\*(?:[^*]|\*(?!/))*\*/'
# Matches multi-line C style comments.
# This RE is a little bit more complicated than one might expect, because we
# have to be careful about how we remove the whitespace around the comment
# so that comments inside statements are handled better.
# The current rule is: We only clear spaces from both sides when we're at the
# end of the line. Otherwise, we try to remove spaces from the right side; if
# this doesn't work, we try the left side, but only if there's a non-word
# character on the right.
_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
    r'(\s*' + _RE_PATTERN_C_COMMENTS + r'\s*$|' +
    _RE_PATTERN_C_COMMENTS + r'\s+|' +
    r'\s+' + _RE_PATTERN_C_COMMENTS + r'(?=\W)|' +
    _RE_PATTERN_C_COMMENTS + r')')


def IsCppString(line):
  """Does line terminate so that the next symbol is in a string constant.

  This function does not consider single-line nor multi-line comments.

  Args:
    line: is a partial line of code starting from index 0 up to some point n.

  Returns:
    True, if the next character appended to 'line' would be inside a string
    constant.
  """

  line = line.replace(r'\\', 'XX')  # after this, \\" does not match to \"
  return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1


def CleanseRawStrings(raw_lines):
  """Removes C++11 raw strings from lines.

    Before:
      static const char kData[] = R"(
          multi-line string
          )";

    After:
      static const char kData[] = ""
          (replaced by blank line)
          "";

  Args:
    raw_lines: list of raw lines.

  Returns:
    list of lines with C++11 raw strings replaced by empty strings.
  """

  delimiter = None
  lines_without_raw_strings = []
  for line in raw_lines:
    if delimiter:
      # Inside a raw string, look for the end
      end = line.find(delimiter)
      if end >= 0:
        # Found the end of the string, match leading space for this
        # line and resume copying the original lines, and also insert
        # a "" on the last line.
        leading_space = Match(r'^(\s*)\S', line)
        line = leading_space.group(1) + '""' + line[end + len(delimiter):]
        delimiter = None
      else:
        # Haven't found the end yet, append a blank line.
        line = '""'

    # Look for beginning of a raw string, and replace them with
    # empty strings. This is done in a loop to handle multiple raw
    # strings on the same line.
    while delimiter is None:
      # Look for beginning of a raw string.
      # See 2.14.15 [lex.string] for syntax.
      #
      # Once we have matched a raw string, we check the prefix of the
      # line to make sure that the line is not part of a single line
      # comment. It's done this way because we remove raw strings
      # before removing comments as opposed to removing comments
      # before removing raw strings. This is because there are some
      # cpplint checks that require the comments to be preserved, but
      # we don't want to check comments that are inside raw strings.
      matched = Match(r'^(.*?)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line)
      if (matched and
          not Match(r'^([^\'"]|\'(\\.|[^\'])*\'|"(\\.|[^"])*")*//',
                    matched.group(1))):
        delimiter = ')' + matched.group(2) + '"'

        end = matched.group(3).find(delimiter)
        if end >= 0:
          # Raw string ended on same line
          line = (matched.group(1) + '""' +
                  matched.group(3)[end + len(delimiter):])
          delimiter = None
        else:
          # Start of a multi-line raw string
          line = matched.group(1) + '""'
      else:
        break

    lines_without_raw_strings.append(line)

  # TODO(unknown): if delimiter is not None here, we might want to
  # emit a warning for unterminated string.
return lines_without_raw_strings def FindNextMultiLineCommentStart(lines, lineix): """Find the beginning marker for a multiline comment.""" while lineix < len(lines): if lines[lineix].strip().startswith('/*'): # Only return this marker if the comment goes beyond this line if lines[lineix].strip().find('*/', 2) < 0: return lineix lineix += 1 return len(lines) def FindNextMultiLineCommentEnd(lines, lineix): """We are inside a comment, find the end marker.""" while lineix < len(lines): if lines[lineix].strip().endswith('*/'): return lineix lineix += 1 return len(lines) def RemoveMultiLineCommentsFromRange(lines, begin, end): """Clears a range of lines for multi-line comments.""" # Having // dummy comments makes the lines non-empty, so we will not get # unnecessary blank line warnings later in the code. for i in range(begin, end): lines[i] = '/**/' def RemoveMultiLineComments(filename, lines, error): """Removes multiline (c-style) comments from lines.""" lineix = 0 while lineix < len(lines): lineix_begin = FindNextMultiLineCommentStart(lines, lineix) if lineix_begin >= len(lines): return lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin) if lineix_end >= len(lines): error(filename, lineix_begin + 1, 'readability/multiline_comment', 5, 'Could not find end of multi-line comment') return RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1) lineix = lineix_end + 1 def CleanseComments(line): """Removes //-comments and single-line C-style /* */ comments. Args: line: A line of C++ source. Returns: The line with single-line comments removed. """ commentpos = line.find('//') if commentpos != -1 and not IsCppString(line[:commentpos]): line = line[:commentpos].rstrip() # get rid of /* ... */ return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line) class CleansedLines(object): """Holds 4 copies of all lines with different preprocessing applied to them. 1) elided member contains lines without strings and comments. 2) lines member contains lines without comments. 3) raw_lines member contains all the lines without processing. 4) lines_without_raw_strings member is same as raw_lines, but with C++11 raw strings removed. All these members are of <type 'list'>, and of the same length. """ def __init__(self, lines): self.elided = [] self.lines = [] self.raw_lines = lines self.num_lines = len(lines) self.lines_without_raw_strings = CleanseRawStrings(lines) for linenum in range(len(self.lines_without_raw_strings)): self.lines.append(CleanseComments( self.lines_without_raw_strings[linenum])) elided = self._CollapseStrings(self.lines_without_raw_strings[linenum]) self.elided.append(CleanseComments(elided)) def NumLines(self): """Returns the number of lines represented.""" return self.num_lines @staticmethod def _CollapseStrings(elided): """Collapses strings and chars on a line to simple "" or '' blocks. We nix strings first so we're not fooled by text like '"http://"' Args: elided: The line being processed. Returns: The line with collapsed strings. """ if _RE_PATTERN_INCLUDE.match(elided): return elided # Remove escaped characters first to make quote/single quote collapsing # basic. Things that look like escaped characters shouldn't occur # outside of strings and chars. elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided) # Replace quoted strings and digit separators. Both single quotes # and double quotes are processed in the same loop, otherwise # nested quotes wouldn't work. 
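    # Illustrative examples (assuming the patterns above):
    #   Format("%d", 'x');       collapses to  Format("", '');
    #   int kilo = 1'000'000;    collapses to  int kilo = 1000000;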
collapsed = '' while True: # Find the first quote character match = Match(r'^([^\'"]*)([\'"])(.*)$', elided) if not match: collapsed += elided break head, quote, tail = match.groups() if quote == '"': # Collapse double quoted strings second_quote = tail.find('"') if second_quote >= 0: collapsed += head + '""' elided = tail[second_quote + 1:] else: # Unmatched double quote, don't bother processing the rest # of the line since this is probably a multiline string. collapsed += elided break else: # Found single quote, check nearby text to eliminate digit separators. # # There is no special handling for floating point here, because # the integer/fractional/exponent parts would all be parsed # correctly as long as there are digits on both sides of the # separator. So we are fine as long as we don't see something # like "0.'3" (gcc 4.9.0 will not allow this literal). if Search(r'\b(?:0[bBxX]?|[1-9])[0-9a-fA-F]*$', head): match_literal = Match(r'^((?:\'?[0-9a-zA-Z_])*)(.*)$', "'" + tail) collapsed += head + match_literal.group(1).replace("'", '') elided = match_literal.group(2) else: second_quote = tail.find('\'') if second_quote >= 0: collapsed += head + "''" elided = tail[second_quote + 1:] else: # Unmatched single quote collapsed += elided break return collapsed def FindEndOfExpressionInLine(line, startpos, stack): """Find the position just after the end of current parenthesized expression. Args: line: a CleansedLines line. startpos: start searching at this position. stack: nesting stack at startpos. Returns: On finding matching end: (index just after matching end, None) On finding an unclosed expression: (-1, None) Otherwise: (-1, new stack at end of this line) """ for i in xrange(startpos, len(line)): char = line[i] if char in '([{': # Found start of parenthesized expression, push to expression stack stack.append(char) elif char == '<': # Found potential start of template argument list if i > 0 and line[i - 1] == '<': # Left shift operator if stack and stack[-1] == '<': stack.pop() if not stack: return (-1, None) elif i > 0 and Search(r'\boperator\s*$', line[0:i]): # operator<, don't add to stack continue else: # Tentative start of template argument list stack.append('<') elif char in ')]}': # Found end of parenthesized expression. # # If we are currently expecting a matching '>', the pending '<' # must have been an operator. Remove them from expression stack. while stack and stack[-1] == '<': stack.pop() if not stack: return (-1, None) if ((stack[-1] == '(' and char == ')') or (stack[-1] == '[' and char == ']') or (stack[-1] == '{' and char == '}')): stack.pop() if not stack: return (i + 1, None) else: # Mismatched parentheses return (-1, None) elif char == '>': # Found potential end of template argument list. # Ignore "->" and operator functions if (i > 0 and (line[i - 1] == '-' or Search(r'\boperator\s*$', line[0:i - 1]))): continue # Pop the stack if there is a matching '<'. Otherwise, ignore # this '>' since it must be an operator. if stack: if stack[-1] == '<': stack.pop() if not stack: return (i + 1, None) elif char == ';': # Found something that look like end of statements. If we are currently # expecting a '>', the matching '<' must have been an operator, since # template argument list should not contain statements. 
while stack and stack[-1] == '<': stack.pop() if not stack: return (-1, None) # Did not find end of expression or unbalanced parentheses on this line return (-1, stack) def CloseExpression(clean_lines, linenum, pos): """If input points to ( or { or [ or <, finds the position that closes it. If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the linenum/pos that correspond to the closing of the expression. TODO(unknown): cpplint spends a fair bit of time matching parentheses. Ideally we would want to index all opening and closing parentheses once and have CloseExpression be just a simple lookup, but due to preprocessor tricks, this is not so easy. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. pos: A position on the line. Returns: A tuple (line, linenum, pos) pointer *past* the closing brace, or (line, len(lines), -1) if we never find a close. Note we ignore strings and comments when matching; and the line we return is the 'cleansed' line at linenum. """ line = clean_lines.elided[linenum] if (line[pos] not in '({[<') or Match(r'<[<=]', line[pos:]): return (line, clean_lines.NumLines(), -1) # Check first line (end_pos, stack) = FindEndOfExpressionInLine(line, pos, []) if end_pos > -1: return (line, linenum, end_pos) # Continue scanning forward while stack and linenum < clean_lines.NumLines() - 1: linenum += 1 line = clean_lines.elided[linenum] (end_pos, stack) = FindEndOfExpressionInLine(line, 0, stack) if end_pos > -1: return (line, linenum, end_pos) # Did not find end of expression before end of file, give up return (line, clean_lines.NumLines(), -1) def FindStartOfExpressionInLine(line, endpos, stack): """Find position at the matching start of current expression. This is almost the reverse of FindEndOfExpressionInLine, but note that the input position and returned position differs by 1. Args: line: a CleansedLines line. endpos: start searching at this position. stack: nesting stack at endpos. Returns: On finding matching start: (index at matching start, None) On finding an unclosed expression: (-1, None) Otherwise: (-1, new stack at beginning of this line) """ i = endpos while i >= 0: char = line[i] if char in ')]}': # Found end of expression, push to expression stack stack.append(char) elif char == '>': # Found potential end of template argument list. # # Ignore it if it's a "->" or ">=" or "operator>" if (i > 0 and (line[i - 1] == '-' or Match(r'\s>=\s', line[i - 1:]) or Search(r'\boperator\s*$', line[0:i]))): i -= 1 else: stack.append('>') elif char == '<': # Found potential start of template argument list if i > 0 and line[i - 1] == '<': # Left shift operator i -= 1 else: # If there is a matching '>', we can pop the expression stack. # Otherwise, ignore this '<' since it must be an operator. if stack and stack[-1] == '>': stack.pop() if not stack: return (i, None) elif char in '([{': # Found start of expression. # # If there are any unmatched '>' on the stack, they must be # operators. Remove those. while stack and stack[-1] == '>': stack.pop() if not stack: return (-1, None) if ((char == '(' and stack[-1] == ')') or (char == '[' and stack[-1] == ']') or (char == '{' and stack[-1] == '}')): stack.pop() if not stack: return (i, None) else: # Mismatched parentheses return (-1, None) elif char == ';': # Found something that look like end of statements. If we are currently # expecting a '<', the matching '>' must have been an operator, since # template argument list should not contain statements. 
while stack and stack[-1] == '>': stack.pop() if not stack: return (-1, None) i -= 1 return (-1, stack) def ReverseCloseExpression(clean_lines, linenum, pos): """If input points to ) or } or ] or >, finds the position that opens it. If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the linenum/pos that correspond to the opening of the expression. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. pos: A position on the line. Returns: A tuple (line, linenum, pos) pointer *at* the opening brace, or (line, 0, -1) if we never find the matching opening brace. Note we ignore strings and comments when matching; and the line we return is the 'cleansed' line at linenum. """ line = clean_lines.elided[linenum] if line[pos] not in ')}]>': return (line, 0, -1) # Check last line (start_pos, stack) = FindStartOfExpressionInLine(line, pos, []) if start_pos > -1: return (line, linenum, start_pos) # Continue scanning backward while stack and linenum > 0: linenum -= 1 line = clean_lines.elided[linenum] (start_pos, stack) = FindStartOfExpressionInLine(line, len(line) - 1, stack) if start_pos > -1: return (line, linenum, start_pos) # Did not find start of expression before beginning of file, give up return (line, 0, -1) def CheckForCopyright(filename, lines, error): """Logs an error if no Copyright message appears at the top of the file.""" # We'll say it should occur by line 10. Don't forget there's a # dummy line at the front. for line in xrange(1, min(len(lines), 11)): if re.search(r'Copyright', lines[line], re.I): break else: # means no copyright line was found error(filename, 0, 'legal/copyright', 5, 'No copyright message found. ' 'You should have a line: "Copyright [year] <Copyright Owner>"') def GetIndentLevel(line): """Return the number of leading spaces in line. Args: line: A string to check. Returns: An integer count of leading spaces, possibly zero. """ indent = Match(r'^( *)\S', line) if indent: return len(indent.group(1)) else: return 0 def GetHeaderGuardCPPVariable(filename): """Returns the CPP variable that should be used as a header guard. Args: filename: The name of a C++ header file. Returns: The CPP variable that should be used as a header guard in the named file. """ # Restores original filename in case that cpplint is invoked from Emacs's # flymake. filename = re.sub(r'_flymake\.h$', '.h', filename) filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename) # Replace 'c++' with 'cpp'. filename = filename.replace('C++', 'cpp').replace('c++', 'cpp') fileinfo = FileInfo(filename) file_path_from_root = fileinfo.RepositoryName() if _root: file_path_from_root = re.sub('^' + _root + os.sep, '', file_path_from_root) return re.sub(r'[^a-zA-Z0-9]', '_', file_path_from_root).upper() + '_' def CheckForHeaderGuard(filename, clean_lines, error): """Checks that the file contains a header guard. Logs an error if no #ifndef header guard is present. For other headers, checks that the full pathname is used. Args: filename: The name of the C++ header file. clean_lines: A CleansedLines instance containing the file. error: The function to call with any errors found. """ # Don't check for header guards if there are error suppression # comments somewhere in this file. # # Because this is silencing a warning for a nonexistent line, we # only support the very specific NOLINT(build/header_guard) syntax, # and not the general NOLINT or NOLINT(*) syntax. 
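  # (Illustrative: a file containing "// NOLINT(build/header_guard)" on any
  # line skips this check entirely; a bare "// NOLINT" does not.)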
raw_lines = clean_lines.lines_without_raw_strings for i in raw_lines: if Search(r'//\s*NOLINT\(build/header_guard\)', i): return cppvar = GetHeaderGuardCPPVariable(filename) ifndef = '' ifndef_linenum = 0 define = '' endif = '' endif_linenum = 0 for linenum, line in enumerate(raw_lines): linesplit = line.split() if len(linesplit) >= 2: # find the first occurrence of #ifndef and #define, save arg if not ifndef and linesplit[0] == '#ifndef': # set ifndef to the header guard presented on the #ifndef line. ifndef = linesplit[1] ifndef_linenum = linenum if not define and linesplit[0] == '#define': define = linesplit[1] # find the last occurrence of #endif, save entire line if line.startswith('#endif'): endif = line endif_linenum = linenum if not ifndef or not define or ifndef != define: error(filename, 0, 'build/header_guard', 5, 'No #ifndef header guard found, suggested CPP variable is: %s' % cppvar) return # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__ # for backward compatibility. if ifndef != cppvar: error_level = 0 if ifndef != cppvar + '_': error_level = 5 ParseNolintSuppressions(filename, raw_lines[ifndef_linenum], ifndef_linenum, error) error(filename, ifndef_linenum, 'build/header_guard', error_level, '#ifndef header guard has wrong style, please use: %s' % cppvar) # Check for "//" comments on endif line. ParseNolintSuppressions(filename, raw_lines[endif_linenum], endif_linenum, error) match = Match(r'#endif\s*//\s*' + cppvar + r'(_)?\b', endif) if match: if match.group(1) == '_': # Issue low severity warning for deprecated double trailing underscore error(filename, endif_linenum, 'build/header_guard', 0, '#endif line should be "#endif // %s"' % cppvar) return # Didn't find the corresponding "//" comment. If this file does not # contain any "//" comments at all, it could be that the compiler # only wants "/**/" comments, look for those instead. no_single_line_comments = True for i in xrange(1, len(raw_lines) - 1): line = raw_lines[i] if Match(r'^(?:(?:\'(?:\.|[^\'])*\')|(?:"(?:\.|[^"])*")|[^\'"])*//', line): no_single_line_comments = False break if no_single_line_comments: match = Match(r'#endif\s*/\*\s*' + cppvar + r'(_)?\s*\*/', endif) if match: if match.group(1) == '_': # Low severity warning for double trailing underscore error(filename, endif_linenum, 'build/header_guard', 0, '#endif line should be "#endif /* %s */"' % cppvar) return # Didn't find anything error(filename, endif_linenum, 'build/header_guard', 5, '#endif line should be "#endif // %s"' % cppvar) def CheckHeaderFileIncluded(filename, include_state, error): """Logs an error if a .cc file does not include its header.""" # Do not check test files fileinfo = FileInfo(filename) if Search(_TEST_FILE_SUFFIX, fileinfo.BaseName()): return headerfile = filename[0:len(filename) - len(fileinfo.Extension())] + '.h' if not os.path.exists(headerfile): return headername = FileInfo(headerfile).RepositoryName() first_include = 0 for section_list in include_state.include_list: for f in section_list: if headername in f[0] or f[0] in headername: return if not first_include: first_include = f[1] error(filename, first_include, 'build/include', 5, '%s should include its header file %s' % (fileinfo.RepositoryName(), headername)) def CheckForBadCharacters(filename, lines, error): """Logs an error for each line containing bad characters. Two kinds of bad characters: 1. Unicode replacement characters: These indicate that either the file contained invalid UTF-8 (likely) or Unicode replacement characters (which it shouldn't). 
Note that it's possible for this to throw off line numbering if the invalid UTF-8 occurred adjacent to a newline. 2. NUL bytes. These are problematic for some tools. Args: filename: The name of the current file. lines: An array of strings, each representing a line of the file. error: The function to call with any errors found. """ for linenum, line in enumerate(lines): if u'\ufffd' in line: error(filename, linenum, 'readability/utf8', 5, 'Line contains invalid UTF-8 (or Unicode replacement character).') if '\0' in line: error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.') def CheckForNewlineAtEOF(filename, lines, error): """Logs an error if there is no newline char at the end of the file. Args: filename: The name of the current file. lines: An array of strings, each representing a line of the file. error: The function to call with any errors found. """ # The array lines() was created by adding two newlines to the # original file (go figure), then splitting on \n. # To verify that the file ends in \n, we just have to make sure the # last-but-two element of lines() exists and is empty. if len(lines) < 3 or lines[-2]: error(filename, len(lines) - 2, 'whitespace/ending_newline', 5, 'Could not find a newline character at the end of the file.') def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error): """Logs an error if we see /* ... */ or "..." that extend past one line. /* ... */ comments are legit inside macros, for one line. Otherwise, we prefer // comments, so it's ok to warn about the other. Likewise, it's ok for strings to extend across multiple lines, as long as a line continuation character (backslash) terminates each line. Although not currently prohibited by the C++ style guide, it's ugly and unnecessary. We don't do well with either in this lint program, so we warn about both. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # Remove all \\ (escaped backslashes) from the line. They are OK, and the # second (escaped) slash may trigger later \" detection erroneously. line = line.replace('\\\\', '') if line.count('/*') > line.count('*/'): error(filename, linenum, 'readability/multiline_comment', 5, 'Complex multi-line /*...*/-style comment found. ' 'Lint may give bogus warnings. ' 'Consider replacing these with //-style comments, ' 'with #if 0...#endif, ' 'or with more clearly structured multi-line comments.') if (line.count('"') - line.count('\\"')) % 2: error(filename, linenum, 'readability/multiline_string', 5, 'Multi-line string ("...") found. This lint script doesn\'t ' 'do well with such strings, and may give bogus warnings. ' 'Use C++11 raw strings or concatenation instead.') # (non-threadsafe name, thread-safe alternative, validation pattern) # # The validation pattern is used to eliminate false positives such as: # _rand(); // false positive due to substring match. # ->rand(); // some member function rand(). # ACMRandom rand(seed); // some variable named rand. # ISAACRandom rand(); // another variable named rand. # # Basically we require the return value of these functions to be used # in some expression context on the same line by matching on some # operator before the function name. This eliminates constructors and # member function calls. 
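# For example, '= rand();' is flagged because '=' matches the prefix below,
# while '_rand();' and '->rand();' are not: '_' and '>' immediately before
# the name do not match _UNSAFE_FUNC_PREFIX.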
_UNSAFE_FUNC_PREFIX = r'(?:[-+*/=%^&|(<]\s*|>\s+)' _THREADING_LIST = ( ('asctime(', 'asctime_r(', _UNSAFE_FUNC_PREFIX + r'asctime\([^)]+\)'), ('ctime(', 'ctime_r(', _UNSAFE_FUNC_PREFIX + r'ctime\([^)]+\)'), ('getgrgid(', 'getgrgid_r(', _UNSAFE_FUNC_PREFIX + r'getgrgid\([^)]+\)'), ('getgrnam(', 'getgrnam_r(', _UNSAFE_FUNC_PREFIX + r'getgrnam\([^)]+\)'), ('getlogin(', 'getlogin_r(', _UNSAFE_FUNC_PREFIX + r'getlogin\(\)'), ('getpwnam(', 'getpwnam_r(', _UNSAFE_FUNC_PREFIX + r'getpwnam\([^)]+\)'), ('getpwuid(', 'getpwuid_r(', _UNSAFE_FUNC_PREFIX + r'getpwuid\([^)]+\)'), ('gmtime(', 'gmtime_r(', _UNSAFE_FUNC_PREFIX + r'gmtime\([^)]+\)'), ('localtime(', 'localtime_r(', _UNSAFE_FUNC_PREFIX + r'localtime\([^)]+\)'), ('rand(', 'rand_r(', _UNSAFE_FUNC_PREFIX + r'rand\(\)'), ('strtok(', 'strtok_r(', _UNSAFE_FUNC_PREFIX + r'strtok\([^)]+\)'), ('ttyname(', 'ttyname_r(', _UNSAFE_FUNC_PREFIX + r'ttyname\([^)]+\)'), ) def CheckPosixThreading(filename, clean_lines, linenum, error): """Checks for calls to thread-unsafe functions. Much code has been originally written without consideration of multi-threading. Also, engineers are relying on their old experience; they have learned posix before threading extensions were added. These tests guide the engineers to use thread-safe functions (when using posix directly). Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] for single_thread_func, multithread_safe_func, pattern in _THREADING_LIST: # Additional pattern matching check to confirm that this is the # function we are looking for if Search(pattern, line): error(filename, linenum, 'runtime/threadsafe_fn', 2, 'Consider using ' + multithread_safe_func + '...) instead of ' + single_thread_func + '...) for improved thread safety.') def CheckVlogArguments(filename, clean_lines, linenum, error): """Checks that VLOG() is only used for defining a logging level. For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and VLOG(FATAL) are not. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] if Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line): error(filename, linenum, 'runtime/vlog', 5, 'VLOG() should be used with numeric verbosity level. ' 'Use LOG() if you want symbolic severity levels.') # Matches invalid increment: *count++, which moves pointer instead of # incrementing a value. _RE_PATTERN_INVALID_INCREMENT = re.compile( r'^\s*\*\w+(\+\+|--);') def CheckInvalidIncrement(filename, clean_lines, linenum, error): """Checks for invalid increment *count++. For example following function: void increment_counter(int* count) { *count++; } is invalid, because it effectively does count++, moving pointer, and should be replaced with ++*count, (*count)++ or *count += 1. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. 
""" line = clean_lines.elided[linenum] if _RE_PATTERN_INVALID_INCREMENT.match(line): error(filename, linenum, 'runtime/invalid_increment', 5, 'Changing pointer instead of value (or unused value of operator*).') def IsMacroDefinition(clean_lines, linenum): if Search(r'^#define', clean_lines[linenum]): return True if linenum > 0 and Search(r'\\$', clean_lines[linenum - 1]): return True return False def IsForwardClassDeclaration(clean_lines, linenum): return Match(r'^\s*(\btemplate\b)*.*class\s+\w+;\s*$', clean_lines[linenum]) class _BlockInfo(object): """Stores information about a generic block of code.""" def __init__(self, linenum, seen_open_brace): self.starting_linenum = linenum self.seen_open_brace = seen_open_brace self.open_parentheses = 0 self.inline_asm = _NO_ASM self.check_namespace_indentation = False def CheckBegin(self, filename, clean_lines, linenum, error): """Run checks that applies to text up to the opening brace. This is mostly for checking the text after the class identifier and the "{", usually where the base class is specified. For other blocks, there isn't much to check, so we always pass. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ pass def CheckEnd(self, filename, clean_lines, linenum, error): """Run checks that applies to text after the closing brace. This is mostly used for checking end of namespace comments. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ pass def IsBlockInfo(self): """Returns true if this block is a _BlockInfo. This is convenient for verifying that an object is an instance of a _BlockInfo, but not an instance of any of the derived classes. Returns: True for this class, False for derived classes. """ return self.__class__ == _BlockInfo class _ExternCInfo(_BlockInfo): """Stores information about an 'extern "C"' block.""" def __init__(self, linenum): _BlockInfo.__init__(self, linenum, True) class _ClassInfo(_BlockInfo): """Stores information about a class.""" def __init__(self, name, class_or_struct, clean_lines, linenum): _BlockInfo.__init__(self, linenum, False) self.name = name self.is_derived = False self.check_namespace_indentation = True if class_or_struct == 'struct': self.access = 'public' self.is_struct = True else: self.access = 'private' self.is_struct = False # Remember initial indentation level for this class. Using raw_lines here # instead of elided to account for leading comments. self.class_indent = GetIndentLevel(clean_lines.raw_lines[linenum]) # Try to find the end of the class. This will be confused by things like: # class A { # } *x = { ... # # But it's still good enough for CheckSectionSpacing. self.last_line = 0 depth = 0 for i in range(linenum, clean_lines.NumLines()): line = clean_lines.elided[i] depth += line.count('{') - line.count('}') if not depth: self.last_line = i break def CheckBegin(self, filename, clean_lines, linenum, error): # Look for a bare ':' if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]): self.is_derived = True def CheckEnd(self, filename, clean_lines, linenum, error): # If there is a DISALLOW macro, it should appear near the end of # the class. 
seen_last_thing_in_class = False for i in xrange(linenum - 1, self.starting_linenum, -1): match = Search( r'\b(DISALLOW_COPY_AND_ASSIGN|DISALLOW_IMPLICIT_CONSTRUCTORS)\(' + self.name + r'\)', clean_lines.elided[i]) if match: if seen_last_thing_in_class: error(filename, i, 'readability/constructors', 3, match.group(1) + ' should be the last thing in the class') break if not Match(r'^\s*$', clean_lines.elided[i]): seen_last_thing_in_class = True # Check that closing brace is aligned with beginning of the class. # Only do this if the closing brace is indented by only whitespaces. # This means we will not check single-line class definitions. indent = Match(r'^( *)\}', clean_lines.elided[linenum]) if indent and len(indent.group(1)) != self.class_indent: if self.is_struct: parent = 'struct ' + self.name else: parent = 'class ' + self.name error(filename, linenum, 'whitespace/indent', 3, 'Closing brace should be aligned with beginning of %s' % parent) class _NamespaceInfo(_BlockInfo): """Stores information about a namespace.""" def __init__(self, name, linenum): _BlockInfo.__init__(self, linenum, False) self.name = name or '' self.check_namespace_indentation = True def CheckEnd(self, filename, clean_lines, linenum, error): """Check end of namespace comments.""" line = clean_lines.raw_lines[linenum] # Check how many lines is enclosed in this namespace. Don't issue # warning for missing namespace comments if there aren't enough # lines. However, do apply checks if there is already an end of # namespace comment and it's incorrect. # # TODO(unknown): We always want to check end of namespace comments # if a namespace is large, but sometimes we also want to apply the # check if a short namespace contained nontrivial things (something # other than forward declarations). There is currently no logic on # deciding what these nontrivial things are, so this check is # triggered by namespace size only, which works most of the time. if (linenum - self.starting_linenum < 10 and not Match(r'^\s*};*\s*(//|/\*).*\bnamespace\b', line)): return # Look for matching comment at end of namespace. # # Note that we accept C style "/* */" comments for terminating # namespaces, so that code that terminate namespaces inside # preprocessor macros can be cpplint clean. # # We also accept stuff like "// end of namespace <name>." with the # period at the end. # # Besides these, we don't accept anything else, otherwise we might # get false negatives when existing comment is a substring of the # expected namespace. 
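    # Accepted terminators for a namespace named "foo" (illustrative):
    #   }  // namespace foo
    #   }  /* namespace foo */
    #   }  // end of namespace foo.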
if self.name: # Named namespace if not Match((r'^\s*};*\s*(//|/\*).*\bnamespace\s+' + re.escape(self.name) + r'[\*/\.\\\s]*$'), line): error(filename, linenum, 'readability/namespace', 5, 'Namespace should be terminated with "// namespace %s"' % self.name) else: # Anonymous namespace if not Match(r'^\s*};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line): # If "// namespace anonymous" or "// anonymous namespace (more text)", # mention "// anonymous namespace" as an acceptable form if Match(r'^\s*}.*\b(namespace anonymous|anonymous namespace)\b', line): error(filename, linenum, 'readability/namespace', 5, 'Anonymous namespace should be terminated with "// namespace"' ' or "// anonymous namespace"') else: error(filename, linenum, 'readability/namespace', 5, 'Anonymous namespace should be terminated with "// namespace"') class _PreprocessorInfo(object): """Stores checkpoints of nesting stacks when #if/#else is seen.""" def __init__(self, stack_before_if): # The entire nesting stack before #if self.stack_before_if = stack_before_if # The entire nesting stack up to #else self.stack_before_else = [] # Whether we have already seen #else or #elif self.seen_else = False class NestingState(object): """Holds states related to parsing braces.""" def __init__(self): # Stack for tracking all braces. An object is pushed whenever we # see a "{", and popped when we see a "}". Only 3 types of # objects are possible: # - _ClassInfo: a class or struct. # - _NamespaceInfo: a namespace. # - _BlockInfo: some other type of block. self.stack = [] # Top of the previous stack before each Update(). # # Because the nesting_stack is updated at the end of each line, we # had to do some convoluted checks to find out what is the current # scope at the beginning of the line. This check is simplified by # saving the previous top of nesting stack. # # We could save the full stack, but we only need the top. Copying # the full nesting stack would slow down cpplint by ~10%. self.previous_stack_top = [] # Stack of _PreprocessorInfo objects. self.pp_stack = [] def SeenOpenBrace(self): """Check if we have seen the opening brace for the innermost block. Returns: True if we have seen the opening brace, False if the innermost block is still expecting an opening brace. """ return (not self.stack) or self.stack[-1].seen_open_brace def InNamespaceBody(self): """Check if we are currently one level inside a namespace body. Returns: True if top of the stack is a namespace block, False otherwise. """ return self.stack and isinstance(self.stack[-1], _NamespaceInfo) def InExternC(self): """Check if we are currently one level inside an 'extern "C"' block. Returns: True if top of the stack is an extern block, False otherwise. """ return self.stack and isinstance(self.stack[-1], _ExternCInfo) def InClassDeclaration(self): """Check if we are currently one level inside a class or struct declaration. Returns: True if top of the stack is a class/struct, False otherwise. """ return self.stack and isinstance(self.stack[-1], _ClassInfo) def InAsmBlock(self): """Check if we are currently one level inside an inline ASM block. Returns: True if the top of the stack is a block containing inline ASM. """ return self.stack and self.stack[-1].inline_asm != _NO_ASM def InTemplateArgumentList(self, clean_lines, linenum, pos): """Check if current position is inside template argument list. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. pos: position just after the suspected template argument. 
Returns: True if (linenum, pos) is inside template arguments. """ while linenum < clean_lines.NumLines(): # Find the earliest character that might indicate a template argument line = clean_lines.elided[linenum] match = Match(r'^[^{};=\[\]\.<>]*(.)', line[pos:]) if not match: linenum += 1 pos = 0 continue token = match.group(1) pos += len(match.group(0)) # These things do not look like template argument list: # class Suspect { # class Suspect x; } if token in ('{', '}', ';'): return False # These things look like template argument list: # template <class Suspect> # template <class Suspect = default_value> # template <class Suspect[]> # template <class Suspect...> if token in ('>', '=', '[', ']', '.'): return True # Check if token is an unmatched '<'. # If not, move on to the next character. if token != '<': pos += 1 if pos >= len(line): linenum += 1 pos = 0 continue # We can't be sure if we just find a single '<', and need to # find the matching '>'. (_, end_line, end_pos) = CloseExpression(clean_lines, linenum, pos - 1) if end_pos < 0: # Not sure if template argument list or syntax error in file return False linenum = end_line pos = end_pos return False def UpdatePreprocessor(self, line): """Update preprocessor stack. We need to handle preprocessors due to classes like this: #ifdef SWIG struct ResultDetailsPageElementExtensionPoint { #else struct ResultDetailsPageElementExtensionPoint : public Extension { #endif We make the following assumptions (good enough for most files): - Preprocessor condition evaluates to true from #if up to first #else/#elif/#endif. - Preprocessor condition evaluates to false from #else/#elif up to #endif. We still perform lint checks on these lines, but these do not affect nesting stack. Args: line: current line to check. """ if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line): # Beginning of #if block, save the nesting stack here. The saved # stack will allow us to restore the parsing state in the #else case. self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack))) elif Match(r'^\s*#\s*(else|elif)\b', line): # Beginning of #else block if self.pp_stack: if not self.pp_stack[-1].seen_else: # This is the first #else or #elif block. Remember the # whole nesting stack up to this point. This is what we # keep after the #endif. self.pp_stack[-1].seen_else = True self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack) # Restore the stack to how it was before the #if self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if) else: # TODO(unknown): unexpected #else, issue warning? pass elif Match(r'^\s*#\s*endif\b', line): # End of #if or #else blocks. if self.pp_stack: # If we saw an #else, we will need to restore the nesting # stack to its former state before the #else, otherwise we # will just continue from where we left off. if self.pp_stack[-1].seen_else: # Here we can just use a shallow copy since we are the last # reference to it. self.stack = self.pp_stack[-1].stack_before_else # Drop the corresponding #if self.pp_stack.pop() else: # TODO(unknown): unexpected #endif, issue warning? pass # TODO(unknown): Update() is too long, but we will refactor later. def Update(self, filename, clean_lines, linenum, error): """Update nesting state with current line. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # Remember top of the previous nesting stack. 
# # The stack is always pushed/popped and not modified in place, so # we can just do a shallow copy instead of copy.deepcopy. Using # deepcopy would slow down cpplint by ~28%. if self.stack: self.previous_stack_top = self.stack[-1] else: self.previous_stack_top = None # Update pp_stack self.UpdatePreprocessor(line) # Count parentheses. This is to avoid adding struct arguments to # the nesting stack. if self.stack: inner_block = self.stack[-1] depth_change = line.count('(') - line.count(')') inner_block.open_parentheses += depth_change # Also check if we are starting or ending an inline assembly block. if inner_block.inline_asm in (_NO_ASM, _END_ASM): if (depth_change != 0 and inner_block.open_parentheses == 1 and _MATCH_ASM.match(line)): # Enter assembly block inner_block.inline_asm = _INSIDE_ASM else: # Not entering assembly block. If previous line was _END_ASM, # we will now shift to _NO_ASM state. inner_block.inline_asm = _NO_ASM elif (inner_block.inline_asm == _INSIDE_ASM and inner_block.open_parentheses == 0): # Exit assembly block inner_block.inline_asm = _END_ASM # Consume namespace declaration at the beginning of the line. Do # this in a loop so that we catch same line declarations like this: # namespace proto2 { namespace bridge { class MessageSet; } } while True: # Match start of namespace. The "\b\s*" below catches namespace # declarations even if it weren't followed by a whitespace, this # is so that we don't confuse our namespace checker. The # missing spaces will be flagged by CheckSpacing. namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line) if not namespace_decl_match: break new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum) self.stack.append(new_namespace) line = namespace_decl_match.group(2) if line.find('{') != -1: new_namespace.seen_open_brace = True line = line[line.find('{') + 1:] # Look for a class declaration in whatever is left of the line # after parsing namespaces. The regexp accounts for decorated classes # such as in: # class LOCKABLE API Object { # }; class_decl_match = Match( r'^(\s*(?:template\s*<[\w\s<>,:]*>\s*)?' r'(class|struct)\s+(?:[A-Z_]+\s+)*(\w+(?:::\w+)*))' r'(.*)$', line) if (class_decl_match and (not self.stack or self.stack[-1].open_parentheses == 0)): # We do not want to accept classes that are actually template arguments: # template <class Ignore1, # class Ignore2 = Default<Args>, # template <Args> class Ignore3> # void Function() {}; # # To avoid template argument cases, we scan forward and look for # an unmatched '>'. If we see one, assume we are inside a # template argument list. end_declaration = len(class_decl_match.group(1)) if not self.InTemplateArgumentList(clean_lines, linenum, end_declaration): self.stack.append(_ClassInfo( class_decl_match.group(3), class_decl_match.group(2), clean_lines, linenum)) line = class_decl_match.group(4) # If we have not yet seen the opening brace for the innermost block, # run checks here. if not self.SeenOpenBrace(): self.stack[-1].CheckBegin(filename, clean_lines, linenum, error) # Update access control if we are inside a class/struct if self.stack and isinstance(self.stack[-1], _ClassInfo): classinfo = self.stack[-1] access_match = Match( r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?' r':(?:[^:]|$)', line) if access_match: classinfo.access = access_match.group(2) # Check that access keywords are indented +1 space. Skip this # check if the keywords are not preceded by whitespaces. 
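        # For example (illustrative), for a class declared at the left
        # margin, " public:" passes, while "public:" or "   public:" is
        # flagged.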
          indent = access_match.group(1)
          if (len(indent) != classinfo.class_indent + 1 and
              Match(r'^\s*$', indent)):
            if classinfo.is_struct:
              parent = 'struct ' + classinfo.name
            else:
              parent = 'class ' + classinfo.name
            slots = ''
            if access_match.group(3):
              slots = access_match.group(3)
            error(filename, linenum, 'whitespace/indent', 3,
                  '%s%s: should be indented +1 space inside %s' % (
                      access_match.group(2), slots, parent))

    # Consume braces or semicolons from what's left of the line
    while True:
      # Match first brace, semicolon, or closed parenthesis.
      matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
      if not matched:
        break

      token = matched.group(1)
      if token == '{':
        # If namespace or class hasn't seen an opening brace yet, mark
        # namespace/class head as complete. Push a new block onto the
        # stack otherwise.
        if not self.SeenOpenBrace():
          self.stack[-1].seen_open_brace = True
        elif Match(r'^extern\s*"[^"]*"\s*\{', line):
          self.stack.append(_ExternCInfo(linenum))
        else:
          self.stack.append(_BlockInfo(linenum, True))
          if _MATCH_ASM.match(line):
            self.stack[-1].inline_asm = _BLOCK_ASM

      elif token == ';' or token == ')':
        # If we haven't seen an opening brace yet, but we already saw
        # a semicolon, this is probably a forward declaration. Pop
        # the stack for these.
        #
        # Similarly, if we haven't seen an opening brace yet, but we
        # already saw a closing parenthesis, then these are probably
        # function arguments with extra "class" or "struct" keywords.
        # Also pop the stack for these.
        if not self.SeenOpenBrace():
          self.stack.pop()
      else:  # token == '}'
        # Perform end of block checks and pop the stack.
        if self.stack:
          self.stack[-1].CheckEnd(filename, clean_lines, linenum, error)
          self.stack.pop()
      line = matched.group(2)

  def InnermostClass(self):
    """Get class info on the top of the stack.

    Returns:
      A _ClassInfo object if we are inside a class, or None otherwise.
    """
    for i in range(len(self.stack), 0, -1):
      classinfo = self.stack[i - 1]
      if isinstance(classinfo, _ClassInfo):
        return classinfo
    return None

  def CheckCompletedBlocks(self, filename, error):
    """Checks that all classes and namespaces have been completely parsed.

    Call this when all lines in a file have been processed.
    Args:
      filename: The name of the current file.
      error: The function to call with any errors found.
    """
    # Note: This test can result in false positives if #ifdef constructs
    # get in the way of brace matching. See the testBuildClass test in
    # cpplint_unittest.py for an example of this.
    for obj in self.stack:
      if isinstance(obj, _ClassInfo):
        error(filename, obj.starting_linenum, 'build/class', 5,
              'Failed to find complete declaration of class %s' %
              obj.name)
      elif isinstance(obj, _NamespaceInfo):
        error(filename, obj.starting_linenum, 'build/namespaces', 5,
              'Failed to find complete declaration of namespace %s' %
              obj.name)


def CheckForNonStandardConstructs(filename, clean_lines, linenum,
                                  nesting_state, error):
  r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.

  Complain about several constructs which gcc-2 accepts, but which are
  not standard C++. Warning about these in lint is one way to ease the
  transition to new compilers.
  - put storage class first (e.g. "static const" instead of "const static").
  - "%lld" instead of "%qd" in printf-type functions.
  - "%1$d" is non-standard in printf-type functions.
  - "\%" is an undefined character escape sequence.
  - text after #endif is not allowed.
  - invalid inner-style forward declaration.
  - >? and <? operators, and their >?= and <?= cousins.
Additionally, check for constructor/destructor style violations and reference members, as it is very convenient to do so while checking for gcc-2 compliance. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. error: A callable to which errors are reported, which takes 4 arguments: filename, line number, error level, and message """ # Remove comments from the line, but leave in strings for now. line = clean_lines.lines[linenum] if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line): error(filename, linenum, 'runtime/printf_format', 3, '%q in format strings is deprecated. Use %ll instead.') if Search(r'printf\s*\(.*".*%\d+\$', line): error(filename, linenum, 'runtime/printf_format', 2, '%N$ formats are unconventional. Try rewriting to avoid them.') # Remove escaped backslashes before looking for undefined escapes. line = line.replace('\\\\', '') if Search(r'("|\').*\\(%|\[|\(|{)', line): error(filename, linenum, 'build/printf_format', 3, '%, [, (, and { are undefined character escapes. Unescape them.') # For the rest, work with both comments and strings removed. line = clean_lines.elided[linenum] if Search(r'\b(const|volatile|void|char|short|int|long' r'|float|double|signed|unsigned' r'|schar|u?int8|u?int16|u?int32|u?int64)' r'\s+(register|static|extern|typedef)\b', line): error(filename, linenum, 'build/storage_class', 5, 'Storage-class specifier (static, extern, typedef, etc) should be ' 'at the beginning of the declaration.') if Match(r'\s*#\s*endif\s*[^/\s]+', line): error(filename, linenum, 'build/endif_comment', 5, 'Uncommented text after #endif is non-standard. Use a comment.') if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line): error(filename, linenum, 'build/forward_decl', 5, 'Inner-style forward declarations are invalid. Remove this line.') if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?', line): error(filename, linenum, 'build/deprecated', 3, '>? and <? (max and min) operators are non-standard and deprecated.') if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line): # TODO(unknown): Could it be expanded safely to arbitrary references, # without triggering too many false positives? The first # attempt triggered 5 warnings for mostly benign code in the regtest, hence # the restriction. # Here's the original regexp, for the reference: # type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?' # r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;' error(filename, linenum, 'runtime/member_string_references', 2, 'const string& members are dangerous. It is much better to use ' 'alternatives, such as pointers or simple constants.') # Everything else in this function operates on class declarations. # Return early if the top of the nesting stack is not a class, or if # the class head is not completed yet. classinfo = nesting_state.InnermostClass() if not classinfo or not classinfo.seen_open_brace: return # The class may have been declared with namespace or classname qualifiers. # The constructor and destructor will not have those qualifiers. base_classname = classinfo.name.split('::')[-1] # Look for single-argument constructors that aren't marked explicit. # Technically a valid construct, but against style. 
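  # For example (illustrative):
  #   Foo(int x);           // flagged: callable with one argument
  #   explicit Foo(int x);  // OK
  #   Foo(int x, int y);    // OK: needs two arguments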
explicit_constructor_match = Match( r'\s+(?:inline\s+)?(explicit\s+)?(?:inline\s+)?%s\s*' r'\(((?:[^()]|\([^()]*\))*)\)' % re.escape(base_classname), line) if explicit_constructor_match: is_marked_explicit = explicit_constructor_match.group(1) if not explicit_constructor_match.group(2): constructor_args = [] else: constructor_args = explicit_constructor_match.group(2).split(',') # collapse arguments so that commas in template parameter lists and function # argument parameter lists don't split arguments in two i = 0 while i < len(constructor_args): constructor_arg = constructor_args[i] while (constructor_arg.count('<') > constructor_arg.count('>') or constructor_arg.count('(') > constructor_arg.count(')')): constructor_arg += ',' + constructor_args[i + 1] del constructor_args[i + 1] constructor_args[i] = constructor_arg i += 1 defaulted_args = [arg for arg in constructor_args if '=' in arg] noarg_constructor = (not constructor_args or # empty arg list # 'void' arg specifier (len(constructor_args) == 1 and constructor_args[0].strip() == 'void')) onearg_constructor = ((len(constructor_args) == 1 and # exactly one arg not noarg_constructor) or # all but at most one arg defaulted (len(constructor_args) >= 1 and not noarg_constructor and len(defaulted_args) >= len(constructor_args) - 1)) initializer_list_constructor = bool( onearg_constructor and Search(r'\bstd\s*::\s*initializer_list\b', constructor_args[0])) copy_constructor = bool( onearg_constructor and Match(r'(const\s+)?%s(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&' % re.escape(base_classname), constructor_args[0].strip())) if (not is_marked_explicit and onearg_constructor and not initializer_list_constructor and not copy_constructor): if defaulted_args: error(filename, linenum, 'runtime/explicit', 5, 'Constructors callable with one argument ' 'should be marked explicit.') else: error(filename, linenum, 'runtime/explicit', 5, 'Single-parameter constructors should be marked explicit.') elif is_marked_explicit and not onearg_constructor: if noarg_constructor: error(filename, linenum, 'runtime/explicit', 5, 'Zero-parameter constructors should not be marked explicit.') def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error): """Checks for the correctness of various spacing around function calls. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # Since function calls often occur inside if/for/while/switch # expressions - which have their own, more liberal conventions - we # first see if we should be looking inside such an expression for a # function call, to which we can apply more strict standards. fncall = line # if there's no control flow construct, look at whole line for pattern in (r'\bif\s*\((.*)\)\s*{', r'\bfor\s*\((.*)\)\s*{', r'\bwhile\s*\((.*)\)\s*[{;]', r'\bswitch\s*\((.*)\)\s*{'): match = Search(pattern, line) if match: fncall = match.group(1) # look inside the parens for function calls break # Except in if/for/while/switch, there should never be space # immediately inside parens (eg "f( 3, 4 )"). We make an exception # for nested parens ( (a+b) + c ). Likewise, there should never be # a space before a ( when it's a function argument. I assume it's a # function argument when the char before the whitespace is legal in # a function name (alnum + _) and we're not starting a macro. 
Also ignore # pointers and references to arrays and functions coz they're too tricky: # we use a very simple way to recognize these: # " (something)(maybe-something)" or # " (something)(maybe-something," or # " (something)[something]" # Note that we assume the contents of [] to be short enough that # they'll never need to wrap. if ( # Ignore control structures. not Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b', fncall) and # Ignore pointers/references to functions. not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and # Ignore pointers/references to arrays. not Search(r' \([^)]+\)\[[^\]]+\]', fncall)): if Search(r'\w\s*\(\s(?!\s*\\$)', fncall): # a ( used for a fn call error(filename, linenum, 'whitespace/parens', 4, 'Extra space after ( in function call') elif Search(r'\(\s+(?!(\s*\\)|\()', fncall): error(filename, linenum, 'whitespace/parens', 2,
            'Extra space after (')

    if (Search(r'\w\s+\(', fncall) and
        not Search(r'_{0,2}asm_{0,2}\s+_{0,2}volatile_{0,2}\s+\(', fncall) and
        not Search(r'#\s*define|typedef|using\s+\w+\s*=', fncall) and
        not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall) and
        not Search(r'\bcase\s+\(', fncall)):
      # TODO(unknown): Space after an operator function seems to be a common
      # error, silence those for now by restricting them to highest verbosity.
      if Search(r'\boperator_*\b', line):
        error(filename, linenum, 'whitespace/parens', 0,
              'Extra space before ( in function call')
      else:
        error(filename, linenum, 'whitespace/parens', 4,
              'Extra space before ( in function call')

  # If the ) is followed only by a newline or a { + newline, assume it's
  # part of a control statement (if/while/etc), and don't complain
  if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
    # If the closing parenthesis is preceded by only whitespaces,
    # try to give a more descriptive error message.
    if Search(r'^\s+\)', fncall):
      error(filename, linenum, 'whitespace/parens', 2,
            'Closing ) should be moved to the previous line')
    else:
      error(filename, linenum, 'whitespace/parens', 2,
            'Extra space before )')


def IsBlankLine(line):
  """Returns true if the given line is blank.

  We consider a line to be blank if the line is empty or consists of
  only white spaces.

  Args:
    line: A line of a string.

  Returns:
    True, if the given line is blank.
  """
  return not line or line.isspace()


def CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
                                 error):
  """Checks indentation of items directly inside a namespace body."""
  is_namespace_indent_item = (
      len(nesting_state.stack) > 1 and
      nesting_state.stack[-1].check_namespace_indentation and
      isinstance(nesting_state.previous_stack_top, _NamespaceInfo) and
      nesting_state.previous_stack_top == nesting_state.stack[-2])

  if ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
                                     clean_lines.elided, line):
    CheckItemIndentationInNamespace(filename, clean_lines.elided,
                                    line, error)


def CheckForFunctionLengths(filename, clean_lines, linenum,
                            function_state, error):
  """Reports errors for long function bodies.

  For an overview of why this is done, see:
  https://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions

  Uses a simplistic algorithm assuming other style guidelines
  (especially spacing) are followed.
  Only checks unindented functions, so class members are unchecked.
  Trivial bodies are unchecked, so constructors with huge initializer lists
  may be missed.
  Blank/comment lines are not counted so as to avoid encouraging the removal
  of vertical space and comments just to get through a lint check.
  NOLINT *on the last line of a function* disables this check.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    function_state: Current function name and lines in body so far.
    error: The function to call with any errors found.
  """
  lines = clean_lines.lines
  line = lines[linenum]
  joined_line = ''

  starting_func = False
  regexp = r'(\w(\w|::|\*|\&|\s)*)\('  # decls * & space::name( ...
  match_result = Match(regexp, line)
  if match_result:
    # If the name is all caps and underscores, figure it's a macro and
    # ignore it, unless it's TEST or TEST_F.
    function_name = match_result.group(1).split()[-1]
    if function_name == 'TEST' or function_name == 'TEST_F' or (
        not Match(r'[A-Z_]+$', function_name)):
      starting_func = True

  if starting_func:
    body_found = False
    for start_linenum in xrange(linenum, clean_lines.NumLines()):
      start_line = lines[start_linenum]
      joined_line += ' ' + start_line.lstrip()
      if Search(r'(;|})', start_line):  # Declarations and trivial functions
        body_found = True
        break                              # ... 
ignore elif Search(r'{', start_line): body_found = True function = Search(r'((\w|:)*)\(', line).group(1) if Match(r'TEST', function): # Handle TEST... macros parameter_regexp = Search(r'(\(.*\))', joined_line) if parameter_regexp: # Ignore bad syntax function += parameter_regexp.group(1) else: function += '()' function_state.Begin(function) break if not body_found: # No body for the function (or evidence of a non-function) was found. error(filename, linenum, 'readability/fn_size', 5, 'Lint failed to find start of function body.') elif Match(r'^\}\s*$', line): # function end function_state.Check(error, filename, linenum) function_state.End() elif not Match(r'^\s*$', line): function_state.Count() # Count non-blank/non-comment lines. _RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?') def CheckComment(line, filename, linenum, next_line_start, error): """Checks for common mistakes in comments. Args: line: The line in question. filename: The name of the current file. linenum: The number of the line to check. next_line_start: The first non-whitespace column of the next line. error: The function to call with any errors found. """ commentpos = line.find('//') if commentpos != -1: # Check if the // may be in quotes. If so, ignore it if re.sub(r'\\.', '', line[0:commentpos]).count('"') % 2 == 0: # Allow one space for new scopes, two spaces otherwise: if (not (Match(r'^.*{ *//', line) and next_line_start == commentpos) and ((commentpos >= 1 and line[commentpos-1] not in string.whitespace) or (commentpos >= 2 and line[commentpos-2] not in string.whitespace))): error(filename, linenum, 'whitespace/comments', 2, 'At least two spaces is best between code and comments') # Checks for common mistakes in TODO comments. comment = line[commentpos:] match = _RE_PATTERN_TODO.match(comment) if match: # One whitespace is correct; zero whitespace is handled elsewhere. leading_whitespace = match.group(1) if len(leading_whitespace) > 1: error(filename, linenum, 'whitespace/todo', 2, 'Too many spaces before TODO') username = match.group(2) if not username: error(filename, linenum, 'readability/todo', 2, 'Missing username in TODO; it should look like ' '"// TODO(my_username): Stuff."') middle_whitespace = match.group(3) # Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison if middle_whitespace != ' ' and middle_whitespace != '': error(filename, linenum, 'whitespace/todo', 2, 'TODO(my_username) should be followed by a space') # If the comment contains an alphanumeric character, there # should be a space somewhere between it and the // unless # it's a /// or //! Doxygen comment. if (Match(r'//[^ ]*\w', comment) and not Match(r'(///|//\!)(\s+|$)', comment)): error(filename, linenum, 'whitespace/comments', 4, 'Should have a space between // and comment') def CheckAccess(filename, clean_lines, linenum, nesting_state, error): """Checks for improper use of DISALLOW* macros. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. error: The function to call with any errors found. 
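
    Example (illustrative): DISALLOW_COPY_AND_ASSIGN(Foo) in a "public:"
    section is flagged; in a "private:" section it passes.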
""" line = clean_lines.elided[linenum] # get rid of comments and strings matched = Match((r'\s*(DISALLOW_COPY_AND_ASSIGN|' r'DISALLOW_IMPLICIT_CONSTRUCTORS)'), line) if not matched: return if nesting_state.stack and isinstance(nesting_state.stack[-1], _ClassInfo): if nesting_state.stack[-1].access != 'private': error(filename, linenum, 'readability/constructors', 3, '%s must be in the private: section' % matched.group(1)) else: # Found DISALLOW* macro outside a class declaration, or perhaps it # was used inside a function when it should have been part of the # class declaration. We could issue a warning here, but it # probably resulted in a compiler error already. pass def CheckSpacing(filename, clean_lines, linenum, nesting_state, error): """Checks for the correctness of various spacing issues in the code. Things we check for: spaces around operators, spaces after if/for/while/switch, no spaces around parens in function calls, two spaces between code and comment, don't start a block with a blank line, don't end a function with a blank line, don't add a blank line after public/protected/private, don't have too many blank lines in a row. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. error: The function to call with any errors found. """ # Don't use "elided" lines here, otherwise we can't check commented lines. # Don't want to use "raw" either, because we don't want to check inside C++11 # raw strings, raw = clean_lines.lines_without_raw_strings line = raw[linenum] # Before nixing comments, check if the line is blank for no good # reason. This includes the first line after a block is opened, and # blank lines at the end of a function (ie, right before a line like '}' # # Skip all the blank line checks if we are immediately inside a # namespace body. In other words, don't issue blank line warnings # for this block: # namespace { # # } # # A warning about missing end of namespace comments will be issued instead. # # Also skip blank line checks for 'extern "C"' blocks, which are formatted # like namespaces. if (IsBlankLine(line) and not nesting_state.InNamespaceBody() and not nesting_state.InExternC()): elided = clean_lines.elided prev_line = elided[linenum - 1] prevbrace = prev_line.rfind('{') # TODO(unknown): Don't complain if line before blank line, and line after, # both start with alnums and are indented the same amount. # This ignores whitespace at the start of a namespace block # because those are not usually indented. if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1: # OK, we have a blank line at the start of a code block. Before we # complain, we check if it is an exception to the rule: The previous # non-empty line has the parameters of a function header that are indented # 4 spaces (because they did not fit in a 80 column line when placed on # the same line as the function name). We also check for the case where # the previous line is indented 6 spaces, which may happen when the # initializers of a constructor do not fit into a 80 column line. exception = False if Match(r' {6}\w', prev_line): # Initializer list? # We are looking for the opening column of initializer list, which # should be indented 4 spaces to cause 6 space indentation afterwards. 
search_position = linenum-2 while (search_position >= 0 and Match(r' {6}\w', elided[search_position])): search_position -= 1 exception = (search_position >= 0 and elided[search_position][:5] == ' :') else: # Search for the function arguments or an initializer list. We use a # simple heuristic here: If the line is indented 4 spaces; and we have a # closing paren, without the opening paren, followed by an opening brace # or colon (for initializer lists) we assume that it is the last line of # a function header. If we have a colon indented 4 spaces, it is an # initializer list. exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)', prev_line) or Match(r' {4}:', prev_line)) if not exception: error(filename, linenum, 'whitespace/blank_line', 2, 'Redundant blank line at the start of a code block ' 'should be deleted.') # Ignore blank lines at the end of a block in a long if-else # chain, like this: # if (condition1) { # // Something followed by a blank line # # } else if (condition2) { # // Something else # } if linenum + 1 < clean_lines.NumLines(): next_line = raw[linenum + 1] if (next_line and Match(r'\s*}', next_line) and next_line.find('} else ') == -1): error(filename, linenum, 'whitespace/blank_line', 3, 'Redundant blank line at the end of a code block ' 'should be deleted.') matched = Match(r'\s*(public|protected|private):', prev_line) if matched: error(filename, linenum, 'whitespace/blank_line', 3, 'Do not leave a blank line after "%s:"' % matched.group(1)) # Next, check comments next_line_start = 0 if linenum + 1 < clean_lines.NumLines(): next_line = raw[linenum + 1] next_line_start = len(next_line) - len(next_line.lstrip()) CheckComment(line, filename, linenum, next_line_start, error) # get rid of comments and strings line = clean_lines.elided[linenum] # You shouldn't have spaces before your brackets, except maybe after # 'delete []' or 'return []() {};' if Search(r'\w\s+\[', line) and not Search(r'(?:delete|return)\s+\[', line): error(filename, linenum, 'whitespace/braces', 5, 'Extra space before [') # In range-based for, we wanted spaces before and after the colon, but # not around "::" tokens that might appear. if (Search(r'for *\(.*[^:]:[^: ]', line) or Search(r'for *\(.*[^: ]:[^:]', line)): error(filename, linenum, 'whitespace/forcolon', 2, 'Missing space around colon in range-based for loop') def CheckOperatorSpacing(filename, clean_lines, linenum, error): """Checks for horizontal spacing around operators. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # Don't try to do spacing checks for operator methods. Do this by # replacing the troublesome characters with something else, # preserving column position for all other characters. # # The replacement is done repeatedly to avoid false positives from # operators that call operators. while True: match = Match(r'^(.*\boperator\b)(\S+)(\s*\(.*)$', line) if match: line = match.group(1) + ('_' * len(match.group(2))) + match.group(3) else: break # We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )". # Otherwise not. Note we only check for non-spaces on *both* sides; # sometimes people put non-spaces on one side when aligning ='s among # many lines (not that this is behavior that I approve of...) 
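  # For example (illustrative): "x=5;" is flagged below, while
  # "if ( (a=Foo()) == 0 )" and "x += 5;" are not.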
  if ((Search(r'[\w.]=', line) or
       Search(r'=[\w.]', line))
      and not Search(r'\b(if|while|for) ', line)
      # Operators taken from [lex.operators] in C++11 standard.
      and not Search(r'(>=|<=|==|!=|&=|\^=|\|=|\+=|\*=|\/=|\%=)', line)
      and not Search(r'operator=', line)):
    error(filename, linenum, 'whitespace/operators', 4,
          'Missing spaces around =')

  # It's ok not to have spaces around binary operators like + - * /, but if
  # there's too little whitespace, we get concerned. It's hard to tell,
  # though, so we punt on this one for now. TODO.

  # You should always have whitespace around binary operators.
  #
  # Check <= and >= first to avoid false positives with < and >, then
  # check non-include lines for spacing around < and >.
  #
  # If the operator is followed by a comma, assume it's being used in a
  # macro context and don't do any checks. This avoids false
  # positives.
  #
  # Note that && is not included here. This is because there are too
  # many false positives due to RValue references.
  match = Search(r'[^<>=!\s](==|!=|<=|>=|\|\|)[^<>=!\s,;\)]', line)
  if match:
    error(filename, linenum, 'whitespace/operators', 3,
          'Missing spaces around %s' % match.group(1))
  elif not Match(r'#.*include', line):
    # Look for < that is not surrounded by spaces. This is only
    # triggered if both sides are missing spaces, even though
    # technically we should flag if at least one side is missing a
    # space. This is done to avoid some false positives with shifts.
    match = Match(r'^(.*[^\s<])<[^\s=<,]', line)
    if match:
      (_, _, end_pos) = CloseExpression(
          clean_lines, linenum, len(match.group(1)))
      if end_pos <= -1:
        error(filename, linenum, 'whitespace/operators', 3,
              'Missing spaces around <')

    # Look for > that is not surrounded by spaces. Similar to the
    # above, we only trigger if both sides are missing spaces to avoid
    # false positives with shifts.
    match = Match(r'^(.*[^-\s>])>[^\s=>,]', line)
    if match:
      (_, _, start_pos) = ReverseCloseExpression(
          clean_lines, linenum, len(match.group(1)))
      if start_pos <= -1:
        error(filename, linenum, 'whitespace/operators', 3,
              'Missing spaces around >')

  # We allow no-spaces around << when used like this: 10<<20, but
  # not otherwise (particularly, not when used as streams).
  #
  # We also allow operators following an opening parenthesis, since
  # those tend to be macros that deal with operators.
  match = Search(r'(operator|[^\s(<])(?:L|UL|LL|ULL|l|ul|ll|ull)?<<([^\s,=<])',
                 line)
  if (match and not (match.group(1).isdigit() and match.group(2).isdigit()) and
      not (match.group(1) == 'operator' and match.group(2) == ';')):
    error(filename, linenum, 'whitespace/operators', 3,
          'Missing spaces around <<')

  # We allow no-spaces around >> for almost anything. This is because
  # C++11 allows ">>" to close nested templates, which accounts for
  # most cases when ">>" is not followed by a space.
  #
  # We still warn on ">>" followed by alpha character, because that is
  # likely due to ">>" being used for right shifts, e.g.:
  #   value >> alpha
  #
  # When ">>" is used to close templates, the alphanumeric letter that
  # follows would be part of an identifier, and there should still be
  # a space separating the template type and the identifier.
# type<type<type>> alpha match = Search(r'>>[a-zA-Z_]', line) if match: error(filename, linenum, 'whitespace/operators', 3, 'Missing spaces around >>') # There shouldn't be space around unary operators match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line) if match: error(filename, linenum, 'whitespace/operators', 4, 'Extra space for operator %s' % match.group(1)) def CheckParenthesisSpacing(filename, clean_lines, linenum, error): """Checks for horizontal spacing around parentheses. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # No spaces after an if, while, switch, or for match = Search(r' (if\(|for\(|while\(|switch\()', line) if match: error(filename, linenum, 'whitespace/parens', 5, 'Missing space before ( in %s' % match.group(1)) # For if/for/while/switch, the left and right parens should be # consistent about how many spaces are inside the parens, and # there should either be zero or one spaces inside the parens. # We don't want: "if ( foo)" or "if ( foo )". # Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed. match = Search(r'\b(if|for|while|switch)\s*' r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$', line) if match: if len(match.group(2)) != len(match.group(4)): if not (match.group(3) == ';' and len(match.group(2)) == 1 + len(match.group(4)) or not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)): error(filename, linenum, 'whitespace/parens', 5, 'Mismatching spaces inside () in %s' % match.group(1)) if len(match.group(2)) not in [0, 1]: error(filename, linenum, 'whitespace/parens', 5, 'Should have zero or one spaces inside ( and ) in %s' % match.group(1)) def CheckCommaSpacing(filename, clean_lines, linenum, error): """Checks for horizontal spacing near commas and semicolons. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ raw = clean_lines.lines_without_raw_strings line = clean_lines.elided[linenum] # You should always have a space after a comma (either as fn arg or operator) # # This does not apply when the non-space character following the # comma is another comma, since the only time when that happens is # for empty macro arguments. # # We run this check in two passes: first pass on elided lines to # verify that lines contain missing whitespaces, second pass on raw # lines to confirm that those missing whitespaces are not due to # elided comments. if (Search(r',[^,\s]', ReplaceAll(r'\boperator\s*,\s*\(', 'F(', line)) and Search(r',[^,\s]', raw[linenum])): error(filename, linenum, 'whitespace/comma', 3, 'Missing space after ,') # You should always have a space after a semicolon # except for few corner cases # TODO(unknown): clarify if 'if (1) { return 1;}' is requires one more # space after ; if Search(r';[^\s};\\)/]', line): error(filename, linenum, 'whitespace/semicolon', 3, 'Missing space after ;') def _IsType(clean_lines, nesting_state, expr): """Check if expression looks like a type name, returns true if so. Args: clean_lines: A CleansedLines instance containing the file. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. expr: The expression to check. Returns: True, if token looks like a type. 
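
  Example (illustrative):
    For expr "const uint32_t", the last token "uint32_t" matches _TYPES,
    so this returns True without walking the nesting stack.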
""" # Keep only the last token in the expression last_word = Match(r'^.*(\b\S+)$', expr) if last_word: token = last_word.group(1) else: token = expr # Match native types and stdint types if _TYPES.match(token): return True # Try a bit harder to match templated types. Walk up the nesting # stack until we find something that resembles a typename # declaration for what we are looking for. typename_pattern = (r'\b(?:typename|class|struct)\s+' + re.escape(token) + r'\b') block_index = len(nesting_state.stack) - 1 while block_index >= 0: if isinstance(nesting_state.stack[block_index], _NamespaceInfo): return False # Found where the opening brace is. We want to scan from this # line up to the beginning of the function, minus a few lines. # template <typename Type1, // stop scanning here # ...> # class C # : public ... { // start scanning here last_line = nesting_state.stack[block_index].starting_linenum next_block_start = 0 if block_index > 0: next_block_start = nesting_state.stack[block_index - 1].starting_linenum first_line = last_line while first_line >= next_block_start: if clean_lines.elided[first_line].find('template') >= 0: break first_line -= 1 if first_line < next_block_start: # Didn't find any "template" keyword before reaching the next block, # there are probably no template things to check for this block block_index -= 1 continue # Look for typename in the specified range for i in xrange(first_line, last_line + 1, 1): if Search(typename_pattern, clean_lines.elided[i]): return True block_index -= 1 return False def CheckBracesSpacing(filename, clean_lines, linenum, nesting_state, error): """Checks for horizontal spacing near commas. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # Except after an opening paren, or after another opening brace (in case of # an initializer list, for instance), you should have spaces before your # braces when they are delimiting blocks, classes, namespaces etc. # And since you should never have braces at the beginning of a line, # this is an easy test. Except that braces used for initialization don't # follow the same rule; we often don't want spaces before those. match = Match(r'^(.*[^ ({>]){', line) if match: # Try a bit harder to check for brace initialization. This # happens in one of the following forms: # Constructor() : initializer_list_{} { ... } # Constructor{}.MemberFunction() # Type variable{}; # FunctionCall(type{}, ...); # LastArgument(..., type{}); # LOG(INFO) << type{} << " ..."; # map_of_type[{...}] = ...; # ternary = expr ? new type{} : nullptr; # OuterTemplate<InnerTemplateConstructor<Type>{}> # # We check for the character following the closing brace, and # silence the warning if it's one of those listed above, i.e. # "{.;,)<>]:". # # To account for nested initializer list, we allow any number of # closing braces up to "{;,)<". We can't simply silence the # warning on first sight of closing brace, because that would # cause false negatives for things that are not initializer lists. # Silence this: But not this: # Outer{ if (...) { # Inner{...} if (...){ // Missing space before { # }; } # # There is a false negative with this approach if people inserted # spurious semicolons, e.g. 
"if (cond){};", but we will catch the # spurious semicolon with a separate check. leading_text = match.group(1) (endline, endlinenum, endpos) = CloseExpression( clean_lines, linenum, len(match.group(1))) trailing_text = '' if endpos > -1: trailing_text = endline[endpos:] for offset in xrange(endlinenum + 1, min(endlinenum + 3, clean_lines.NumLines() - 1)): trailing_text += clean_lines.elided[offset] # We also suppress warnings for `uint64_t{expression}` etc., as the style # guide recommends brace initialization for integral types to avoid # overflow/truncation. if (not Match(r'^[\s}]*[{.;,)<>\]:]', trailing_text) and not _IsType(clean_lines, nesting_state, leading_text)): error(filename, linenum, 'whitespace/braces', 5, 'Missing space before {') # Make sure '} else {' has spaces. if Search(r'}else', line): error(filename, linenum, 'whitespace/braces', 5, 'Missing space before else') # You shouldn't have a space before a semicolon at the end of the line. # There's a special case for "for" since the style guide allows space before # the semicolon there. if Search(r':\s*;\s*$', line): error(filename, linenum, 'whitespace/semicolon', 5, 'Semicolon defining empty statement. Use {} instead.') elif Search(r'^\s*;\s*$', line): error(filename, linenum, 'whitespace/semicolon', 5, 'Line contains only semicolon. If this should be an empty statement, ' 'use {} instead.') elif (Search(r'\s+;\s*$', line) and not Search(r'\bfor\b', line)): error(filename, linenum, 'whitespace/semicolon', 5, 'Extra space before last semicolon. If this should be an empty ' 'statement, use {} instead.') def IsDecltype(clean_lines, linenum, column): """Check if the token ending on (linenum, column) is decltype(). Args: clean_lines: A CleansedLines instance containing the file. linenum: the number of the line to check. column: end column of the token to check. Returns: True if this token is decltype() expression, False otherwise. """ (text, _, start_col) = ReverseCloseExpression(clean_lines, linenum, column) if start_col < 0: return False if Search(r'\bdecltype\s*$', text[0:start_col]): return True return False def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error): """Checks for additional blank line issues related to sections. Currently the only thing checked here is blank line before protected/private. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. class_info: A _ClassInfo objects. linenum: The number of the line to check. error: The function to call with any errors found. """ # Skip checks if the class is small, where small means 25 lines or less. # 25 lines seems like a good cutoff since that's the usual height of # terminals, and any class that can't fit in one screen can't really # be considered "small". # # Also skip checks if we are on the first line. This accounts for # classes that look like # class Foo { public: ... }; # # If we didn't find the end of the class, last_line would be zero, # and the check will be skipped by the first condition. if (class_info.last_line - class_info.starting_linenum <= 24 or linenum <= class_info.starting_linenum): return matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum]) if matched: # Issue warning if the line before public/protected/private was # not a blank line, but don't do this if the previous line contains # "class" or "struct". This can happen two ways: # - We are at the beginning of the class. 
# - We are forward-declaring an inner class that is semantically # private, but needed to be public for implementation reasons. # Also ignores cases where the previous line ends with a backslash as can be # common when defining classes in C macros. prev_line = clean_lines.lines[linenum - 1] if (not IsBlankLine(prev_line) and not Search(r'\b(class|struct)\b', prev_line) and not Search(r'\\$', prev_line)): # Try a bit harder to find the beginning of the class. This is to # account for multi-line base-specifier lists, e.g.: # class Derived # : public Base { end_class_head = class_info.starting_linenum for i in range(class_info.starting_linenum, linenum): if Search(r'\{\s*$', clean_lines.lines[i]): end_class_head = i break if end_class_head < linenum - 1: error(filename, linenum, 'whitespace/blank_line', 3, '"%s:" should be preceded by a blank line' % matched.group(1)) def GetPreviousNonBlankLine(clean_lines, linenum): """Return the most recent non-blank line and its line number. Args: clean_lines: A CleansedLines instance containing the file contents. linenum: The number of the line to check. Returns: A tuple with two elements. The first element is the contents of the last non-blank line before the current line, or the empty string if this is the first non-blank line. The second is the line number of that line, or -1 if this is the first non-blank line. """ prevlinenum = linenum - 1 while prevlinenum >= 0: prevline = clean_lines.elided[prevlinenum] if not IsBlankLine(prevline): # if not a blank line... return (prevline, prevlinenum) prevlinenum -= 1 return ('', -1) def CheckBraces(filename, clean_lines, linenum, error): """Looks for misplaced braces (e.g. at the end of line). Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # get rid of comments and strings if Match(r'\s*{\s*$', line): # We allow an open brace to start a line in the case where someone is using # braces in a block to explicitly create a new scope, which is commonly used # to control the lifetime of stack-allocated variables. Braces are also # used for brace initializers inside function calls. We don't detect this # perfectly: we just don't complain if the last non-whitespace character on # the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the # previous line starts a preprocessor block. We also allow a brace on the # following line if it is part of an array initialization and would not fit # within the 80 character limit of the preceding line. prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0] if (not Search(r'[,;:}{(]\s*$', prevline) and not Match(r'\s*#', prevline) and not (GetLineWidth(prevline) > _line_length - 2 and '[]' in prevline)): error(filename, linenum, 'whitespace/braces', 4, '{ should almost always be at the end of the previous line') # An else clause should be on the same line as the preceding closing brace. if Match(r'\s*else\b\s*(?:if\b|\{|$)', line): prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0] if Match(r'\s*}\s*$', prevline): error(filename, linenum, 'whitespace/newline', 4, 'An else should appear on the same line as the preceding }') # If braces come on one side of an else, they should be on both. # However, we have to worry about "else if" that spans multiple lines! 
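  # For example (illustrative):
  #   } else if (cond) {   // braces on both sides: OK
  #   } else if (cond)     // brace on the left only: flagged
  #     DoSomething();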
if Search(r'else if\s*\(', line): # could be multi-line if brace_on_left = bool(Search(r'}\s*else if\s*\(', line)) # find the ( after the if pos = line.find('else if') pos = line.find('(', pos) if pos > 0: (endline, _, endpos) = CloseExpression(clean_lines, linenum, pos) brace_on_right = endline[endpos:].find('{') != -1 if brace_on_left != brace_on_right: # must be brace after if error(filename, linenum, 'readability/braces', 5, 'If an else has a brace on one side, it should have it on both') elif Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line): error(filename, linenum, 'readability/braces', 5, 'If an else has a brace on one side, it should have it on both') # Likewise, an else should never have the else clause on the same line if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line): error(filename, linenum, 'whitespace/newline', 4, 'Else clause should never be on same line as else (use 2 lines)') # In the same way, a do/while should never be on one line if Match(r'\s*do [^\s{]', line): error(filename, linenum, 'whitespace/newline', 4, 'do/while clauses should not be on a single line') # Check single-line if/else bodies. The style guide says 'curly braces are not # required for single-line statements'. We additionally allow multi-line, # single statements, but we reject anything with more than one semicolon in # it. This means that the first semicolon after the if should be at the end of # its line, and the line after that should have an indent level equal to or # lower than the if. We also check for ambiguous if/else nesting without # braces. if_else_match = Search(r'\b(if\s*\(|else\b)', line) if if_else_match and not Match(r'\s*#', line): if_indent = GetIndentLevel(line) endline, endlinenum, endpos = line, linenum, if_else_match.end() if_match = Search(r'\bif\s*\(', line) if if_match: # This could be a multiline if condition, so find the end first. pos = if_match.end() - 1 (endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, pos) # Check for an opening brace, either directly after the if or on the next # line. If found, this isn't a single-statement conditional. if (not Match(r'\s*{', endline[endpos:]) and not (Match(r'\s*$', endline[endpos:]) and endlinenum < (len(clean_lines.elided) - 1) and Match(r'\s*{', clean_lines.elided[endlinenum + 1]))): while (endlinenum < len(clean_lines.elided) and ';' not in clean_lines.elided[endlinenum][endpos:]): endlinenum += 1 endpos = 0 if endlinenum < len(clean_lines.elided): endline = clean_lines.elided[endlinenum] # We allow a mix of whitespace and closing braces (e.g. for one-liner # methods) and a single \ after the semicolon (for macros) endpos = endline.find(';') if not Match(r';[\s}]*(\\?)$', endline[endpos:]): # Semicolon isn't the last character, there's something trailing. # Output a warning if the semicolon is not contained inside # a lambda expression. if not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}]*\}\s*\)*[;,]\s*$', endline): error(filename, linenum, 'readability/braces', 4, 'If/else bodies with multiple statements require braces') elif endlinenum < len(clean_lines.elided) - 1: # Make sure the next line is dedented next_line = clean_lines.elided[endlinenum + 1] next_indent = GetIndentLevel(next_line) # With ambiguous nested if statements, this will error out on the # if that *doesn't* match the else, regardless of whether it's the # inner one or outer one. 
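          # For example (illustrative), when scanning the inner "if" here,
          # the dangling "else" is not indented to match it, so it is
          # flagged:
          #   if (a)
          #     if (b)
          #       Foo();
          #   else
          #     Bar();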
if (if_match and Match(r'\s*else\b', next_line) and next_indent != if_indent): error(filename, linenum, 'readability/braces', 4, 'Else clause should be indented at the same level as if. ' 'Ambiguous nested if/else chains require braces.') elif next_indent > if_indent: error(filename, linenum, 'readability/braces', 4, 'If/else bodies with multiple statements require braces') def CheckTrailingSemicolon(filename, clean_lines, linenum, error): """Looks for redundant trailing semicolon. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # Block bodies should not be followed by a semicolon. Due to C++11 # brace initialization, there are more places where semicolons are # required than not, so we use a whitelist approach to check these # rather than a blacklist. These are the places where "};" should # be replaced by just "}": # 1. Some flavor of block following closing parenthesis: # for (;;) {}; # while (...) {}; # switch (...) {}; # Function(...) {}; # if (...) {}; # if (...) else if (...) {}; # # 2. else block: # if (...) else {}; # # 3. const member function: # Function(...) const {}; # # 4. Block following some statement: # x = 42; # {}; # # 5. Block at the beginning of a function: # Function(...) { # {}; # } # # Note that naively checking for the preceding "{" will also match # braces inside multi-dimensional arrays, but this is fine since # that expression will not contain semicolons. # # 6. Block following another block: # while (true) {} # {}; # # 7. End of namespaces: # namespace {}; # # These semicolons seems far more common than other kinds of # redundant semicolons, possibly due to people converting classes # to namespaces. For now we do not warn for this case. # # Try matching case 1 first. match = Match(r'^(.*\)\s*)\{', line) if match: # Matched closing parenthesis (case 1). Check the token before the # matching opening parenthesis, and don't warn if it looks like a # macro. This avoids these false positives: # - macro that defines a base class # - multi-line macro that defines a base class # - macro that defines the whole class-head # # But we still issue warnings for macros that we know are safe to # warn, specifically: # - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P # - TYPED_TEST # - INTERFACE_DEF # - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED: # # We implement a whitelist of safe macros instead of a blacklist of # unsafe macros, even though the latter appears less frequently in # google code and would have been easier to implement. This is because # the downside for getting the whitelist wrong means some extra # semicolons, while the downside for getting the blacklist wrong # would result in compile errors. 
# # In addition to macros, we also don't want to warn on # - Compound literals # - Lambdas # - alignas specifier with anonymous structs # - decltype closing_brace_pos = match.group(1).rfind(')') opening_parenthesis = ReverseCloseExpression( clean_lines, linenum, closing_brace_pos) if opening_parenthesis[2] > -1: line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]] macro = Search(r'\b([A-Z_][A-Z0-9_]*)\s*$', line_prefix) func = Match(r'^(.*\])\s*$', line_prefix) if ((macro and macro.group(1) not in ( 'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST', 'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED', 'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or (func and not Search(r'\boperator\s*\[\s*\]', func.group(1))) or Search(r'\b(?:struct|union)\s+alignas\s*$', line_prefix) or Search(r'\bdecltype$', line_prefix) or Search(r'\s+=\s*$', line_prefix)): match = None if (match and opening_parenthesis[1] > 1 and Search(r'\]\s*$', clean_lines.elided[opening_parenthesis[1] - 1])): # Multi-line lambda-expression match = None else: # Try matching cases 2-3. match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line) if not match: # Try matching cases 4-6. These are always matched on separate lines. # # Note that we can't simply concatenate the previous line to the # current line and do a single match, otherwise we may output # duplicate warnings for the blank line case: # if (cond) { # // blank line # } prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0] if prevline and Search(r'[;{}]\s*$', prevline): match = Match(r'^(\s*)\{', line) # Check matching closing brace if match: (endline, endlinenum, endpos) = CloseExpression( clean_lines, linenum, len(match.group(1))) if endpos > -1 and Match(r'^\s*;', endline[endpos:]): # Current {} pair is eligible for semicolon check, and we have found # the redundant semicolon, output warning here. # # Note: because we are scanning forward for opening braces, and # outputting warnings for the matching closing brace, if there are # nested blocks with trailing semicolons, we will get the error # messages in reversed order. error(filename, endlinenum, 'readability/braces', 4, "You don't need a ; after a }") def CheckEmptyBlockBody(filename, clean_lines, linenum, error): """Look for empty loop/conditional body with only a single semicolon. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ # Search for loop keywords at the beginning of the line. Because only # whitespaces are allowed before the keywords, this will also ignore most # do-while-loops, since those lines should start with closing brace. # # We also check "if" blocks here, since an empty conditional block # is likely an error. line = clean_lines.elided[linenum] matched = Match(r'\s*(for|while|if)\s*\(', line) if matched: # Find the end of the conditional expression. (end_line, end_linenum, end_pos) = CloseExpression( clean_lines, linenum, line.find('(')) # Output warning if what follows the condition expression is a semicolon. # No warning for all other cases, including whitespace or newline, since we # have a separate check for semicolons preceded by whitespace. 
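    # For example (illustrative): "while (cond);" is flagged just below,
    # while "while (cond) ;" is caught by the whitespace/semicolon check
    # instead.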
if end_pos >= 0 and Match(r';', end_line[end_pos:]): if matched.group(1) == 'if': error(filename, end_linenum, 'whitespace/empty_conditional_body', 5, 'Empty conditional bodies should use {}') else: error(filename, end_linenum, 'whitespace/empty_loop_body', 5, 'Empty loop bodies should use {} or continue') # Check for if statements that have completely empty bodies (no comments) # and no else clauses. if end_pos >= 0 and matched.group(1) == 'if': # Find the position of the opening { for the if statement. # Return without logging an error if it has no brackets. opening_linenum = end_linenum opening_line_fragment = end_line[end_pos:] # Loop until EOF or find anything that's not whitespace or opening {. while not Search(r'^\s*\{', opening_line_fragment): if Search(r'^(?!\s*$)', opening_line_fragment): # Conditional has no brackets. return opening_linenum += 1 if opening_linenum == len(clean_lines.elided): # Couldn't find conditional's opening { or any code before EOF. return opening_line_fragment = clean_lines.elided[opening_linenum] # Set opening_line (opening_line_fragment may not be entire opening line). opening_line = clean_lines.elided[opening_linenum] # Find the position of the closing }. opening_pos = opening_line_fragment.find('{') if opening_linenum == end_linenum: # We need to make opening_pos relative to the start of the entire line. opening_pos += end_pos (closing_line, closing_linenum, closing_pos) = CloseExpression( clean_lines, opening_linenum, opening_pos) if closing_pos < 0: return # Now construct the body of the conditional. This consists of the portion # of the opening line after the {, all lines until the closing line, # and the portion of the closing line before the }. if (clean_lines.raw_lines[opening_linenum] != CleanseComments(clean_lines.raw_lines[opening_linenum])): # Opening line ends with a comment, so conditional isn't empty. return if closing_linenum > opening_linenum: # Opening line after the {. Ignore comments here since we checked above. body = list(opening_line[opening_pos+1:]) # All lines until closing line, excluding closing line, with comments. body.extend(clean_lines.raw_lines[opening_linenum+1:closing_linenum]) # Closing line before the }. Won't (and can't) have comments. body.append(clean_lines.elided[closing_linenum][:closing_pos-1]) body = '\n'.join(body) else: # If statement has brackets and fits on a single line. body = opening_line[opening_pos+1:closing_pos-1] # Check if the body is empty if not _EMPTY_CONDITIONAL_BODY_PATTERN.search(body): return # The body is empty. Now make sure there's not an else clause. current_linenum = closing_linenum current_line_fragment = closing_line[closing_pos:] # Loop until EOF or find anything that's not whitespace or else clause. while Search(r'^\s*$|^(?=\s*else)', current_line_fragment): if Search(r'^(?=\s*else)', current_line_fragment): # Found an else clause, so don't log an error. return current_linenum += 1 if current_linenum == len(clean_lines.elided): break current_line_fragment = clean_lines.elided[current_linenum] # The body is empty and there's no else clause until EOF or other code. error(filename, end_linenum, 'whitespace/empty_if_body', 4, ('If statement had no body and no else clause')) def FindCheckMacro(line): """Find a replaceable CHECK-like macro. Args: line: line to search on. Returns: (macro name, start position), or (None, -1) if no replaceable macro is found. """ for macro in _CHECK_MACROS: i = line.find(macro) if i >= 0: # Find opening parenthesis. 
Do a regular expression match here # to make sure that we are matching the expected CHECK macro, as # opposed to some other macro that happens to contain the CHECK # substring. matched = Match(r'^(.*\b' + macro + r'\s*)\(', line) if not matched: continue return (macro, len(matched.group(1))) return (None, -1) def CheckCheck(filename, clean_lines, linenum, error): """Checks the use of CHECK and EXPECT macros. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ # Decide the set of replacement macros that should be suggested lines = clean_lines.elided (check_macro, start_pos) = FindCheckMacro(lines[linenum]) if not check_macro: return # Find end of the boolean expression by matching parentheses (last_line, end_line, end_pos) = CloseExpression( clean_lines, linenum, start_pos) if end_pos < 0: return # If the check macro is followed by something other than a # semicolon, assume users will log their own custom error messages # and don't suggest any replacements. if not Match(r'\s*;', last_line[end_pos:]): return if linenum == end_line: expression = lines[linenum][start_pos + 1:end_pos - 1] else: expression = lines[linenum][start_pos + 1:] for i in xrange(linenum + 1, end_line): expression += lines[i] expression += last_line[0:end_pos - 1] # Parse expression so that we can take parentheses into account. # This avoids false positives for inputs like "CHECK((a < 4) == b)", # which is not replaceable by CHECK_LE. lhs = '' rhs = '' operator = None while expression: matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||' r'==|!=|>=|>|<=|<|\()(.*)$', expression) if matched: token = matched.group(1) if token == '(': # Parenthesized operand expression = matched.group(2) (end, _) = FindEndOfExpressionInLine(expression, 0, ['(']) if end < 0: return # Unmatched parenthesis lhs += '(' + expression[0:end] expression = expression[end:] elif token in ('&&', '||'): # Logical and/or operators. This means the expression # contains more than one term, for example: # CHECK(42 < a && a < b); # # These are not replaceable with CHECK_LE, so bail out early. return elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'): # Non-relational operator lhs += token expression = matched.group(2) else: # Relational operator operator = token rhs = matched.group(2) break else: # Unparenthesized operand. Instead of appending to lhs one character # at a time, we do another regular expression match to consume several # characters at once if possible. Trivial benchmark shows that this # is more efficient when the operands are longer than a single # character, which is generally the case. matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression) if not matched: matched = Match(r'^(\s*\S)(.*)$', expression) if not matched: break lhs += matched.group(1) expression = matched.group(2) # Only apply checks if we got all parts of the boolean expression if not (lhs and operator and rhs): return # Check that rhs do not contain logical operators. We already know # that lhs is fine since the loop above parses out && and ||. if rhs.find('&&') > -1 or rhs.find('||') > -1: return # At least one of the operands must be a constant literal. This is # to avoid suggesting replacements for unprintable things like # CHECK(variable != iterator) # # The following pattern matches decimal, hex integers, strings, and # characters (in that order). 
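  # For example, lhs/rhs values such as 42, -0x1F, "foo" or 'x' count as
  # constant literals below, so CHECK(x == 42) draws a suggestion while
  # CHECK(begin != end) is left alone.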
lhs = lhs.strip() rhs = rhs.strip() match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$' if Match(match_constant, lhs) or Match(match_constant, rhs): # Note: since we know both lhs and rhs, we can provide a more # descriptive error message like: # Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42) # Instead of: # Consider using CHECK_EQ instead of CHECK(a == b) # # We are still keeping the less descriptive message because if lhs # or rhs gets long, the error message might become unreadable. error(filename, linenum, 'readability/check', 2, 'Consider using %s instead of %s(a %s b)' % ( _CHECK_REPLACEMENT[check_macro][operator], check_macro, operator)) def CheckAltTokens(filename, clean_lines, linenum, error): """Check alternative keywords being used in boolean expressions. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # Avoid preprocessor lines if Match(r'^\s*#', line): return # Last ditch effort to avoid multi-line comments. This will not help # if the comment started before the current line or ended after the # current line, but it catches most of the false positives. At least, # it provides a way to workaround this warning for people who use # multi-line comments in preprocessor macros. # # TODO(unknown): remove this once cpplint has better support for # multi-line comments. if line.find('/*') >= 0 or line.find('*/') >= 0: return for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line): error(filename, linenum, 'readability/alt_tokens', 2, 'Use operator %s instead of %s' % ( _ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1))) def GetLineWidth(line): """Determines the width of the line in column positions. Args: line: A string, which may be a Unicode string. Returns: The width of the line in column positions, accounting for Unicode combining characters and wide characters. """ if isinstance(line, unicode): width = 0 for uc in unicodedata.normalize('NFC', line): if unicodedata.east_asian_width(uc) in ('W', 'F'): width += 2 elif not unicodedata.combining(uc): width += 1 return width else: return len(line) def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state, error): """Checks rules from the 'C++ style rules' section of cppguide.html. Most of these rules are hard to test (naming, comment style), but we do what we can. In particular we check for 2-space indents, line lengths, tab usage, spaces inside code, etc. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. file_extension: The extension (without the dot) of the filename. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. error: The function to call with any errors found. """ # Don't use "elided" lines here, otherwise we can't check commented lines. # Don't want to use "raw" either, because we don't want to check inside C++11 # raw strings, raw_lines = clean_lines.lines_without_raw_strings line = raw_lines[linenum] prev = raw_lines[linenum - 1] if linenum > 0 else '' if line.find('\t') != -1: error(filename, linenum, 'whitespace/tab', 1, 'Tab found; better to use spaces') # One or three blank spaces at the beginning of the line is weird; it's # hard to reconcile that with 2-space indents. 
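  # For example, "   int x;" (three leading spaces) is suspicious under a
  # 2-space indent scheme, while "  int x;" and "    int x;" are not.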
# NOTE: here are the conditions rob pike used for his tests. Mine aren't # as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces # if(RLENGTH > 20) complain = 0; # if(match($0, " +(error|private|public|protected):")) complain = 0; # if(match(prev, "&& *$")) complain = 0; # if(match(prev, "\\|\\| *$")) complain = 0; # if(match(prev, "[\",=><] *$")) complain = 0; # if(match($0, " <<")) complain = 0; # if(match(prev, " +for \\(")) complain = 0; # if(prevodd && match(prevprev, " +for \\(")) complain = 0; scope_or_label_pattern = r'\s*\w+\s*:\s*\\?$' classinfo = nesting_state.InnermostClass() initial_spaces = 0 cleansed_line = clean_lines.elided[linenum] while initial_spaces < len(line) and line[initial_spaces] == ' ': initial_spaces += 1 # There are certain situations we allow one space, notably for # section labels, and also lines containing multi-line raw strings. # We also don't check for lines that look like continuation lines # (of lines ending in double quotes, commas, equals, or angle brackets) # because the rules for how to indent those are non-trivial. if (not Search(r'[",=><] *$', prev) and (initial_spaces == 1 or initial_spaces == 3) and not Match(scope_or_label_pattern, cleansed_line) and not (clean_lines.raw_lines[linenum] != line and Match(r'^\s*""', line))): error(filename, linenum, 'whitespace/indent', 3, 'Weird number of spaces at line-start. ' 'Are you using a 2-space indent?') if line and line[-1].isspace(): error(filename, linenum, 'whitespace/end_of_line', 4, 'Line ends in whitespace. Consider deleting these extra spaces.') # Check if the line is a header guard. is_header_guard = False if file_extension == 'h': cppvar = GetHeaderGuardCPPVariable(filename) if (line.startswith('#ifndef %s' % cppvar) or line.startswith('#define %s' % cppvar) or line.startswith('#endif // %s' % cppvar)): is_header_guard = True # #include lines and header guards can be long, since there's no clean way to # split them. # # URLs can be long too. It's possible to split these, but it makes them # harder to cut&paste. # # The "$Id:...$" comment may also get very long without it being the # developers fault. if (not line.startswith('#include') and not is_header_guard and not Match(r'^\s*//.*http(s?)://\S*$', line) and not Match(r'^\s*//\s*[^\s]*$', line) and not Match(r'^// \$Id:.*#[0-9]+ \$$', line)): line_width = GetLineWidth(line) if line_width > _line_length: error(filename, linenum, 'whitespace/line_length', 2, 'Lines should be <= %i characters long' % _line_length) if (cleansed_line.count(';') > 1 and # for loops are allowed two ;'s (and may run over two lines). 
cleansed_line.find('for') == -1 and (GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and # It's ok to have many commands in a switch case that fits in 1 line not ((cleansed_line.find('case ') != -1 or cleansed_line.find('default:') != -1) and cleansed_line.find('break;') != -1)): error(filename, linenum, 'whitespace/newline', 0, 'More than one command on the same line') # Some more style checks CheckBraces(filename, clean_lines, linenum, error) CheckTrailingSemicolon(filename, clean_lines, linenum, error) CheckEmptyBlockBody(filename, clean_lines, linenum, error) CheckAccess(filename, clean_lines, linenum, nesting_state, error) CheckSpacing(filename, clean_lines, linenum, nesting_state, error) CheckOperatorSpacing(filename, clean_lines, linenum, error) CheckParenthesisSpacing(filename, clean_lines, linenum, error) CheckCommaSpacing(filename, clean_lines, linenum, error) CheckBracesSpacing(filename, clean_lines, linenum, nesting_state, error) CheckSpacingForFunctionCall(filename, clean_lines, linenum, error) CheckCheck(filename, clean_lines, linenum, error) CheckAltTokens(filename, clean_lines, linenum, error) classinfo = nesting_state.InnermostClass() if classinfo: CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error) _RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$') # Matches the first component of a filename delimited by -s and _s. That is: # _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo' # _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo' # _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo' # _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo' _RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+') def _DropCommonSuffixes(filename): """Drops common suffixes like _test.cc or -inl.h from filename. For example: >>> _DropCommonSuffixes('foo/foo-inl.h') 'foo/foo' >>> _DropCommonSuffixes('foo/bar/foo.cc') 'foo/bar/foo' >>> _DropCommonSuffixes('foo/foo_internal.h') 'foo/foo' >>> _DropCommonSuffixes('foo/foo_unusualinternal.h') 'foo/foo_unusualinternal' Args: filename: The input filename. Returns: The filename with the common suffix removed. """ for suffix in ('test.cc', 'regtest.cc', 'unittest.cc', 'inl.h', 'impl.h', 'internal.h'): if (filename.endswith(suffix) and len(filename) > len(suffix) and filename[-len(suffix) - 1] in ('-', '_')): return filename[:-len(suffix) - 1] return os.path.splitext(filename)[0] def _ClassifyInclude(fileinfo, include, is_system): """Figures out what kind of header 'include' is. Args: fileinfo: The current file cpplint is running over. A FileInfo instance. include: The path to a #included file. is_system: True if the #include used <> rather than "". Returns: One of the _XXX_HEADER constants. For example: >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True) _C_SYS_HEADER >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True) _CPP_SYS_HEADER >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False) _LIKELY_MY_HEADER >>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'), ... 'bar/foo_other_ext.h', False) _POSSIBLE_MY_HEADER >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False) _OTHER_HEADER """ # This is a list of all standard c++ header files, except # those already checked for above. 
is_cpp_h = include in _CPP_HEADERS if is_system: if is_cpp_h: return _CPP_SYS_HEADER else: return _C_SYS_HEADER # If the target file and the include we're checking share a # basename when we drop common extensions, and the include # lives in . , then it's likely to be owned by the target file. target_dir, target_base = ( os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName()))) include_dir, include_base = os.path.split(_DropCommonSuffixes(include)) if target_base == include_base and ( include_dir == target_dir or include_dir == os.path.normpath(target_dir + '/../public')): return _LIKELY_MY_HEADER # If the target and include share some initial basename # component, it's possible the target is implementing the # include, so it's allowed to be first, but we'll never # complain if it's not there. target_first_component = _RE_FIRST_COMPONENT.match(target_base) include_first_component = _RE_FIRST_COMPONENT.match(include_base) if (target_first_component and include_first_component and target_first_component.group(0) == include_first_component.group(0)): return _POSSIBLE_MY_HEADER return _OTHER_HEADER def CheckIncludeLine(filename, clean_lines, linenum, include_state, error): """Check rules that are applicable to #include lines. Strings on #include lines are NOT removed from elided line, to make certain tasks easier. However, to prevent false positives, checks applicable to #include lines in CheckLanguage must be put here. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. include_state: An _IncludeState instance in which the headers are inserted. error: The function to call with any errors found. """ fileinfo = FileInfo(filename) line = clean_lines.lines[linenum] # "include" should use the new style "foo/bar.h" instead of just "bar.h" # Only do this check if the included header follows google naming # conventions. If not, assume that it's a 3rd party API that # requires special include conventions. # # We also make an exception for Lua headers, which follow google # naming convention but not the include convention. match = Match(r'#include\s*"([^/]+\.h)"', line) if match and not _THIRD_PARTY_HEADERS_PATTERN.match(match.group(1)): error(filename, linenum, 'build/include', 4, 'Include the directory when naming .h files') # we shouldn't include a file more than once. actually, there are a # handful of instances where doing so is okay, but in general it's # not. match = _RE_PATTERN_INCLUDE.search(line) if match: include = match.group(2) is_system = (match.group(1) == '<') duplicate_line = include_state.FindHeader(include) if duplicate_line >= 0: error(filename, linenum, 'build/include', 4, '"%s" already included at %s:%s' % (include, filename, duplicate_line)) elif (include.endswith('.cc') and os.path.dirname(fileinfo.RepositoryName()) != os.path.dirname(include)): error(filename, linenum, 'build/include', 4, 'Do not include .cc files from other packages') elif not _THIRD_PARTY_HEADERS_PATTERN.match(include): include_state.include_list[-1].append((include, linenum)) # We want to ensure that headers appear in the right order: # 1) for foo.cc, foo.h (preferred location) # 2) c system files # 3) cpp system files # 4) for foo.cc, foo.h (deprecated location) # 5) other google headers # # We classify each include statement as one of those 5 types # using a number of techniques. The include_state object keeps # track of the highest type seen, and complains if we see a # lower type after that. 
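      # For example, seeing "#include <stdio.h>" (a C system header) after
      # "#include <string>" (a C++ system header) is reported as out of
      # order.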
      error_message = include_state.CheckNextIncludeOrder(
          _ClassifyInclude(fileinfo, include, is_system))
      if error_message:
        error(filename, linenum, 'build/include_order', 4,
              '%s. Should be: %s.h, c system, c++ system, other.' %
              (error_message, fileinfo.BaseName()))
      canonical_include = include_state.CanonicalizeAlphabeticalOrder(include)
      if not include_state.IsInAlphabeticalOrder(
          clean_lines, linenum, canonical_include):
        error(filename, linenum, 'build/include_alpha', 4,
              'Include "%s" not in alphabetical order' % include)
      include_state.SetLastHeader(canonical_include)


def _GetTextInside(text, start_pattern):
  r"""Retrieves all the text between matching open and close parentheses.

  Given a string of lines and a regular expression string, retrieve all the
  text following the expression and between opening punctuation symbols like
  (, [, or {, and the matching close-punctuation symbol. This properly
  handles nested occurrences of the punctuation, so for text like
  printf(a(), b(c()));
  a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'.
  start_pattern must match a string that ends with an opening punctuation
  symbol.

  Args:
    text: The lines to extract text from. Its comments and strings must be
          elided. It may be a single line or span multiple lines.
    start_pattern: The regexp string indicating where to start extracting
                   the text.
  Returns:
    The extracted text.
    None if either the opening string or ending punctuation could not be found.
  """
  # TODO(unknown): Audit cpplint.py to see what places could be profitably
  # rewritten to use _GetTextInside (and currently use inferior regexp
  # matching).

  # Map each opening punctuation symbol to its matching closing symbol.
  matching_punctuation = {'(': ')', '{': '}', '[': ']'}
  closing_punctuation = set(matching_punctuation.itervalues())

  # Find the position to start extracting text.
  match = re.search(start_pattern, text, re.M)
  if not match:  # start_pattern not found in text.
    return None
  start_position = match.end(0)

  assert start_position > 0, (
      'start_pattern must end with an opening punctuation.')
  assert text[start_position - 1] in matching_punctuation, (
      'start_pattern must end with an opening punctuation.')
  # Stack of closing punctuations we expect to have in text after position.
  punctuation_stack = [matching_punctuation[text[start_position - 1]]]
  position = start_position
  while punctuation_stack and position < len(text):
    if text[position] == punctuation_stack[-1]:
      punctuation_stack.pop()
    elif text[position] in closing_punctuation:
      # A closing punctuation without matching opening punctuation.
      return None
    elif text[position] in matching_punctuation:
      punctuation_stack.append(matching_punctuation[text[position]])
    position += 1
  if punctuation_stack:
    # Opening punctuation left without matching closing punctuation.
    return None
  # All punctuation matched.
  return text[start_position:position - 1]


# Patterns for matching call-by-reference parameters.
#
# Supports nested templates up to 2 levels deep using this messy pattern:
#   < (?: < (?: < [^<>]*
#               >
#           |   [^<>] )*
#         >
#       |   [^<>] )*
#   >
_RE_PATTERN_IDENT = r'[_a-zA-Z]\w*'  # =~ [[:alpha:]][[:alnum:]]*
_RE_PATTERN_TYPE = (
    r'(?:const\s+)?(?:typename\s+|class\s+|struct\s+|union\s+|enum\s+)?'
    r'(?:\w|'
    r'\s*<(?:<(?:<[^<>]*>|[^<>])*>|[^<>])*>|'
    r'::)+')
# A call-by-reference parameter ends with '& identifier'.
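# For example, "const string &name" and "int &value = kDefaultValue" both
# match when followed by ',' or ')', while "int *ptr" does not.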
_RE_PATTERN_REF_PARAM = re.compile( r'(' + _RE_PATTERN_TYPE + r'(?:\s*(?:\bconst\b|[*]))*\s*' r'&\s*' + _RE_PATTERN_IDENT + r')\s*(?:=[^,()]+)?[,)]') # A call-by-const-reference parameter either ends with 'const& identifier' # or looks like 'const type& identifier' when 'type' is atomic. _RE_PATTERN_CONST_REF_PARAM = ( r'(?:.*\s*\bconst\s*&\s*' + _RE_PATTERN_IDENT + r'|const\s+' + _RE_PATTERN_TYPE + r'\s*&\s*' + _RE_PATTERN_IDENT + r')') # Stream types. _RE_PATTERN_REF_STREAM_PARAM = ( r'(?:.*stream\s*&\s*' + _RE_PATTERN_IDENT + r')') def CheckLanguage(filename, clean_lines, linenum, file_extension, include_state, nesting_state, error): """Checks rules from the 'C++ language rules' section of cppguide.html. Some of these rules are hard to test (function overloading, using uint32 inappropriately), but we do the best we can. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. file_extension: The extension (without the dot) of the filename. include_state: An _IncludeState instance in which the headers are inserted. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. error: The function to call with any errors found. """ # If the line is empty or consists of entirely a comment, no need to # check it. line = clean_lines.elided[linenum] if not line: return match = _RE_PATTERN_INCLUDE.search(line) if match: CheckIncludeLine(filename, clean_lines, linenum, include_state, error) return # Reset include state across preprocessor directives. This is meant # to silence warnings for conditional includes. match = Match(r'^\s*#\s*(if|ifdef|ifndef|elif|else|endif)\b', line) if match: include_state.ResetSection(match.group(1)) # Make Windows paths like Unix. fullname = os.path.abspath(filename).replace('\\', '/') # Perform other checks now that we are sure that this is not an include line CheckCasts(filename, clean_lines, linenum, error) CheckGlobalStatic(filename, clean_lines, linenum, error) CheckPrintf(filename, clean_lines, linenum, error) if file_extension == 'h': # TODO(unknown): check that 1-arg constructors are explicit. # How to tell it's a constructor? # (handled in CheckForNonStandardConstructs for now) # TODO(unknown): check that classes declare or disable copy/assign # (level 1 error) pass # Check if people are using the verboten C basic types. The only exception # we regularly allow is "unsigned short port" for port. if Search(r'\bshort port\b', line): if not Search(r'\bunsigned short port\b', line): error(filename, linenum, 'runtime/int', 4, 'Use "unsigned short" for ports, not "short"') else: match = Search(r'\b(short|long(?! +double)|long long)\b', line) if match: error(filename, linenum, 'runtime/int', 4, 'Use int16/int64/etc, rather than the C type %s' % match.group(1)) # Check if some verboten operator overloading is going on # TODO(unknown): catch out-of-line unary operator&: # class X {}; # int operator&(const X& x) { return 42; } // unary operator& # The trick is it's hard to tell apart from binary operator&: # class Y { int operator&(const Y& x) { return 23; } }; // binary operator& if Search(r'\boperator\s*&\s*\(\s*\)', line): error(filename, linenum, 'runtime/operator', 4, 'Unary operator& is dangerous. Do not use it.') # Check for suspicious usage of "if" like # } if (a == b) { if Search(r'\}\s*if\s*\(', line): error(filename, linenum, 'readability/braces', 4, 'Did you mean "else if"? 
If not, start a new line for "if".')

  # Check for potential format string bugs like printf(foo).
  # We constrain the pattern not to pick things like DocidForPrintf(foo).
  # Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
  # TODO(unknown): Catch the following case. Need to change the calling
  # convention of the whole function to process multiple lines to handle it.
  #   printf(
  #       boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
  printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
  if printf_args:
    match = Match(r'([\w.\->()]+)$', printf_args)
    if match and match.group(1) != '__VA_ARGS__':
      function_name = re.search(r'\b((?:string)?printf)\s*\(',
                                line, re.I).group(1)
      error(filename, linenum, 'runtime/printf', 4,
            'Potential format string bug. Do %s("%%s", %s) instead.'
            % (function_name, match.group(1)))

  # Check for potential memset bugs like memset(buf, sizeof(buf), 0).
  match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
  if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
    error(filename, linenum, 'runtime/memset', 4,
          'Did you mean "memset(%s, 0, %s)"?'
          % (match.group(1), match.group(2)))

  if Search(r'\busing namespace\b', line):
    error(filename, linenum, 'build/namespaces', 5,
          'Do not use namespace using-directives. '
          'Use using-declarations instead.')

  # Detect variable-length arrays.
  match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
  if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
      match.group(3).find(']') == -1):
    # Split the size using space and arithmetic operators as delimiters.
    # If any of the resulting tokens are not compile time constants then
    # report the error.
    tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>', match.group(3))
    is_const = True
    skip_next = False
    for tok in tokens:
      if skip_next:
        skip_next = False
        continue

      if Search(r'sizeof\(.+\)', tok): continue
      if Search(r'arraysize\(\w+\)', tok): continue

      tok = tok.lstrip('(')
      tok = tok.rstrip(')')
      if not tok: continue
      if Match(r'\d+', tok): continue
      if Match(r'0[xX][0-9a-fA-F]+', tok): continue
      if Match(r'k[A-Z0-9]\w*', tok): continue
      if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
      if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
      # A catch all for tricky sizeof cases, including 'sizeof expression',
      # 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'.
      # These require skipping the next token because we split on ' ' and '*'.
      if tok.startswith('sizeof'):
        skip_next = True
        continue
      is_const = False
      break
    if not is_const:
      error(filename, linenum, 'runtime/arrays', 1,
            'Do not use variable-length arrays. Use an appropriately named '
            "('k' followed by CamelCase) compile-time constant for the size.")

  # Check for use of unnamed namespaces in header files. Registration
  # macros are typically OK, so we allow use of "namespace {" on lines
  # that end with backslashes.
  if (file_extension == 'h'
      and Search(r'\bnamespace\s*{', line)
      and line[-1] != '\\'):
    error(filename, linenum, 'build/namespaces', 4,
          'Do not use unnamed namespaces in header files. See '
          'https://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
          ' for more information.')


def CheckGlobalStatic(filename, clean_lines, linenum, error):
  """Check for unsafe global or static objects.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
""" line = clean_lines.elided[linenum] # Match two lines at a time to support multiline declarations if linenum + 1 < clean_lines.NumLines() and not Search(r'[;({]', line): line += clean_lines.elided[linenum + 1].strip() # Check for people declaring static/global STL strings at the top level. # This is dangerous because the C++ language does not guarantee that # globals with constructors are initialized before the first access, and # also because globals can be destroyed when some threads are still running. # TODO(unknown): Generalize this to also find static unique_ptr instances. # TODO(unknown): File bugs for clang-tidy to find these. match = Match( r'((?:|static +)(?:|const +))(?::*std::)?string( +const)? +' r'([a-zA-Z0-9_:]+)\b(.*)', line) # Remove false positives: # - String pointers (as opposed to values). # string *pointer # const string *pointer # string const *pointer # string *const pointer # # - Functions and template specializations. # string Function<Type>(... # string Class<Type>::Method(... # # - Operators. These are matched separately because operator names # cross non-word boundaries, and trying to match both operators # and functions at the same time would decrease accuracy of # matching identifiers. # string Class::operator*() if (match and not Search(r'\bstring\b(\s+const)?\s*[\*\&]\s*(const\s+)?\w', line) and not Search(r'\boperator\W', line) and not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)*\s*\(([^"]|$)', match.group(4))): if Search(r'\bconst\b', line): error(filename, linenum, 'runtime/string', 4, 'For a static/global string constant, use a C style string ' 'instead: "%schar%s %s[]".' % (match.group(1), match.group(2) or '', match.group(3))) else: error(filename, linenum, 'runtime/string', 4, 'Static/global string variables are not permitted.') if (Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line) or Search(r'\b([A-Za-z0-9_]*_)\(CHECK_NOTNULL\(\1\)\)', line)): error(filename, linenum, 'runtime/init', 4, 'You seem to be initializing a member variable with itself.') def CheckPrintf(filename, clean_lines, linenum, error): """Check for printf related issues. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # When snprintf is used, the second argument shouldn't be a literal. match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line) if match and match.group(2) != '0': # If 2nd arg is zero, snprintf is used to calculate size. error(filename, linenum, 'runtime/printf', 3, 'If you can, use sizeof(%s) instead of %s as the 2nd arg ' 'to snprintf.' % (match.group(1), match.group(2))) # Check if some verboten C functions are being used. if Search(r'\bsprintf\s*\(', line): error(filename, linenum, 'runtime/printf', 5, 'Never use sprintf. Use snprintf instead.') match = Search(r'\b(strcpy|strcat)\s*\(', line) if match: error(filename, linenum, 'runtime/printf', 4, 'Almost always, snprintf is better than %s' % match.group(1)) def IsDerivedFunction(clean_lines, linenum): """Check if current line contains an inherited function. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. Returns: True if current line contains a function with "override" virt-specifier. 
""" # Scan back a few lines for start of current function for i in xrange(linenum, max(-1, linenum - 10), -1): match = Match(r'^([^()]*\w+)\(', clean_lines.elided[i]) if match: # Look for "override" after the matching closing parenthesis line, _, closing_paren = CloseExpression( clean_lines, i, len(match.group(1))) return (closing_paren >= 0 and Search(r'\boverride\b', line[closing_paren:])) return False def IsOutOfLineMethodDefinition(clean_lines, linenum): """Check if current line contains an out-of-line method definition. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. Returns: True if current line contains an out-of-line method definition. """ # Scan back a few lines for start of current function for i in xrange(linenum, max(-1, linenum - 10), -1): if Match(r'^([^()]*\w+)\(', clean_lines.elided[i]): return Match(r'^[^()]*\w+::\w+\(', clean_lines.elided[i]) is not None return False def IsInitializerList(clean_lines, linenum): """Check if current line is inside constructor initializer list. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. Returns: True if current line appears to be inside constructor initializer list, False otherwise. """ for i in xrange(linenum, 1, -1): line = clean_lines.elided[i] if i == linenum: remove_function_body = Match(r'^(.*)\{\s*$', line) if remove_function_body: line = remove_function_body.group(1) if Search(r'\s:\s*\w+[({]', line): # A lone colon tend to indicate the start of a constructor # initializer list. It could also be a ternary operator, which # also tend to appear in constructor initializer lists as # opposed to parameter lists. return True if Search(r'\}\s*,\s*$', line): # A closing brace followed by a comma is probably the end of a # brace-initialized member in constructor initializer list. return True if Search(r'[{};]\s*$', line): # Found one of the following: # - A closing brace or semicolon, probably the end of the previous # function. # - An opening brace, probably the start of current class or namespace. # # Current line is probably not inside an initializer list since # we saw one of those things without seeing the starting colon. return False # Got to the beginning of the file without seeing the start of # constructor initializer list. return False def CheckForNonConstReference(filename, clean_lines, linenum, nesting_state, error): """Check for non-const references. Separate from CheckLanguage since it scans backwards from current line, instead of scanning forward. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. error: The function to call with any errors found. """ # Do nothing if there is no '&' on current line. line = clean_lines.elided[linenum] if '&' not in line: return # If a function is inherited, current function doesn't have much of # a choice, so any non-const references should not be blamed on # derived function. if IsDerivedFunction(clean_lines, linenum): return # Don't warn on out-of-line method definitions, as we would warn on the # in-line declaration, if it isn't marked with 'override'. 
if IsOutOfLineMethodDefinition(clean_lines, linenum): return # Long type names may be broken across multiple lines, usually in one # of these forms: # LongType # ::LongTypeContinued &identifier # LongType:: # LongTypeContinued &identifier # LongType< # ...>::LongTypeContinued &identifier # # If we detected a type split across two lines, join the previous # line to current line so that we can match const references # accordingly. # # Note that this only scans back one line, since scanning back # arbitrary number of lines would be expensive. If you have a type # that spans more than 2 lines, please use a typedef. if linenum > 1: previous = None if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line): # previous_line\n + ::current_line previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$', clean_lines.elided[linenum - 1]) elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line): # previous_line::\n + current_line previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$', clean_lines.elided[linenum - 1]) if previous: line = previous.group(1) + line.lstrip() else: # Check for templated parameter that is split across multiple lines endpos = line.rfind('>') if endpos > -1: (_, startline, startpos) = ReverseCloseExpression( clean_lines, linenum, endpos) if startpos > -1 and startline < linenum: # Found the matching < on an earlier line, collect all # pieces up to current line. line = '' for i in xrange(startline, linenum + 1): line += clean_lines.elided[i].strip() # Check for non-const references in function parameters. A single '&' may # found in the following places: # inside expression: binary & for bitwise AND # inside expression: unary & for taking the address of something # inside declarators: reference parameter # We will exclude the first two cases by checking that we are not inside a # function body, including one that was just introduced by a trailing '{'. # TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare]. if (nesting_state.previous_stack_top and not (isinstance(nesting_state.previous_stack_top, _ClassInfo) or isinstance(nesting_state.previous_stack_top, _NamespaceInfo))): # Not at toplevel, not within a class, and not within a namespace return # Avoid initializer lists. We only need to scan back from the # current line for something that starts with ':'. # # We don't need to check the current line, since the '&' would # appear inside the second set of parentheses on the current line as # opposed to the first set. if linenum > 0: for i in xrange(linenum - 1, max(0, linenum - 10), -1): previous_line = clean_lines.elided[i] if not Search(r'[),]\s*$', previous_line): break if Match(r'^\s*:\s+\S', previous_line): return # Avoid preprocessors if Search(r'\\\s*$', line): return # Avoid constructor initializer lists if IsInitializerList(clean_lines, linenum): return # We allow non-const references in a few standard places, like functions # called "swap()" or iostream operators like "<<" or ">>". Do not check # those function parameters. # # We also accept & in static_assert, which looks like a function but # it's actually a declaration expression. whitelisted_functions = (r'(?:[sS]wap(?:<\w:+>)?|' r'operator\s*[<>][<>]|' r'static_assert|COMPILE_ASSERT' r')\s*\(') if Search(whitelisted_functions, line): return elif not Search(r'\S+\([^)]*$', line): # Don't see a whitelisted function on this line. Actually we # didn't see any function name on this line, so this is likely a # multi-line parameter list. Try a bit harder to catch this case. 
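    # Look back up to two previous lines for a whitelisted name, e.g. a
    # "swap(" or "operator<<(" whose parameter list wraps onto this line.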
for i in xrange(2): if (linenum > i and Search(whitelisted_functions, clean_lines.elided[linenum - i - 1])): return decls = ReplaceAll(r'{[^}]*}', ' ', line) # exclude function body for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls): if (not Match(_RE_PATTERN_CONST_REF_PARAM, parameter) and not Match(_RE_PATTERN_REF_STREAM_PARAM, parameter)): error(filename, linenum, 'runtime/references', 2, 'Is this a non-const reference? ' 'If so, make const or use a pointer: ' + ReplaceAll(' *<', '<', parameter)) def CheckCasts(filename, clean_lines, linenum, error): """Various cast related checks. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] # Check to see if they're using an conversion function cast. # I just try to capture the most common basic types, though there are more. # Parameterless conversion functions, such as bool(), are allowed as they are # probably a member operator declaration or default constructor. match = Search( r'(\bnew\s+(?:const\s+)?|\S<\s*(?:const\s+)?)?\b' r'(int|float|double|bool|char|int32|uint32|int64|uint64)' r'(\([^)].*)', line) expecting_function = ExpectingFunctionArgs(clean_lines, linenum) if match and not expecting_function: matched_type = match.group(2) # matched_new_or_template is used to silence two false positives: # - New operators # - Template arguments with function types # # For template arguments, we match on types immediately following # an opening bracket without any spaces. This is a fast way to # silence the common case where the function type is the first # template argument. False negative with less-than comparison is # avoided because those operators are usually followed by a space. # # function<double(double)> // bracket + no space = false positive # value < double(42) // bracket + space = true positive matched_new_or_template = match.group(1) # Avoid arrays by looking for brackets that come after the closing # parenthesis. if Match(r'\([^()]+\)\s*\[', match.group(3)): return # Other things to ignore: # - Function pointers # - Casts to pointer types # - Placement new # - Alias declarations matched_funcptr = match.group(3) if (matched_new_or_template is None and not (matched_funcptr and (Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(', matched_funcptr) or matched_funcptr.startswith('(*)'))) and not Match(r'\s*using\s+\S+\s*=\s*' + matched_type, line) and not Search(r'new\(\S+\)\s*' + matched_type, line)): error(filename, linenum, 'readability/casting', 4, 'Using deprecated casting style. ' 'Use static_cast<%s>(...) instead' % matched_type) if not expecting_function: CheckCStyleCast(filename, clean_lines, linenum, 'static_cast', r'\((int|float|double|bool|char|u?int(16|32|64))\)', error) # This doesn't catch all cases. Consider (const char * const)"hello". # # (char *) "foo" should always be a const_cast (reinterpret_cast won't # compile). if CheckCStyleCast(filename, clean_lines, linenum, 'const_cast', r'\((char\s?\*+\s?)\)\s*"', error): pass else: # Check pointer casts for other than string constants CheckCStyleCast(filename, clean_lines, linenum, 'reinterpret_cast', r'\((\w+\s?\*+\s?)\)', error) # In addition, we look for people taking the address of a cast. This # is dangerous -- casts can assign to temporaries, so the pointer doesn't # point where you think. 
# # Some non-identifier character is required before the '&' for the # expression to be recognized as a cast. These are casts: # expression = &static_cast<int*>(temporary()); # function(&(int*)(temporary())); # # This is not a cast: # reference_type&(int* function_param); match = Search( r'(?:[^\w]&\(([^)*][^)]*)\)[\w(])|' r'(?:[^\w]&(static|dynamic|down|reinterpret)_cast\b)', line) if match: # Try a better error message when the & is bound to something # dereferenced by the casted pointer, as opposed to the casted # pointer itself. parenthesis_error = False match = Match(r'^(.*&(?:static|dynamic|down|reinterpret)_cast\b)<', line) if match: _, y1, x1 = CloseExpression(clean_lines, linenum, len(match.group(1))) if x1 >= 0 and clean_lines.elided[y1][x1] == '(': _, y2, x2 = CloseExpression(clean_lines, y1, x1) if x2 >= 0: extended_line = clean_lines.elided[y2][x2:] if y2 < clean_lines.NumLines() - 1: extended_line += clean_lines.elided[y2 + 1] if Match(r'\s*(?:->|\[)', extended_line): parenthesis_error = True if parenthesis_error: error(filename, linenum, 'readability/casting', 4, ('Are you taking an address of something dereferenced ' 'from a cast? Wrapping the dereferenced expression in ' 'parentheses will make the binding more obvious')) else: error(filename, linenum, 'runtime/casting', 4, ('Are you taking an address of a cast? ' 'This is dangerous: could be a temp var. ' 'Take the address before doing the cast, rather than after')) def CheckCStyleCast(filename, clean_lines, linenum, cast_type, pattern, error): """Checks for a C-style cast by looking for the pattern. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. cast_type: The string for the C++ cast to recommend. This is either reinterpret_cast, static_cast, or const_cast, depending. pattern: The regular expression used to find C-style casts. error: The function to call with any errors found. Returns: True if an error was emitted. False otherwise. """ line = clean_lines.elided[linenum] match = Search(pattern, line) if not match: return False # Exclude lines with keywords that tend to look like casts context = line[0:match.start(1) - 1] if Match(r'.*\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\s*$', context): return False # Try expanding current context to see if we one level of # parentheses inside a macro. if linenum > 0: for i in xrange(linenum - 1, max(0, linenum - 5), -1): context = clean_lines.elided[i] + context if Match(r'.*\b[_A-Z][_A-Z0-9]*\s*\((?:\([^()]*\)|[^()])*$', context): return False # operator++(int) and operator--(int) if context.endswith(' operator++') or context.endswith(' operator--'): return False # A single unnamed argument for a function tends to look like old style cast. # If we see those, don't issue warnings for deprecated casts. remainder = line[match.end(0):] if Match(r'^\s*(?:;|const\b|throw\b|final\b|override\b|[=>{),]|->)', remainder): return False # At this point, all that should be left is actual casts. error(filename, linenum, 'readability/casting', 4, 'Using C-style cast. Use %s<%s>(...) instead' % (cast_type, match.group(1))) return True def ExpectingFunctionArgs(clean_lines, linenum): """Checks whether where function type arguments are expected. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. Returns: True if the line at 'linenum' is inside something that expects arguments of function types. 
""" line = clean_lines.elided[linenum] return (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or (linenum >= 2 and (Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$', clean_lines.elided[linenum - 1]) or Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$', clean_lines.elided[linenum - 2]) or Search(r'\bstd::m?function\s*\<\s*$', clean_lines.elided[linenum - 1])))) _HEADERS_CONTAINING_TEMPLATES = ( ('<deque>', ('deque',)), ('<functional>', ('unary_function', 'binary_function', 'plus', 'minus', 'multiplies', 'divides', 'modulus', 'negate', 'equal_to', 'not_equal_to', 'greater', 'less', 'greater_equal', 'less_equal', 'logical_and', 'logical_or', 'logical_not', 'unary_negate', 'not1', 'binary_negate', 'not2', 'bind1st', 'bind2nd', 'pointer_to_unary_function', 'pointer_to_binary_function', 'ptr_fun', 'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t', 'mem_fun_ref_t', 'const_mem_fun_t', 'const_mem_fun1_t', 'const_mem_fun_ref_t', 'const_mem_fun1_ref_t', 'mem_fun_ref', )), ('<limits>', ('numeric_limits',)), ('<list>', ('list',)), ('<map>', ('map', 'multimap',)), ('<memory>', ('allocator', 'make_shared', 'make_unique', 'shared_ptr', 'unique_ptr', 'weak_ptr')), ('<queue>', ('queue', 'priority_queue',)), ('<set>', ('set', 'multiset',)), ('<stack>', ('stack',)), ('<string>', ('char_traits', 'basic_string',)), ('<tuple>', ('tuple',)), ('<unordered_map>', ('unordered_map', 'unordered_multimap')), ('<unordered_set>', ('unordered_set', 'unordered_multiset')), ('<utility>', ('pair',)), ('<vector>', ('vector',)), # gcc extensions. # Note: std::hash is their hash, ::hash is our hash ('<hash_map>', ('hash_map', 'hash_multimap',)), ('<hash_set>', ('hash_set', 'hash_multiset',)), ('<slist>', ('slist',)), ) _HEADERS_MAYBE_TEMPLATES = ( ('<algorithm>', ('copy', 'max', 'min', 'min_element', 'sort', 'transform', )), ('<utility>', ('forward', 'make_pair', 'move', 'swap')), ) _RE_PATTERN_STRING = re.compile(r'\bstring\b') _re_pattern_headers_maybe_templates = [] for _header, _templates in _HEADERS_MAYBE_TEMPLATES: for _template in _templates: # Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or # type::max(). _re_pattern_headers_maybe_templates.append( (re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'), _template, _header)) # Other scripts may reach in and modify this pattern. _re_pattern_templates = [] for _header, _templates in _HEADERS_CONTAINING_TEMPLATES: for _template in _templates: _re_pattern_templates.append( (re.compile(r'(\<|\b)' + _template + r'\s*\<'), _template + '<>', _header)) def FilesBelongToSameModule(filename_cc, filename_h): """Check if these two filenames belong to the same module. The concept of a 'module' here is a as follows: foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the same 'module' if they are in the same directory. some/path/public/xyzzy and some/path/internal/xyzzy are also considered to belong to the same module here. If the filename_cc contains a longer path than the filename_h, for example, '/absolute/path/to/base/sysinfo.cc', and this file would include 'base/sysinfo.h', this function also produces the prefix needed to open the header. This is used by the caller of this function to more robustly open the header file. We don't have access to the real include paths in this context, so we need this guesswork here. Known bugs: tools/base/bar.cc and base/bar.h belong to the same module according to this implementation. Because of this, this function gives some false positives. 
This should be sufficiently rare in practice. Args: filename_cc: is the path for the .cc file filename_h: is the path for the header path Returns: Tuple with a bool and a string: bool: True if filename_cc and filename_h belong to the same module. string: the additional prefix needed to open the header file. """ fileinfo = FileInfo(filename_cc) if not fileinfo.IsSource(): return (False, '') filename_cc = filename_cc[:-len(fileinfo.Extension())] matched_test_suffix = Search(_TEST_FILE_SUFFIX, fileinfo.BaseName()) if matched_test_suffix: filename_cc = filename_cc[:-len(matched_test_suffix.group(1))] filename_cc = filename_cc.replace('/public/', '/') filename_cc = filename_cc.replace('/internal/', '/') if not filename_h.endswith('.h'): return (False, '') filename_h = filename_h[:-len('.h')] if filename_h.endswith('-inl'): filename_h = filename_h[:-len('-inl')] filename_h = filename_h.replace('/public/', '/') filename_h = filename_h.replace('/internal/', '/') files_belong_to_same_module = filename_cc.endswith(filename_h) common_path = '' if files_belong_to_same_module: common_path = filename_cc[:-len(filename_h)] return files_belong_to_same_module, common_path def UpdateIncludeState(filename, include_dict, io=codecs): """Fill up the include_dict with new includes found from the file. Args: filename: the name of the header to read. include_dict: a dictionary in which the headers are inserted. io: The io factory to use to read the file. Provided for testability. Returns: True if a header was successfully added. False otherwise. """ headerfile = None try: headerfile = io.open(filename, 'r', 'utf8', 'replace') except IOError: return False linenum = 0 for line in headerfile: linenum += 1 clean_line = CleanseComments(line) match = _RE_PATTERN_INCLUDE.search(clean_line) if match: include = match.group(2) include_dict.setdefault(include, linenum) return True def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error, io=codecs): """Reports for missing stl includes. This function will output warnings to make sure you are including the headers necessary for the stl containers and functions that you use. We only give one reason to include a header. For example, if you use both equal_to<> and less<> in a .h file, only one (the latter in the file) of these will be reported as a reason to include the <functional>. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. include_state: An _IncludeState instance. error: The function to call with any errors found. io: The IO factory to use to read the header file. Provided for unittest injection. """ required = {} # A map of header name to linenumber and the template entity. # Example of required: { '<functional>': (1219, 'less<>') } for linenum in xrange(clean_lines.NumLines()): line = clean_lines.elided[linenum] if not line or line[0] == '#': continue # String is special -- it is a non-templatized type in STL. matched = _RE_PATTERN_STRING.search(line) if matched: # Don't warn about strings in non-STL namespaces: # (We check only the first match per line; good enough.) prefix = line[:matched.start()] if prefix.endswith('std::') or not prefix.endswith('::'): required['<string>'] = (linenum, 'string') for pattern, template, header in _re_pattern_headers_maybe_templates: if pattern.search(line): required[header] = (linenum, template) # The following function is just a speed up, no semantics are changed. if not '<' in line: # Reduces the cpu time usage by skipping lines. 
continue for pattern, template, header in _re_pattern_templates: matched = pattern.search(line) if matched: # Don't warn about IWYU in non-STL namespaces: # (We check only the first match per line; good enough.) prefix = line[:matched.start()] if prefix.endswith('std::') or not prefix.endswith('::'): required[header] = (linenum, template) # The policy is that if you #include something in foo.h you don't need to # include it again in foo.cc. Here, we will look at possible includes. # Let's flatten the include_state include_list and copy it into a dictionary. include_dict = dict([item for sublist in include_state.include_list for item in sublist]) # Did we find the header for this file (if any) and successfully load it? header_found = False # Use the absolute path so that matching works properly. abs_filename = FileInfo(filename).FullName() # For Emacs's flymake. # If cpplint is invoked from Emacs's flymake, a temporary file is generated # by flymake and that file name might end with '_flymake.cc'. In that case, # restore original file name here so that the corresponding header file can be # found. # e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h' # instead of 'foo_flymake.h' abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename) # include_dict is modified during iteration, so we iterate over a copy of # the keys. header_keys = include_dict.keys() for header in header_keys: (same_module, common_path) = FilesBelongToSameModule(abs_filename, header) fullpath = common_path + header if same_module and UpdateIncludeState(fullpath, include_dict, io): header_found = True # If we can't find the header file for a .cc, assume it's because we don't # know where to look. In that case we'll give up as we're not sure they # didn't include it in the .h file. # TODO(unknown): Do a better job of finding .h files so we are confident that # not having the .h file means there isn't one. if filename.endswith('.cc') and not header_found: return # All the lines have been processed, report the errors found. for required_header_unstripped in required: template = required[required_header_unstripped][1] if required_header_unstripped.strip('<>"') not in include_dict: error(filename, required[required_header_unstripped][0], 'build/include_what_you_use', 4, 'Add #include ' + required_header_unstripped + ' for ' + template) _RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<') def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error): """Check that make_pair's template arguments are deduced. G++ 4.6 in C++11 mode fails badly if make_pair's template arguments are specified explicitly, and such use isn't intended in any case. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line) if match: error(filename, linenum, 'build/explicit_make_pair', 4, # 4 = high confidence 'For C++11-compatibility, omit template arguments from make_pair' ' OR use pair directly OR if appropriate, construct a pair directly') def CheckRedundantVirtual(filename, clean_lines, linenum, error): """Check if line contains a redundant "virtual" function-specifier. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. 
""" # Look for "virtual" on current line. line = clean_lines.elided[linenum] virtual = Match(r'^(.*)(\bvirtual\b)(.*)$', line) if not virtual: return # Ignore "virtual" keywords that are near access-specifiers. These # are only used in class base-specifier and do not apply to member # functions. if (Search(r'\b(public|protected|private)\s+$', virtual.group(1)) or Match(r'^\s+(public|protected|private)\b', virtual.group(3))): return # Ignore the "virtual" keyword from virtual base classes. Usually # there is a column on the same line in these cases (virtual base # classes are rare in google3 because multiple inheritance is rare). if Match(r'^.*[^:]:[^:].*$', line): return # Look for the next opening parenthesis. This is the start of the # parameter list (possibly on the next line shortly after virtual). # TODO(unknown): doesn't work if there are virtual functions with # decltype() or other things that use parentheses, but csearch suggests # that this is rare. end_col = -1 end_line = -1 start_col = len(virtual.group(2)) for start_line in xrange(linenum, min(linenum + 3, clean_lines.NumLines())): line = clean_lines.elided[start_line][start_col:] parameter_list = Match(r'^([^(]*)\(', line) if parameter_list: # Match parentheses to find the end of the parameter list (_, end_line, end_col) = CloseExpression( clean_lines, start_line, start_col + len(parameter_list.group(1))) break start_col = 0 if end_col < 0: return # Couldn't find end of parameter list, give up # Look for "override" or "final" after the parameter list # (possibly on the next few lines). for i in xrange(end_line, min(end_line + 3, clean_lines.NumLines())): line = clean_lines.elided[i][end_col:] match = Search(r'\b(override|final)\b', line) if match: error(filename, linenum, 'readability/inheritance', 4, ('"virtual" is redundant since function is ' 'already declared as "%s"' % match.group(1))) # Set end_col to check whole lines after we are done with the # first line. end_col = 0 if Search(r'[^\w]\s*$', line): break def CheckRedundantOverrideOrFinal(filename, clean_lines, linenum, error): """Check if line contains a redundant "override" or "final" virt-specifier. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ # Look for closing parenthesis nearby. We need one to confirm where # the declarator ends and where the virt-specifier starts to avoid # false positives. line = clean_lines.elided[linenum] declarator_end = line.rfind(')') if declarator_end >= 0: fragment = line[declarator_end:] else: if linenum > 1 and clean_lines.elided[linenum - 1].rfind(')') >= 0: fragment = line else: return # Check that at most one of "override" or "final" is present, not both if Search(r'\boverride\b', fragment) and Search(r'\bfinal\b', fragment): error(filename, linenum, 'readability/inheritance', 4, ('"override" is redundant since function is ' 'already declared as "final"')) # Returns true if we are at a new block, and it is directly # inside of a namespace. def IsBlockInNameSpace(nesting_state, is_forward_declaration): """Checks that the new block is directly in a namespace. Args: nesting_state: The _NestingState object that contains info about our state. is_forward_declaration: If the class is a forward declared class. Returns: Whether or not the new block is directly in a namespace. 
""" if is_forward_declaration: if len(nesting_state.stack) >= 1 and ( isinstance(nesting_state.stack[-1], _NamespaceInfo)): return True else: return False return (len(nesting_state.stack) > 1 and nesting_state.stack[-1].check_namespace_indentation and isinstance(nesting_state.stack[-2], _NamespaceInfo)) def ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item, raw_lines_no_comments, linenum): """This method determines if we should apply our namespace indentation check. Args: nesting_state: The current nesting state. is_namespace_indent_item: If we just put a new class on the stack, True. If the top of the stack is not a class, or we did not recently add the class, False. raw_lines_no_comments: The lines without the comments. linenum: The current line number we are processing. Returns: True if we should apply our namespace indentation check. Currently, it only works for classes and namespaces inside of a namespace. """ is_forward_declaration = IsForwardClassDeclaration(raw_lines_no_comments, linenum) if not (is_namespace_indent_item or is_forward_declaration): return False # If we are in a macro, we do not want to check the namespace indentation. if IsMacroDefinition(raw_lines_no_comments, linenum): return False return IsBlockInNameSpace(nesting_state, is_forward_declaration) # Call this method if the line is directly inside of a namespace. # If the line above is blank (excluding comments) or the start of # an inner namespace, it cannot be indented. def CheckItemIndentationInNamespace(filename, raw_lines_no_comments, linenum, error): line = raw_lines_no_comments[linenum] if Match(r'^\s+', line): error(filename, linenum, 'runtime/indentation_namespace', 4, 'Do not indent within a namespace') def ProcessLine(filename, file_extension, clean_lines, line, include_state, function_state, nesting_state, error, extra_check_functions=[]): """Processes a single line in the file. Args: filename: Filename of the file that is being processed. file_extension: The extension (dot not included) of the file. clean_lines: An array of strings, each representing a line of the file, with comments stripped. line: Number of line being processed. include_state: An _IncludeState instance in which the headers are inserted. function_state: A _FunctionState instance which counts function lines, etc. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. error: A callable to which errors are reported, which takes 4 arguments: filename, line number, error level, and message extra_check_functions: An array of additional check functions that will be run on each source line. 
Each function takes 4 arguments: filename, clean_lines, line, error """ raw_lines = clean_lines.raw_lines ParseNolintSuppressions(filename, raw_lines[line], line, error) nesting_state.Update(filename, clean_lines, line, error) CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line, error) if nesting_state.InAsmBlock(): return CheckForFunctionLengths(filename, clean_lines, line, function_state, error) CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error) CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error) CheckLanguage(filename, clean_lines, line, file_extension, include_state, nesting_state, error) CheckForNonConstReference(filename, clean_lines, line, nesting_state, error) CheckForNonStandardConstructs(filename, clean_lines, line, nesting_state, error) CheckVlogArguments(filename, clean_lines, line, error) CheckPosixThreading(filename, clean_lines, line, error) CheckInvalidIncrement(filename, clean_lines, line, error) CheckMakePairUsesDeduction(filename, clean_lines, line, error) CheckRedundantVirtual(filename, clean_lines, line, error) CheckRedundantOverrideOrFinal(filename, clean_lines, line, error) for check_fn in extra_check_functions: check_fn(filename, clean_lines, line, error) def FlagCxx11Features(filename, clean_lines, linenum, error): """Flag those c++11 features that we only allow in certain places. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line) # Flag unapproved C++ TR1 headers. if include and include.group(1).startswith('tr1/'): error(filename, linenum, 'build/c++tr1', 5, ('C++ TR1 headers such as <%s> are unapproved.') % include.group(1)) # Flag unapproved C++11 headers. if include and include.group(1) in ('cfenv', 'condition_variable', 'fenv.h', 'future', 'mutex', 'thread', 'chrono', 'ratio', 'regex', 'system_error', ): error(filename, linenum, 'build/c++11', 5, ('<%s> is an unapproved C++11 header.') % include.group(1)) # The only place where we need to worry about C++11 keywords and library # features in preprocessor directives is in macro definitions. if Match(r'\s*#', line) and not Match(r'\s*#\s*define\b', line): return # These are classes and free functions. The classes are always # mentioned as std::*, but we only catch the free functions if # they're not found by ADL. They're alphabetical by header. for top_name in ( # type_traits 'alignment_of', 'aligned_union', ): if Search(r'\bstd::%s\b' % top_name, line): error(filename, linenum, 'build/c++11', 5, ('std::%s is an unapproved C++11 class or function. Send c-style ' 'an example of where it would make your code more readable, and ' 'they may let you use it.') % top_name) def FlagCxx14Features(filename, clean_lines, linenum, error): """Flag those C++14 features that we restrict. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[linenum] include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line) # Flag unapproved C++14 headers. 
if include and include.group(1) in ('scoped_allocator', 'shared_mutex'): error(filename, linenum, 'build/c++14', 5, ('<%s> is an unapproved C++14 header.') % include.group(1)) def ProcessFileData(filename, file_extension, lines, error, extra_check_functions=[]): """Performs lint checks and reports any errors to the given error function. Args: filename: Filename of the file that is being processed. file_extension: The extension (dot not included) of the file. lines: An array of strings, each representing a line of the file, with the last element being empty if the file is terminated with a newline. error: A callable to which errors are reported, which takes 4 arguments: filename, line number, error level, and message extra_check_functions: An array of additional check functions that will be run on each source line. Each function takes 4 arguments: filename, clean_lines, line, error """ lines = (['// marker so line numbers and indices both start at 1'] + lines + ['// marker so line numbers end in a known way']) include_state = _IncludeState() function_state = _FunctionState() nesting_state = NestingState() ResetNolintSuppressions() CheckForCopyright(filename, lines, error) ProcessGlobalSuppresions(lines) RemoveMultiLineComments(filename, lines, error) clean_lines = CleansedLines(lines) if file_extension == 'h': CheckForHeaderGuard(filename, clean_lines, error) for line in xrange(clean_lines.NumLines()): ProcessLine(filename, file_extension, clean_lines, line, include_state, function_state, nesting_state, error, extra_check_functions) FlagCxx11Features(filename, clean_lines, line, error) nesting_state.CheckCompletedBlocks(filename, error) CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error) # Check that the .cc file has included its header if it exists. if _IsSourceExtension(file_extension): CheckHeaderFileIncluded(filename, include_state, error) # We check here rather than inside ProcessLine so that we see raw # lines rather than "cleaned" lines. CheckForBadCharacters(filename, lines, error) CheckForNewlineAtEOF(filename, lines, error) def ProcessConfigOverrides(filename): """ Loads the configuration files and processes the config overrides. Args: filename: The name of the file being processed by the linter. Returns: False if the current |filename| should not be processed further. """ abs_filename = os.path.abspath(filename) cfg_filters = [] keep_looking = True while keep_looking: abs_path, base_name = os.path.split(abs_filename) if not base_name: break # Reached the root directory. cfg_file = os.path.join(abs_path, "CPPLINT.cfg") abs_filename = abs_path if not os.path.isfile(cfg_file): continue try: with open(cfg_file) as file_handle: for line in file_handle: line, _, _ = line.partition('#') # Remove comments. if not line.strip(): continue name, _, val = line.partition('=') name = name.strip() val = val.strip() if name == 'set noparent': keep_looking = False elif name == 'filter': cfg_filters.append(val) elif name == 'exclude_files': # When matching exclude_files pattern, use the base_name of # the current file name or the directory name we are processing. # For example, if we are checking for lint errors in /foo/bar/baz.cc # and we found the .cfg file at /foo/CPPLINT.cfg, then the config # file's "exclude_files" filter is meant to be checked against "bar" # and not "baz" nor "bar/baz.cc". if base_name: pattern = re.compile(val) if pattern.match(base_name): sys.stderr.write('Ignoring "%s": file excluded by "%s". 
' 'File path component "%s" matches ' 'pattern "%s"\n' % (filename, cfg_file, base_name, val)) return False elif name == 'linelength': global _line_length try: _line_length = int(val) except ValueError: sys.stderr.write('Line length must be numeric.') else: sys.stderr.write( 'Invalid configuration option (%s) in file %s\n' % (name, cfg_file)) except IOError: sys.stderr.write( "Skipping config file '%s': Can't open for reading\n" % cfg_file) keep_looking = False # Apply all the accumulated filters in reverse order (top-level directory # config options having the least priority). for filter in reversed(cfg_filters): _AddFilters(filter) return True def ProcessFile(filename, vlevel, extra_check_functions=[]): """Does google-lint on a single file. Args: filename: The name of the file to parse. vlevel: The level of errors to report. Every error of confidence >= verbose_level will be reported. 0 is a good default. extra_check_functions: An array of additional check functions that will be run on each source line. Each function takes 4 arguments: filename, clean_lines, line, error """ _SetVerboseLevel(vlevel) _BackupFilters() if not ProcessConfigOverrides(filename): _RestoreFilters() return lf_lines = [] crlf_lines = [] try: # Support the UNIX convention of using "-" for stdin. Note that # we are not opening the file with universal newline support # (which codecs doesn't support anyway), so the resulting lines do # contain trailing '\r' characters if we are reading a file that # has CRLF endings. # If after the split a trailing '\r' is present, it is removed # below. if filename == '-': lines = codecs.StreamReaderWriter(sys.stdin, codecs.getreader('utf8'), codecs.getwriter('utf8'), 'replace').read().split('\n') else: lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n') # Remove trailing '\r'. # The -1 accounts for the extra trailing blank line we get from split() for linenum in range(len(lines) - 1): if lines[linenum].endswith('\r'): lines[linenum] = lines[linenum].rstrip('\r') crlf_lines.append(linenum + 1) else: lf_lines.append(linenum + 1) except IOError: sys.stderr.write( "Skipping input '%s': Can't open for reading\n" % filename) _RestoreFilters() return # Note, if no dot is found, this will give the entire filename as the ext. file_extension = filename[filename.rfind('.') + 1:] # When reading from stdin, the extension is unknown, so no cpplint tests # should rely on the extension. if filename != '-' and file_extension not in _valid_extensions: sys.stderr.write('Ignoring %s; not a valid file name ' '(%s)\n' % (filename, ', '.join(_valid_extensions))) else: ProcessFileData(filename, file_extension, lines, Error, extra_check_functions) # If end-of-line sequences are a mix of LF and CR-LF, issue # warnings on the lines with CR. # # Don't issue any warnings if all lines are uniformly LF or CR-LF, # since critique can handle these just fine, and the style guide # doesn't dictate a particular end of line sequence. # # We can't depend on os.linesep to determine what the desired # end-of-line sequence should be, since that will return the # server-side end-of-line sequence. if lf_lines and crlf_lines: # Warn on every line with CR. An alternative approach might be to # check whether the file is mostly CRLF or just LF, and warn on the # minority, we bias toward LF here since most tools prefer LF. 
for linenum in crlf_lines: Error(filename, linenum, 'whitespace/newline', 1, 'Unexpected \\r (^M) found; better to use only \\n') sys.stderr.write('Done processing %s\n' % filename) _RestoreFilters() def PrintUsage(message): """Prints a brief usage string and exits, optionally with an error message. Args: message: The optional error message. """ sys.stderr.write(_USAGE) if message: sys.exit('\nFATAL ERROR: ' + message) else: sys.exit(1) def PrintCategories(): """Prints a list of all the error-categories used by error messages. These are the categories used to filter messages via --filter. """ sys.stderr.write(''.join(' %s\n' % cat for cat in _ERROR_CATEGORIES)) sys.exit(0) def ParseArguments(args): """Parses the command line arguments. This may set the output format and verbosity level as side-effects. Args: args: The command line arguments: Returns: The list of filenames to lint. """ try: (opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=', 'counting=', 'filter=', 'root=', 'linelength=', 'extensions=', 'project_root=']) except getopt.GetoptError: PrintUsage('Invalid arguments.') verbosity = _VerboseLevel() output_format = _OutputFormat() filters = '' counting_style = '' for (opt, val) in opts: if opt == '--help': PrintUsage(None) elif opt == '--output': if val not in ('emacs', 'vs7', 'eclipse'): PrintUsage('The only allowed output formats are emacs, vs7 and eclipse.') output_format = val elif opt == '--verbose': verbosity = int(val) elif opt == '--filter': filters = val if not filters: PrintCategories() elif opt == '--counting': if val not in ('total', 'toplevel', 'detailed'): PrintUsage('Valid counting options are total, toplevel, and detailed') counting_style = val elif opt == '--root': global _root _root = val elif opt == '--project_root': global _project_root _project_root = val if not os.path.isabs(_project_root): PrintUsage('Project root must be an absolute path.') elif opt == '--linelength': global _line_length try: _line_length = int(val) except ValueError: PrintUsage('Line length must be digits.') elif opt == '--extensions': global _valid_extensions try: _valid_extensions = set(val.split(',')) except ValueError: PrintUsage('Extensions must be comma separated list.') if not filenames: PrintUsage('No files were specified.') _SetOutputFormat(output_format) _SetVerboseLevel(verbosity) _SetFilters(filters) _SetCountingStyle(counting_style) return filenames def main(): filenames = ParseArguments(sys.argv[1:]) # Change stderr to write with replacement characters so we don't die # if we try to print something containing non-ASCII characters. sys.stderr = codecs.StreamReaderWriter(sys.stderr, codecs.getreader('utf8'), codecs.getwriter('utf8'), 'replace') _cpplint_state.ResetErrorCounts() for filename in filenames: ProcessFile(filename, _cpplint_state.verbose_level) _cpplint_state.PrintErrorCounts() sys.exit(_cpplint_state.error_count > 0) if __name__ == '__main__': main()
'Extra space after (') if (Search(r'\w\s+\(', fncall) and not Search(r'_{0,2}asm_{0,2}\s+_{0,2}volatile_{0,2}\s+\(', fncall) and not Search(r'#\s*define|typedef|using\s+\w+\s*=', fncall) and
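Buried in ProcessFile above is the mixed line-ending audit: warnings fire only when a file mixes LF and CRLF, and they land on the CRLF lines. A self-contained sketch of just that classification (the function name is mine, not cpplint's):

def classify_line_endings(text):
    # Mirror cpplint's read-then-split approach: after splitting on '\n',
    # a line that still ends in '\r' must have come from a CRLF sequence.
    lf_lines, crlf_lines = [], []
    lines = text.split('\n')
    # The -1 skips the artificial empty element split() appends after a
    # trailing newline, matching the loop in ProcessFile above.
    for linenum in range(len(lines) - 1):
        if lines[linenum].endswith('\r'):
            crlf_lines.append(linenum + 1)
        else:
            lf_lines.append(linenum + 1)
    # Only a *mix* of endings triggers warnings; the CRLF lines get them,
    # biasing toward LF.
    return crlf_lines if (lf_lines and crlf_lines) else []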
rewritepass.py
#!/usr/bin/python import os import re import sys if len(sys.argv) != 3 and len(sys.argv) != 4: print("Use: %s <PassRegistry.def path> <passes> [run-tests]" % sys.argv[0]) exit(1) passregpath = sys.argv[1] def skip_first_pass(s): count = 0 for i in range(len(s)): c = s[i] if c == '(': count += 1 elif c == ')': count -= 1 if count == 0: return s[i+2:] return '' def wrap_str(arg, lst): for e in lst: arg = "%s(%s)" % (e, arg) return arg def wrap(args): passes = args.split(',') pass_types = { "module" : [], "cgscc" : [], "function" : [], "loop" : ["function"], "loop-mssa" : ["function"], } firstpass = '' type = None skip = ['verify', 'invalidate<all>'] for p in passes: if not any(p.startswith(s) for s in skip): firstpass = p break # decorated already: function(foo) for ty,lst in pass_types.items(): if firstpass.startswith(ty + '('): if lst: return wrap_str(args, lst) # check if we have function(foo), globalopt next_pass = args while True: next_pass = skip_first_pass(next_pass) if not next_pass: return args next_pass = wrap(next_pass) if next_pass.startswith(ty + '('): continue # function(x), cgscc(y) for ty,lst in pass_types.items(): if next_pass.startswith(ty + '('): return wrap_str(args, ['module']) override = { # pass -> (type, prepend-type?) 'devirt<' : ('cgscc', True), 'loop-mssa' : ('loop', False), } for arg,(ty,prepend) in override.items(): if firstpass.startswith(arg): return wrap_str(args, ([ty] if prepend else []) + pass_types[ty]) # strip e.g. require<foo> -> foo strip = [ r'require<([^>]+)>', r'repeat<\d+>\(([^)]+)\)', r'invalidate<([^>]+)>', r'<[^>]+>()' ] for s in strip: firstpass = re.sub(s, '\\1', firstpass) # check LLVM's PassRegistry.def file txt = open(passregpath, 'r').read() p = re.escape(firstpass) m = re.search(r'^([A-Z_]+)_(?:PASS|ANALYSIS)[A-Z_]*\("' + p, txt, re.MULTILINE) if m is None: return wrap_str(args, ['module']) type = m.group(1) # Some loop passes must use loop-mssa instead of loop # And there's no place to get this info
loop_mssa = { 'licm', 'simple-loop-unswitch', } if p in loop_mssa: type = 'LOOP-MSSA' type = { 'CGSCC' : 'cgscc', 'FUNCTION' : 'function', 'FUNCTION_ALIAS' : 'function', 'LOOP' : 'loop', 'LOOPNEST' : 'loop', 'LOOP-MSSA' : 'loop-mssa', 'MODULE' : 'module', 'MODULE_ALIAS' : 'module', }[type] return wrap_str(args, [type] + pass_types[type]) def run_opt(passes): error = os.popen('echo "" | opt -passes="%s" -disable-output 2>&1' % passes).close() return error is None if len(sys.argv) == 3: print(wrap(sys.argv[2].strip("'\""))) else: tests = [ ('sroa', 'function(sroa)'), ('simplifycfg', 'function(simplifycfg)'), ('licm', 'function(loop-mssa(licm))'), ('loop-mssa(licm)', 'function(loop-mssa(licm))'), ('argpromotion', 'cgscc(argpromotion)'), ('loop-extract', 'module(loop-extract)'), ('loop-mssa(simple-loop-unswitch<nontrivial>)', 'function(loop-mssa(simple-loop-unswitch<nontrivial>))'), ('sroa,verify', 'function(sroa,verify)'), ('verify,sroa', 'function(verify,sroa)'), ('loop-mssa(loop-instsimplify)', 'function(loop-mssa(loop-instsimplify))'), ('loop-unroll-and-jam', 'function(loop(loop-unroll-and-jam))'), ('require<basic-aa>,sroa', 'function(require<basic-aa>,sroa)'), ('cgscc(repeat<2>(inline,function(dce)))', 'cgscc(repeat<2>(inline,function(dce)))'), ('repeat<2>(sroa)', 'function(repeat<2>(sroa))'), ('cgscc(devirt<4>(inline))', 'cgscc(devirt<4>(inline))'), ('devirt<1>(inline,function(gvn))', 'cgscc(devirt<1>(inline,function(gvn)))'), ('require<opt-remark-emit>,loop(loop-unroll-full)', 'function(require<opt-remark-emit>,loop(loop-unroll-full))'), ('invalidate<domtree>,early-cse<memssa>', 'function(invalidate<domtree>,early-cse<memssa>)'), ('function(loop-vectorize,instcombine)', 'function(loop-vectorize,instcombine)'), ('function(loop-vectorize),function(instcombine)', 'function(loop-vectorize),function(instcombine)'), ('function(loop-vectorize),function(instcombine),globalopt', 'module(function(loop-vectorize),function(instcombine),globalopt)'), ('function(ee-instrument),function(ee-instrument),cgscc(inline),function(ee-instrument<post-inline>)', 'module(function(ee-instrument),function(ee-instrument),cgscc(inline),function(ee-instrument<post-inline>))'), ('function(print<demanded-bits>),attributor', 'module(function(print<demanded-bits>),attributor)'), ('function(tailcallelim),cgscc(inline)', 'module(function(tailcallelim),cgscc(inline))'), ('function(slp-vectorizer),module(hotcoldsplit)', 'module(function(slp-vectorizer),module(hotcoldsplit))'), ('verify', 'module(verify)'), ('default<O2>', 'module(default<O2>)') ] for i,o in tests: if wrap(i) != o: print('FAIL:', i) print('Got:', wrap(i)) print('Expected:', o) print() elif not run_opt(i): print('FAIL running input:', i, '\n') elif not run_opt(o + ',globalopt'): print('FAIL running output:', o, '\n') else: print('PASS:', i)
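A few worked calls make the wrapping helpers above concrete. These assume the script's functions are available in the same session, and that skip_first_pass's early return fires after the close-paren balancing the first '(' (which is what the test table implies):

# wrap_str applies wrappers in list order, so later entries end up outermost:
assert wrap_str('licm', ['loop-mssa', 'function']) == 'function(loop-mssa(licm))'
assert wrap_str('sroa', ['function']) == 'function(sroa)'
# skip_first_pass drops one paren-balanced pass plus the following comma:
assert skip_first_pass('function(sroa),globalopt') == 'globalopt'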
mensajes.module.ts
import { NgModule } from '@angular/core'; import { CommonModule } from '@angular/common'; import { FormsModule } from '@angular/forms'; import { IonicModule } from '@ionic/angular'; import { MensajesPageRoutingModule } from './mensajes-routing.module'; import { MensajesPage } from './mensajes.page'; @NgModule({
IonicModule, MensajesPageRoutingModule ], declarations: [MensajesPage] }) export class MensajesPageModule {}
imports: [ CommonModule, FormsModule,
serialLoopCaller.go
package core import ( "context" "github.com/opctl/opctl/sdks/go/opspec/interpreter/call/loop" "github.com/opctl/opctl/sdks/go/opspec/interpreter/call/loop/iteration" "github.com/opctl/opctl/sdks/go/opspec/interpreter/call/serialloop" "github.com/opctl/opctl/sdks/go/internal/uniquestring" "github.com/opctl/opctl/sdks/go/model" ) //counterfeiter:generate -o internal/fakes/serialLoopCaller.go . serialLoopCaller type serialLoopCaller interface { // Executes a serial loop call Call( ctx context.Context, eventChannel chan model.Event, id string, inboundScope map[string]*model.Value, callSpecSerialLoop model.SerialLoopCallSpec, opPath string, parentCallID *string, rootCallID string, ) ( map[string]*model.Value, error, ) } func
( caller caller, ) serialLoopCaller { return _serialLoopCaller{ caller: caller, } } type _serialLoopCaller struct { caller caller } func (lpr _serialLoopCaller) Call( ctx context.Context, eventChannel chan model.Event, id string, inboundScope map[string]*model.Value, callSpecSerialLoop model.SerialLoopCallSpec, opPath string, parentCallID *string, rootCallID string, ) ( map[string]*model.Value, error, ) { index := 0 scope, err := iteration.Scope( index, inboundScope, callSpecSerialLoop.Range, callSpecSerialLoop.Vars, ) if err != nil { return nil, err } // interpret initial iteration of the loop callSerialLoop, err := serialloop.Interpret( callSpecSerialLoop, scope, ) if err != nil { return nil, err } for !serialloop.IsIterationComplete(index, callSerialLoop) { var callID string callID, err = uniquestring.Construct() if err != nil { return nil, err } outputs, err := lpr.caller.Call( ctx, eventChannel, callID, scope, &callSpecSerialLoop.Run, opPath, parentCallID, rootCallID, ) if err != nil { return nil, err } for name, value := range outputs { scope[name] = value } index++ if serialloop.IsIterationComplete(index, callSerialLoop) { break } scope, err = iteration.Scope( index, scope, callSpecSerialLoop.Range, callSpecSerialLoop.Vars, ) if err != nil { return nil, err } // interpret next iteration of the loop callSerialLoop, err = serialloop.Interpret( callSpecSerialLoop, scope, ) if err != nil { return nil, err } } outboundScope := loop.DeScope( inboundScope, callSpecSerialLoop.Range, callSpecSerialLoop.Vars, scope, ) return outboundScope, err }
newSerialLoopCaller
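The control flow above is easier to see stripped of the opctl plumbing. A minimal sketch of the same serial-loop pattern in Python (names are illustrative; the per-index variable rebinding done by iteration.Scope and the final loop.DeScope are elided):

def run_serial_loop(interpret, is_complete, run_body, inbound_scope):
    # Interpret the loop spec against the initial scope, then re-interpret
    # it before every later iteration so range/vars see updated values.
    index = 0
    scope = dict(inbound_scope)
    loop_spec = interpret(scope)
    while not is_complete(index, loop_spec):
        outputs = run_body(scope)
        scope.update(outputs)  # body outputs become visible to later iterations
        index += 1
        if is_complete(index, loop_spec):
            break
        loop_spec = interpret(scope)
    return scope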
shootout-ackermann.rs
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use std::os; use std::str::from_str; fn ack(m: int, n: int) -> int { if m == 0 { return n + 1 } else { if n == 0 { return ack(m - 1, 1); } else { return ack(m - 1, ack(m, n - 1)); } } } fn main() { let args = os::args(); let args = if os::getenv("RUST_BENCH").is_some() { vec!("".to_string(), "12".to_string()) } else if args.len() <= 1u { vec!("".to_string(), "8".to_string()) } else { args.into_iter().collect() };
let n = from_str::<int>(args[1].as_slice()).unwrap(); println!("Ack(3,{}): {}\n", n, ack(3, n)); }
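For comparison, the same recursion is easy to play with in Python once memoized; ack(3, n) has the closed form 2**(n + 3) - 3, which is why the benchmark explodes so quickly:

import sys
from functools import lru_cache

sys.setrecursionlimit(100_000)  # the call chain is deep even with the cache

@lru_cache(maxsize=None)
def ack(m, n):
    if m == 0:
        return n + 1
    if n == 0:
        return ack(m - 1, 1)
    return ack(m - 1, ack(m, n - 1))

assert ack(3, 8) == 2 ** 11 - 3  # 2045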
dsetUtilTest.py
############################################################################## # Copyright by The HDF Group. # # All rights reserved. # # # # This file is part of HSDS (HDF5 Scalable Data Service), Libraries and # # Utilities. The full HSDS copyright notice, including # # terms governing use, modification, and redistribution, is contained in # # the file COPYING, which can be found at the root of the source code # # distribution tree. If you do not have access to this file, you may # # request a copy from [email protected]. # ############################################################################## import unittest import sys sys.path.append('../../hsds/util') sys.path.append('../../hsds') from dsetUtil import getHyperslabSelection, getSelectionShape from dsetUtil import ItemIterator, getEvalStr class DsetUtilTest(unittest.TestCase): def __init__(self, *args, **kwargs): super(DsetUtilTest, self).__init__(*args, **kwargs) # main def testGetHyperslabSelection(self): # getHyperslabSelection(dsetshape, start, stop, step) # 1-D case datashape = [100,] slices = getHyperslabSelection(datashape) self.assertEqual(len(slices), 1) self.assertEqual(slices[0], slice(0, 100, 1)) slices = getHyperslabSelection(datashape, 20) self.assertEqual(len(slices), 1) self.assertEqual(slices[0], slice(20, 100, 1)) slices = getHyperslabSelection(datashape, 20, 80) self.assertEqual(len(slices), 1) self.assertEqual(slices[0], slice(20, 80, 1)) slices = getHyperslabSelection(datashape, 20, 80, 2) self.assertEqual(len(slices), 1) self.assertEqual(slices[0], slice(20, 80, 2)) datashape = [100, 50] slices = getHyperslabSelection(datashape) self.assertEqual(len(slices), 2) self.assertEqual(slices[0], slice(0, 100, 1)) self.assertEqual(slices[1], slice(0, 50, 1)) slices = getHyperslabSelection(datashape, (10, 20)) self.assertEqual(len(slices), 2) self.assertEqual(slices[0], slice(10, 100, 1)) self.assertEqual(slices[1], slice(20, 50, 1)) slices = getHyperslabSelection(datashape, (10, 20), (90, 30)) self.assertEqual(len(slices), 2) self.assertEqual(slices[0], slice(10, 90, 1)) self.assertEqual(slices[1], slice(20, 30, 1)) slices = getHyperslabSelection(datashape, (10, 20), (90, 30), (1,2)) self.assertEqual(len(slices), 2) self.assertEqual(slices[0], slice(10, 90, 1)) self.assertEqual(slices[1], slice(20, 30, 2)) def testGetSelectionShape(self): sel = [ slice(3,7,1), ] shape = getSelectionShape(sel) self.assertEqual(shape, [4,]) sel = [ slice(3,7,3), ] # select points 3, 6 shape = getSelectionShape(sel) self.assertEqual(shape, [2,]) sel = [ slice(44,52,1), slice(48,52,1) ] shape = getSelectionShape(sel) self.assertEqual(shape, [8,4]) sel = [ slice(0, 4, 2), ] # select points 0, 2 shape = getSelectionShape(sel) self.assertEqual(shape, [2,]) sel = [ slice(0, 5, 2), ] # select points 0, 2, 4 shape = getSelectionShape(sel) self.assertEqual(shape, [3,]) def testGetEvalStr(self): queries = { "date == 23": "rows['date'] == 23", "wind == b'W 5'": "rows['wind'] == b'W 5'", "temp > 61": "rows['temp'] > 61", "(date >=22) & (date <= 24)": "(rows['date'] >=22) & (rows['date'] <= 24)", "(date == 21) & (temp > 70)": "(rows['date'] == 21) & (rows['temp'] > 70)", "(wind == b'E 7') | (wind == b'S 7')": "(rows['wind'] == b'E 7') | (rows['wind'] == b'S 7')" } fields = ["date", "wind", "temp"] for query in queries.keys(): eval_str = getEvalStr(query, "rows", fields) self.assertEqual(eval_str, queries[query]) #print(query, "->", eval_str) def testBadQuery(self): queries = ( "foobar", # no variable used "wind = b'abc", # non-closed literal 
"(wind = b'N') & (temp = 32", # missing paren "foobar > 42", # invalid field name "import subprocess; subprocess.call(['ls', '/'])") # injection attack fields = ("date", "wind", "temp" ) for query in queries: try: eval_str = getEvalStr(query, "x", fields) self.assertTrue(False) # shouldn't get here except Exception:
        # 1-D case
        datashape = [10,]
        slices = getHyperslabSelection(datashape)
        it = ItemIterator(slices)
        indices = []
        count = 0
        while True:
            try:
                index = it.next()
                count += 1
                indices.append(index)
            except StopIteration:
                break
        self.assertEqual(count, 10)
        self.assertEqual(indices, list(range(10)))

        # 2-D case
        datashape = [4, 5]
        slices = getHyperslabSelection(datashape)
        it = ItemIterator(slices)
        indices = []
        count = 0
        while True:
            try:
                index = it.next()
                # each index is a (row, col) pair; assertEqual actually
                # checks the length (assertTrue(len(index), 2) always passed)
                self.assertEqual(len(index), 2)
                self.assertTrue(index[0] >= 0)
                self.assertTrue(index[0] < 4)
                self.assertTrue(index[1] >= 0)
                self.assertTrue(index[1] < 5)
                count += 1
                indices.append(index)
            except StopIteration:
                break
        self.assertEqual(count, 20)


if __name__ == '__main__':
    #setup test files
    unittest.main()
pass # ok def testItemIterator(self):
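The expected shapes in testGetSelectionShape above all follow the usual ceiling-division rule for a slice's extent. A one-line reference implementation (mine, not the hsds one) for non-negative start/stop/step:

def slice_extent(s):
    # Number of points selected by slice(start, stop, step).
    return max(0, (s.stop - s.start + s.step - 1) // s.step)

assert slice_extent(slice(3, 7, 1)) == 4
assert slice_extent(slice(3, 7, 3)) == 2  # points 3, 6
assert slice_extent(slice(0, 5, 2)) == 3  # points 0, 2, 4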
source_tree.rs
use super::{Input, Module, ModuleOrigin}; use crate::error::Error; use petgraph::{algo::Cycle, graph::NodeIndex, Direction}; use std::collections::{HashMap, HashSet}; #[derive(Debug, Default)] pub struct SourceTree { graph: petgraph::Graph<String, ()>, indexes: HashMap<String, NodeIndex>, modules: HashMap<NodeIndex, Module>, } impl SourceTree {
pub fn new(inputs: Vec<Input>) -> Result<Self, Error> { let mut graph: Self = Default::default(); for input in inputs { graph.insert(input)?; } graph.calculate_dependencies()?; Ok(graph) } pub fn consume(&mut self) -> Result<impl Iterator<Item = Module> + '_, Error> { let iter = petgraph::algo::toposort(&self.graph, None) .map_err(|e| self.import_cycle(e))? .into_iter() .map(move |i| { self.modules .remove(&i) .expect("SourceTree.consume(): Unknown graph index") }); Ok(iter) } fn import_cycle(&mut self, cycle: Cycle<NodeIndex>) -> Error { let origin = cycle.node_id(); let mut path = vec![]; let _ = self.find_cycle(origin, origin, &mut path, &mut HashSet::new()); let modules: Vec<_> = path .iter() .map(|index| { self.modules .remove(index) .expect("SourceTree.import_cycle(): cannot find module for index") .module .name .join("/") }) .collect(); Error::ImportCycle { modules } } fn find_cycle( &self, origin: NodeIndex, parent: NodeIndex, path: &mut Vec<NodeIndex>, seen: &mut HashSet<NodeIndex>, ) -> bool { let _ = seen.insert(parent); for node in self.graph.neighbors_directed(parent, Direction::Outgoing) { if node == origin { path.push(node); return true; } if seen.contains(&node) { continue; } if self.find_cycle(origin, node, path, seen) { path.push(node); return true; } } false } fn calculate_dependencies(&mut self) -> Result<(), Error> { for module in self.modules.values() { let module_name = module.module.name_string(); let src = module.src.clone(); let path = module.path.clone(); let deps = module.module.dependencies(crate::build::Target::Erlang); let &module_index = self.indexes.get(&module_name).expect( "SourceTree.calculate_dependencies(): Unable to find module index for name", ); let module = self .modules .get(&module_index) .expect("SourceTree.calculate_dependencies(): Unable to find module for index"); for (dep, location) in deps { if dep == "gleam" { continue; } let &dep_index = self.indexes.get(&dep).ok_or_else(|| Error::UnknownImport { module: module_name.clone(), import: dep.clone(), src: src.clone(), path: path.clone(), modules: self .modules .values() .map(|m| m.module.name_string()) .collect(), location, })?; if module.origin == ModuleOrigin::Src && self .modules .get(&dep_index) .expect("SourceTree.calculate_dependencies(): Unable to find module for dep index") .origin == ModuleOrigin::Test { return Err(Error::SrcImportingTest { path, src, location, src_module: module_name, test_module: dep, }); } let _ = self.graph.add_edge(dep_index, module_index, ()); } } Ok(()) } fn insert(&mut self, input: Input) -> Result<(), Error> { // Determine the module name let name = input .path .strip_prefix(&input.source_base_path) .expect("Source tree strip prefix") .parent() .expect("Source tree parent") .join(input.path.file_stem().expect("Source tree file stem")) .to_str() .expect("Source tree to_str") .to_string() .replace("\\", "/"); // Parse the source let (mut module, module_extra) = crate::parse::parse_module(&input.src).map_err(|e| Error::Parse { path: input.path.clone(), src: input.src.clone(), error: e, })?; // Store the name module.name = name.split('/').map(|s| s.to_string()).collect(); // Check to see if we already have a module with this name if let Some(Module { path, .. 
}) = self.indexes.get(&name).and_then(|i| self.modules.get(i)) { return Err(Error::DuplicateModule { module: name.clone(), first: path.clone(), second: input.path, }); } // Register the module let index = self.graph.add_node(name.clone()); let _ = self.indexes.insert(name, index); let _ = self.modules.insert( index, Module { src: input.src, path: input.path, origin: input.origin, source_base_path: input.source_base_path, module, module_extra, }, ); Ok(()) } }
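The cycle reporting above happens in two stages: petgraph's toposort names only one node on the cycle, and find_cycle then recovers the full path by depth-first search, pushing nodes while unwinding. The same walk in Python over a plain adjacency dict (a sketch of the algorithm, not the gleam source):

def find_cycle(graph, origin, parent, path, seen):
    seen.add(parent)
    for node in graph.get(parent, ()):  # outgoing neighbors
        if node == origin:
            path.append(node)
            return True
        if node in seen:
            continue
        if find_cycle(graph, origin, node, path, seen):
            path.append(node)  # appended while unwinding
            return True
    return False

g = {'a': ['b'], 'b': ['c'], 'c': ['a']}
path = []
find_cycle(g, 'a', 'a', path, set())
assert path == ['a', 'c', 'b']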
formatMessage.test.ts
/* eslint-disable @typescript-eslint/camelcase */ import '@formatjs/intl-numberformat/polyfill' import '@formatjs/intl-numberformat/locale-data/en' import '@formatjs/intl-numberformat/locale-data/es' import IntlMessageFormat from 'intl-messageformat' import {parse} from '@formatjs/icu-messageformat-parser' import {formatMessage as baseFormatMessage} from '../src/message' import {Formatters, OptionalIntlConfig, IntlFormatters} from '../src/types' describe('format API', () => { const {NODE_ENV} = process.env let config: OptionalIntlConfig<any> let state: Formatters beforeEach(() => { config = { locale: 'en', messages: { no_args: 'Hello, World!', with_arg: 'Hello, {name}!', with_named_format: 'It is {now, date, year_only}', with_html: 'Hello, <b>{name}</b>!', missing: undefined as any, empty: '', invalid: 'invalid {}', missing_value: 'missing {arg_missing}', missing_named_format: 'missing {now, date, format_missing}', richText: 'rich <b>text</b>', ast_simple: parse('hello world'), ast_var: parse('hello there, {name}'),
date: { 'year-only': { year: 'numeric', }, missing: undefined, }, time: { 'hour-only': { hour: '2-digit', hour12: false, }, missing: undefined, }, relative: { seconds: { style: 'narrow', }, missing: undefined, }, number: { percent: { style: 'percent', minimumFractionDigits: 2, }, missing: undefined, }, } as any, defaultLocale: 'en', defaultFormats: {}, onError: jest.fn(), } state = { getDateTimeFormat: jest .fn() .mockImplementation((...args) => new Intl.DateTimeFormat(...args)), getNumberFormat: jest .fn() .mockImplementation((...args) => new Intl.NumberFormat(...args)), getMessageFormat: jest .fn() .mockImplementation( (msg, ...args) => new IntlMessageFormat(msg, ...args) ), getRelativeTimeFormat: jest .fn() .mockImplementation((...args) => new Intl.RelativeTimeFormat(...args)), getPluralRules: jest .fn() .mockImplementation((...args) => new Intl.PluralRules(...args)), getListFormat: jest .fn() .mockImplementation((...args) => new Intl.ListFormat(...args)), getDisplayNames: jest .fn() .mockImplementation( (...args) => new (Intl as any).DisplayNames(...args) ), } }) afterEach(() => { process.env.NODE_ENV = NODE_ENV }) describe('formatMessage()', () => { let formatMessage: IntlFormatters['formatMessage'] beforeEach(() => { // @ts-ignore formatMessage = baseFormatMessage.bind(null, config, state) }) it('should hot path message without values', function () { ;(state.getMessageFormat as jest.Mock).mockClear() expect(formatMessage({id: 'no_args'})).toBe('Hello, World!') expect(state.getMessageFormat).not.toHaveBeenCalled() expect(formatMessage({id: 'with_arg'}, {name: 'foo'})).toBe('Hello, foo!') expect(state.getMessageFormat).toHaveBeenCalled() }) it('should hot path message without values', function () { ;(state.getMessageFormat as jest.Mock).mockClear() const err = jest.spyOn(console, 'error') expect(formatMessage({id: 'no_args'})).toBe('Hello, World!') expect(err).not.toHaveBeenCalled() }) it('should not crash of messages does not have Object.prototype', function () { const messages = Object.create(null) messages!.no_args = 'Hello' // @ts-ignore formatMessage = baseFormatMessage.bind( null, { ...config, messages, }, state ) expect(() => formatMessage({id: 'no_args'})).not.toThrow() expect(formatMessage({id: 'no_args'})).toBe('Hello') }) ;[`Hello, World!'{foo}'`, `'\ud83d'\udc04`].forEach(msg => it(`should render escaped msg ${msg} properly in production`, () => { process.env.NODE_ENV = 'production' const descriptor = { id: 'hello', defaultMessage: msg, } const mf = new IntlMessageFormat(msg, 'en') expect(formatMessage(descriptor)).toBe(mf.format()) }) ) it('throws when no Message Descriptor is provided', () => { // @ts-ignore expect(() => formatMessage()).toThrow( '[@formatjs/intl] An `id` must be provided to format a message.' ) }) it('throws when Message Descriptor `id` is missing or falsy', () => { expect(() => formatMessage({})).toThrow( '[@formatjs/intl] An `id` must be provided to format a message.' ) ;[undefined, null, false, 0, ''].forEach(id => { // @ts-ignore expect(() => formatMessage({id})).toThrow( '[@formatjs/intl] An `id` must be provided to format a message.' 
) }) }) it('formats basic messages', () => { const {locale, messages} = config const mf = new IntlMessageFormat(messages!.no_args, locale) expect(formatMessage({id: 'no_args'})).toBe(mf.format()) }) it('formats basic message with preparsed defaultMessage', () => { const {locale, messages} = config const mf = new IntlMessageFormat(messages!.ast_var, locale) expect( formatMessage( {id: 'foo', defaultMessage: messages!.ast_var}, { name: 'hey', } ) ).toBe( mf.format({ name: 'hey', }) ) }) it('formats message with ID as a method in Object.prototype, GH issue #1885', () => { expect(formatMessage({id: 'toString'})).toBe('toString') expect(formatMessage({id: '__proto__'})).toBe('__proto__') }) it('formats legacy HTML messages', () => { const {locale, messages} = config const mf = new IntlMessageFormat(messages!.richText, locale) const values = { b: (s: string) => `<foobar>${s}</foobar>`, } expect(formatMessage({id: 'richText'}, values)).toBe( // @ts-ignore mf.format<string>(values) ) }) it('formats basic AST messages', () => { const {locale, messages} = config const mf = new IntlMessageFormat(messages!.ast_simple, locale) expect(formatMessage({id: 'ast_simple'})).toBe(mf.format()) }) it('formats basic AST messages in prod', () => { const {locale, messages} = config const mf = new IntlMessageFormat(messages!.ast_simple, locale) process.env.NODE_ENV = 'production' expect(formatMessage({id: 'ast_simple'})).toBe(mf.format()) }) it('formats messages with placeholders', () => { const {locale, messages} = config const mf = new IntlMessageFormat(messages!.with_arg, locale) const values = {name: 'Eric'} expect(formatMessage({id: 'with_arg'}, values)).toBe(mf.format(values)) }) it('formats AST message with placeholders', () => { const {locale, messages} = config const mf = new IntlMessageFormat(messages!.ast_var, locale) const values = {name: 'Eric'} expect(formatMessage({id: 'ast_var'}, values)).toBe(mf.format(values)) }) it('formats messages with named formats', () => { const {locale, messages, formats} = config const mf = new IntlMessageFormat( messages!.with_named_format, locale, formats ) const values = {now: Date.now()} expect(formatMessage({id: 'with_named_format'}, values)).toBe( mf.format(values) ) }) describe('fallbacks', () => { it('formats message with missing named formats', () => { const {locale, messages} = config const mf = new IntlMessageFormat(messages!.missing_named_format, locale) const values = {now: Date.now()} expect(formatMessage({id: 'missing_named_format'}, values)).toBe( mf.format(values) ) }) it('formats `defaultMessage` when message is missing', () => { const {locale, messages} = config const mf = new IntlMessageFormat(messages!.with_arg, locale) const id = 'missing' const values = {name: 'Eric'} expect( formatMessage( { id: id, defaultMessage: messages!.with_arg, }, values ) ).toBe(mf.format(values)) }) it('warns when `message` is missing and locales are different', () => { config.locale = 'fr' const {locale, messages, defaultLocale} = config const mf = new IntlMessageFormat(messages!.with_arg, locale) const id = 'missing' const values = {name: 'Eric'} expect(locale).not.toEqual(defaultLocale) expect( formatMessage( { id, defaultMessage: messages!.with_arg, }, values ) ).toBe(mf.format(values)) expect((config.onError as jest.Mock).mock.calls.map(c => c[0].code)) .toMatchInlineSnapshot(` Array [ "MISSING_TRANSLATION", ] `) }) it('warns when `message` and `defaultMessage` are missing', () => { const {messages} = config const id = 'missing' const values = {name: 'Eric'} expect( 
formatMessage( { id: id, defaultMessage: messages!.missing, }, values ) ).toBe(id) expect((config.onError as jest.Mock).mock.calls.map(c => c[0].code)) .toMatchInlineSnapshot(` Array [ "MISSING_TRANSLATION", ] `) }) it('formats `defaultMessage` when message has a syntax error', () => { const {locale, messages} = config const mf = new IntlMessageFormat(messages!.with_arg, locale) const id = 'invalid' const values = {name: 'Eric'} expect( formatMessage( { id: id, defaultMessage: messages!.with_arg, }, values ) ).toBe(mf.format(values)) expect( (config.onError as jest.Mock).mock.calls.map(c => c[0].code) ).toMatchSnapshot() }) it('formats `defaultMessage` when message has missing values', () => { const {locale, messages} = config const mf = new IntlMessageFormat(messages!.with_arg, locale) const id = 'missing_value' const values = {name: 'Eric'} expect( formatMessage( { id: id, defaultMessage: messages!.with_arg, }, values ) ).toBe(mf.format(values)) expect( (config.onError as jest.Mock).mock.calls.map(c => c[0].code) ).toMatchSnapshot() }) it('returns message source when message and `defaultMessage` have formatting errors', () => { const {messages} = config const id = 'missing_value' expect( formatMessage( { id, defaultMessage: messages!.invalid, }, { foo: 1, } ) ).toBe(messages![id]) expect( (config.onError as jest.Mock).mock.calls.map(c => c[0].code) ).toMatchSnapshot() }) it('returns message source when formatting error and missing `defaultMessage`', () => { const {messages} = config const id = 'missing_value' expect( formatMessage( { id, defaultMessage: messages!.missing, }, {foo: 1} ) ).toBe(messages![id]) expect( (config.onError as jest.Mock).mock.calls.map(c => c[0].code) ).toMatchSnapshot() }) it('returns `defaultMessage` source when formatting errors and missing message', () => { config.locale = 'en-US' const {messages} = config const id = 'missing' expect( formatMessage({ id, defaultMessage: messages!.invalid, }) ).toBe(messages!.invalid) expect( (config.onError as jest.Mock).mock.calls.map(c => c[0].code) ).toMatchSnapshot() }) it('returns message `id` when message and `defaultMessage` are missing', () => { const id = 'missing' expect(formatMessage({id})).toBe(id) expect( (config.onError as jest.Mock).mock.calls.map(c => c[0].code) ).toMatchSnapshot() }) it('returns message `id` when message and `defaultMessage` are empty', () => { const {messages} = config const id = 'empty' expect( formatMessage({ id: id, defaultMessage: messages![id], }) ).toBe(id) expect( (config.onError as jest.Mock).mock.calls.map(c => c[0].code) ).toMatchSnapshot() }) it('allow passing Intl.MessageFormat opts in', function () { const {locale, messages, formats} = config const opts = { ignoreTag: true, } const mf = new IntlMessageFormat( messages!.richText, locale, formats, opts ) expect(formatMessage({id: 'richText'}, opts)).toBe(mf.format()) }) }) }) })
} as Record<string, any>, formats: {
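The "fallbacks" suite above pins down a resolution ladder. A rough Python sketch of the behaviour those tests assert (simplified: onError reporting and AST messages are ignored): try the translated message, then defaultMessage; if both fail to format, fall back to the raw message source, then the raw defaultMessage source, then the id itself:

def resolve_message(message, default_message, msg_id, fmt):
    for source in (message, default_message):
        if source:
            try:
                return fmt(source)  # normal formatting path
            except Exception:
                continue  # syntax or value error: try the next source
    # nothing formatted cleanly: raw source, then the id
    return message or default_message or msg_id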
app.py
from flask import Flask from flask import render_template app = Flask(__name__) @app.route("/") def index():
if __name__ == "__main__":
    app.run()
greeting = "Hello World" return render_template("index.html", greeting=greeting)
test_timeout.py
"""Unit tests dla socket timeout feature.""" zaimportuj functools zaimportuj unittest z test zaimportuj support # This requires the 'network' resource jako given on the regrtest command line. skip_expected = nie support.is_resource_enabled('network') zaimportuj time zaimportuj errno zaimportuj socket @functools.lru_cache() def resolve_address(host, port): """Resolve an (host, port) to an address. We must perform name resolution before timeout tests, otherwise it will be performed by connect(). """ przy support.transient_internet(host): zwróć socket.getaddrinfo(host, port, socket.AF_INET, socket.SOCK_STREAM)[0][4] klasa CreationTestCase(unittest.TestCase): """Test case dla socket.gettimeout() oraz socket.settimeout()""" def setUp(self): self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) def te
elf): self.sock.close() def testObjectCreation(self): # Test Socket creation self.assertEqual(self.sock.gettimeout(), Nic, "timeout nie disabled by default") def testFloatReturnValue(self): # Test zwróć value of gettimeout() self.sock.settimeout(7.345) self.assertEqual(self.sock.gettimeout(), 7.345) self.sock.settimeout(3) self.assertEqual(self.sock.gettimeout(), 3) self.sock.settimeout(Nic) self.assertEqual(self.sock.gettimeout(), Nic) def testReturnType(self): # Test zwróć type of gettimeout() self.sock.settimeout(1) self.assertEqual(type(self.sock.gettimeout()), type(1.0)) self.sock.settimeout(3.9) self.assertEqual(type(self.sock.gettimeout()), type(1.0)) def testTypeCheck(self): # Test type checking by settimeout() self.sock.settimeout(0) self.sock.settimeout(0) self.sock.settimeout(0.0) self.sock.settimeout(Nic) self.assertRaises(TypeError, self.sock.settimeout, "") self.assertRaises(TypeError, self.sock.settimeout, "") self.assertRaises(TypeError, self.sock.settimeout, ()) self.assertRaises(TypeError, self.sock.settimeout, []) self.assertRaises(TypeError, self.sock.settimeout, {}) self.assertRaises(TypeError, self.sock.settimeout, 0j) def testRangeCheck(self): # Test range checking by settimeout() self.assertRaises(ValueError, self.sock.settimeout, -1) self.assertRaises(ValueError, self.sock.settimeout, -1) self.assertRaises(ValueError, self.sock.settimeout, -1.0) def testTimeoutThenBlocking(self): # Test settimeout() followed by setblocking() self.sock.settimeout(10) self.sock.setblocking(1) self.assertEqual(self.sock.gettimeout(), Nic) self.sock.setblocking(0) self.assertEqual(self.sock.gettimeout(), 0.0) self.sock.settimeout(10) self.sock.setblocking(0) self.assertEqual(self.sock.gettimeout(), 0.0) self.sock.setblocking(1) self.assertEqual(self.sock.gettimeout(), Nic) def testBlockingThenTimeout(self): # Test setblocking() followed by settimeout() self.sock.setblocking(0) self.sock.settimeout(1) self.assertEqual(self.sock.gettimeout(), 1) self.sock.setblocking(1) self.sock.settimeout(1) self.assertEqual(self.sock.gettimeout(), 1) klasa TimeoutTestCase(unittest.TestCase): # There are a number of tests here trying to make sure that an operation # doesn't take too much longer than expected. But competing machine # activity makes it inevitable that such tests will fail at times. # When fuzz was at 1.0, I (tim) routinely saw bogus failures on Win2K # oraz Win98SE. Boosting it to 2.0 helped a lot, but isn't a real # solution. fuzz = 2.0 localhost = support.HOST def setUp(self): podnieś NotImplementedError() tearDown = setUp def _sock_operation(self, count, timeout, method, *args): """ Test the specified socket method. The method jest run at most `count` times oraz must podnieś a socket.timeout within `timeout` + self.fuzz seconds. 
""" self.sock.settimeout(timeout) method = getattr(self.sock, method) dla i w range(count): t1 = time.time() spróbuj: method(*args) wyjąwszy socket.timeout jako e: delta = time.time() - t1 przerwij inaczej: self.fail('socket.timeout was nie podnieśd') # These checks should account dla timing unprecision self.assertLess(delta, timeout + self.fuzz) self.assertGreater(delta, timeout - 1.0) klasa TCPTimeoutTestCase(TimeoutTestCase): """TCP test case dla socket.socket() timeout functions""" def setUp(self): self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.addr_remote = resolve_address('www.python.org.', 80) def tearDown(self): self.sock.close() def testConnectTimeout(self): # Testing connect timeout jest tricky: we need to have IP connectivity # to a host that silently drops our packets. We can't simulate this # z Python because it's a function of the underlying TCP/IP stack. # So, the following Snakebite host has been defined: blackhole = resolve_address('blackhole.snakebite.net', 56666) # Blackhole has been configured to silently drop any incoming packets. # No RSTs (dla TCP) albo ICMP UNREACH (dla UDP/ICMP) will be sent back # to hosts that attempt to connect to this address: which jest exactly # what we need to confidently test connect timeout. # However, we want to prevent false positives. It's nie unreasonable # to expect certain hosts may nie be able to reach the blackhole, due # to firewalling albo general network configuration. In order to improve # our confidence w testing the blackhole, a corresponding 'whitehole' # has also been set up using one port higher: whitehole = resolve_address('whitehole.snakebite.net', 56667) # This address has been configured to immediately drop any incoming # packets jako well, but it does it respectfully przy regards to the # incoming protocol. RSTs are sent dla TCP packets, oraz ICMP UNREACH # jest sent dla UDP/ICMP packets. This means our attempts to connect to # it should be met immediately przy ECONNREFUSED. The test case has # been structured around this premise: jeżeli we get an ECONNREFUSED from # the whitehole, we proceed przy testing connect timeout against the # blackhole. If we don't, we skip the test (przy a message about nie # getting the required RST z the whitehole within the required # timeframe). # For the records, the whitehole/blackhole configuration has been set # up using the 'pf' firewall (available on BSDs), using the following: # # ext_if="bge0" # # blackhole_ip="35.8.247.6" # whitehole_ip="35.8.247.6" # blackhole_port="56666" # whitehole_port="56667" # # block zwróć w log quick on $ext_jeżeli proto { tcp udp } \ # z any to $whitehole_ip port $whitehole_port # block drop w log quick on $ext_jeżeli proto { tcp udp } \ # z any to $blackhole_ip port $blackhole_port # skip = Prawda sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Use a timeout of 3 seconds. Why 3? Because it's more than 1, oraz # less than 5. i.e. no particular reason. Feel free to tweak it if # you feel a different value would be more appropriate. timeout = 3 sock.settimeout(timeout) spróbuj: sock.connect((whitehole)) wyjąwszy socket.timeout: dalej wyjąwszy OSError jako err: jeżeli err.errno == errno.ECONNREFUSED: skip = Nieprawda w_końcu: sock.close() usuń sock jeżeli skip: self.skipTest( "We didn't receive a connection reset (RST) packet z " "{}:{} within {} seconds, so we're unable to test connect " "timeout against the corresponding {}:{} (which jest " "configured to silently drop packets)." 
.format( whitehole[0], whitehole[1], timeout, blackhole[0], blackhole[1], ) ) # All that hard work just to test jeżeli connect times out w 0.001s ;-) self.addr_remote = blackhole przy support.transient_internet(self.addr_remote[0]): self._sock_operation(1, 0.001, 'connect', self.addr_remote) def testRecvTimeout(self): # Test recv() timeout przy support.transient_internet(self.addr_remote[0]): self.sock.connect(self.addr_remote) self._sock_operation(1, 1.5, 'recv', 1024) def testAcceptTimeout(self): # Test accept() timeout support.bind_port(self.sock, self.localhost) self.sock.listen() self._sock_operation(1, 1.5, 'accept') def testSend(self): # Test send() timeout przy socket.socket(socket.AF_INET, socket.SOCK_STREAM) jako serv: support.bind_port(serv, self.localhost) serv.listen() self.sock.connect(serv.getsockname()) # Send a lot of data w order to bypass buffering w the TCP stack. self._sock_operation(100, 1.5, 'send', b"X" * 200000) def testSendto(self): # Test sendto() timeout przy socket.socket(socket.AF_INET, socket.SOCK_STREAM) jako serv: support.bind_port(serv, self.localhost) serv.listen() self.sock.connect(serv.getsockname()) # The address argument jest ignored since we already connected. self._sock_operation(100, 1.5, 'sendto', b"X" * 200000, serv.getsockname()) def testSendall(self): # Test sendall() timeout przy socket.socket(socket.AF_INET, socket.SOCK_STREAM) jako serv: support.bind_port(serv, self.localhost) serv.listen() self.sock.connect(serv.getsockname()) # Send a lot of data w order to bypass buffering w the TCP stack. self._sock_operation(100, 1.5, 'sendall', b"X" * 200000) klasa UDPTimeoutTestCase(TimeoutTestCase): """UDP test case dla socket.socket() timeout functions""" def setUp(self): self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) def tearDown(self): self.sock.close() def testRecvfromTimeout(self): # Test recvfrom() timeout # Prevent "Address already w use" socket exceptions support.bind_port(self.sock, self.localhost) self._sock_operation(1, 1.5, 'recvfrom', 1024) def test_main(): support.requires('network') support.run_unittest( CreationTestCase, TCPTimeoutTestCase, UDPTimeoutTestCase, ) jeżeli __name__ == "__main__": test_main()
arDown(s
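For readers who don't parse the Polish-localized keywords above (spróbuj/wyjąwszy are try/except, podnieś is raise), the core of _sock_operation asserts two things: socket.timeout is raised, and it is raised at roughly the right time. An English sketch of that check:

import socket
import time

def assert_times_out(operation, timeout, fuzz=2.0):
    t1 = time.time()
    try:
        operation()  # e.g. a recv/accept/connect bound by settimeout()
    except socket.timeout:
        delta = time.time() - t1
        # Allow for timing imprecision, as the original's fuzz factor does.
        assert timeout - 1.0 < delta < timeout + fuzz
    else:
        raise AssertionError('socket.timeout was not raised')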
templates.ts
import { svg } from 'lit-element'; import { Point } from './interfaces/interfaces'; import { invertYAxis } from './utils'; export function originTemplate( origin: Point, size: number, strokeWidth: number ) { const markerSize = 3 * strokeWidth; let xArrow = {x: origin.x + size, y: origin.y}; let yArrow = {x: origin.x, y: origin.y + size}; let xLabel = {x: xArrow.x - 4 * markerSize, y: xArrow.y - 3 * markerSize}; let yLabel = {x: yArrow.x - 3 * markerSize, y: yArrow.y - 4 * markerSize}; // Invert y-axis xArrow = invertYAxis(xArrow); yArrow = invertYAxis(yArrow); xLabel = invertYAxis(xLabel); yLabel = invertYAxis(yLabel); const invOrigin = invertYAxis(origin); return svg` <style> text { user-select: none; } </style> <marker id="arrow" viewBox="0 0 10 10" refX="5" refY="5" markerWidth=${markerSize} markerHeight=${markerSize} orient="auto-start-reverse"> <path d="M 0 0 L 10 5 L 0 10 z" /> </marker> <path d='M ${yArrow.x} ${yArrow.y} L ${invOrigin.x} ${invOrigin.y} L ${xArrow.x} ${xArrow.y}' stroke="black" stroke-width=${strokeWidth} fill="none" marker-start="url(#arrow)" marker-end="url(#arrow)" /> <text x=${xLabel.x} y=${xLabel.y} >X</text> <text x=${yLabel.x} y=${yLabel.y} >Y</text> `;
}
conditionals.rs
pub fn
() { let age = 18; let check_id: bool = true; let knows_person_of_age = false; let can_drink = age >= 21 && check_id; println!("can drink var: {}", can_drink); // if/else if can_drink || knows_person_of_age { println!("Over 21"); } else if age < 21 && check_id { println!("under 21") } else { println!("need to see id"); } }
run
menu-options.ts
}
export class MenuOptions { search:boolean = false
banderplug.py
""" banderplug.py Author: Jacob Ruzi This script is intended to be run with an ini formatted configuration file that defines a 'Choose Your Own Adventure' game. See example_game.ini and README.md for guidelines on how to format your game. See BanderPlug.log in the current directory for error details. """ import configparser import logging.config import re import argparse import os.path # Set up logging logging.config.fileConfig(os.path.join('conf','logging.conf')) logger = logging.getLogger('banderlog') class BanderGame:
def getArguments(): """ Gets the name of the gameFile. :return: The arguments provided by the user """ parser = argparse.ArgumentParser() parser.add_argument('gameFile', help='The ini formatted file with the game configuration') return parser.parse_args() if __name__=="__main__": args = getArguments() myGame = BanderGame(args.gameFile) myGame.playGame()
""" BanderGame An object holding all configuration details of the game and the user's current place. Parameters: gameFile - an ini formatted configuration file that defines the game """ def __init__(self,gameFile): self.stage = '0' self.title = '' # Use helper method to check gameFile validity self.__loadStages(gameFile) def __failConfiguration(self,message): """ Let the user know the game configuration has failed. :param message: A description of the configuration error """ logger.critical('Configuration for the game: %s: is incorrect - %s' % (self.title,message)) exit(1) def __loadStages(self,gameFile): """ __loadStages This helper method is used by the constructor to check that the gameFile is correctly formatted. :param gameFile: an ini formatted configuration file that defines the game """ openGameFile = configparser.ConfigParser() try: openGameFile.read(gameFile) except Exception as e: self.__failConfiguration('Cannot read game configuration file: %s' % gameFile) stanzas = openGameFile.sections() gameStages = {} idLinks = [] # Check for settings stanza if 'settings' not in stanzas: self.__failConfiguration('A settings stanza is required') # Check for settings's ID and title settingsID = openGameFile['settings'].get('id','BAD') settingsTitle = openGameFile['settings'].get('title','BAD') if settingsID!='-1' or settingsTitle=='BAD': self.__failConfiguration('The settings stanza must have an ID of -1 and a title.') self.title = settingsTitle logger.info('The game is being configured: %s' % settingsTitle) for stanza in stanzas: # Check for Valid ID stageID = openGameFile[stanza].get('id','BAD') try: stageID = int(stageID) except ValueError as e: self.__failConfiguration('The stage - %s - is incorrectly configured: bad id' % stanza) # Validate that a previous stage has not already used this ID if str(stageID) in gameStages: self.__failConfiguration('At least two stages have the same ID: %s' % stageID) # Begin creating stage dictionary gameStages[str(stageID)] = {} stage = gameStages[str(stageID)] for key,value in openGameFile[stanza].items(): if key != 'id': stage[key] = value # Fill in default values for potentially empty keys if 'gamewinning' not in stage: stage['gamewinning'] = 'False' if 'gameending' not in stage: stage['gameending'] = 'False' # Validate stage has a message if 'message' not in stage.keys() and stanza!='settings': self.__failConfiguration('The stage - %s - is incorrectly configured: missing message' % stanza) # Validate stage has choices, unless it is the end of the game if stage['gameending'] == 'False' and stage['gamewinning'] == 'False' and stanza!='settings': numChoices = 0 choicePattern = re.compile(r"choice\.(\d+)") for key in stage.keys(): matching = choicePattern.match(key) if matching: numChoices += 1 if 'response.%s' % matching.group(1) not in stage.keys(): self.__failConfiguration('The stage - %s - is missing a response for choice %s' \ % (stanza,matching.group(1))) # Add the response to a list for later verification that the new stage ID exists idLinks.append((stanza,stage['response.%s' % matching.group(1)])) if numChoices == 0: self.__failConfiguration('The stage - %s - does not have any choices' % stanza) # Validate that choice responses contain valid stage IDs for stanza,link in idLinks: if link not in gameStages: self.__failConfiguration('The stage - %s - links to a nonexistant stage ID: %s' % (stanza,link)) self.stages = gameStages def __presentStage(self, gameOver=False): """ __presentStage This helper method is used by the playGame function to present 
        the user's options and check the validity of their choices.

        :param gameOver: A boolean representing whether or not the game has ended
        """
        # Get the configuration details for the current game stage
        stageSettings = self.stages[self.stage]

        # Print the stage's message and choices to the console
        print('\n%s' % stageSettings['message'])
        choices = {}
        for key in stageSettings:
            settingName = key.split('.')
            if settingName[0] == 'choice':
                choices[settingName[1]] = stageSettings[key]
        # Sort numerically so that, e.g., choice 10 is listed after choice 2
        for num,option in sorted(choices.items(), key=lambda choice: int(choice[0])):
            print('%s: %s' % (num, option))

        # If the game is not over, the user must make a choice
        if not gameOver:
            waitingForGoodInput = True
            while waitingForGoodInput:
                userChoice = input('Enter the number of your selection: ')
                if 'response.%s' % userChoice in stageSettings:
                    self.stage = stageSettings['response.%s' % userChoice]
                    waitingForGoodInput = False
                else:
                    print('That is not a valid option.')
                    logger.warning('The user did not select a valid choice in stage %s: %s' % (self.stage, userChoice))
            logger.info('The user has moved to stage %s' % self.stage)

    def playGame(self):
        """
        playGame
        The flow of the game is controlled here. The game proceeds until it is
        ended by a winning or losing choice by the user.
        """
        stageSettings = self.stages[self.stage]
        # Keep presenting game stages until the game is over
        while not (stageSettings['gamewinning'] == 'True' or stageSettings['gameending'] == 'True'):
            self.__presentStage()
            stageSettings = self.stages[self.stage]
        self.__presentStage(True)
        if stageSettings['gamewinning'] == 'True':
            print('You won!')
            logger.info('The user won the game')
        elif stageSettings['gameending'] == 'True':
            print('You lost!')
            logger.info('The user lost the game')

    def __str__(self):
        return str(self.stages)
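# ---------------------------------------------------------------------------
# Illustrative only: a minimal game file that satisfies the checks in
# __loadStages (file and stanza names here are hypothetical). Every stanza
# needs an integer id; non-ending stages need a message plus paired
# choice.N/response.N keys whose responses name existing stage IDs; the
# settings stanza must carry id = -1 and a title; play starts at id 0.
#
#   [settings]
#   id = -1
#   title = Cave Escape
#
#   [entrance]
#   id = 0
#   message = You stand at the mouth of a dark cave.
#   choice.1 = Walk toward the faint light
#   response.1 = 1
#
#   [daylight]
#   id = 1
#   message = You step out into the sun.
#   gamewinning = True
#
# A driver for such a file could then look like:
#
#   game = BanderGame('cave_escape.ini')
#   game.playGame()
# ---------------------------------------------------------------------------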
analyze.py
# BirdWeather edits by @timsterc
# Other edits by @CaiusX and @mcguirepr89
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ['CUDA_VISIBLE_DEVICES'] = ''

try:
    import tflite_runtime.interpreter as tflite
except ImportError:
    # Fall back to the full TensorFlow package when tflite_runtime is absent
    from tensorflow import lite as tflite

import argparse
import operator
import librosa
import numpy as np
import math
import time
from decimal import Decimal
import json

###############################################################################
import requests
import mysql.connector
###############################################################################

import datetime
import pytz
from tzlocal import get_localzone
from pathlib import Path

def loadModel():

    global INPUT_LAYER_INDEX
    global OUTPUT_LAYER_INDEX
    global MDATA_INPUT_INDEX
    global CLASSES

    print('LOADING TF LITE MODEL...', end=' ')

    # Load TFLite model and allocate tensors.
    interpreter = tflite.Interpreter(model_path='model/BirdNET_6K_GLOBAL_MODEL.tflite', num_threads=2)
    interpreter.allocate_tensors()

    # Get input and output tensors.
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    # Get input tensor index
    INPUT_LAYER_INDEX = input_details[0]['index']
    MDATA_INPUT_INDEX = input_details[1]['index']
    OUTPUT_LAYER_INDEX = output_details[0]['index']

    # Load labels
    CLASSES = []
    with open('model/labels.txt', 'r') as lfile:
        for line in lfile.readlines():
            CLASSES.append(line.replace('\n', ''))

    print('DONE!')

    return interpreter

def loadCustomSpeciesList(path):

    slist = []
    if os.path.isfile(path):
        with open(path, 'r') as csfile:
            for line in csfile.readlines():
                slist.append(line.replace('\r', '').replace('\n', ''))

    return slist

def splitSignal(sig, rate, overlap, seconds=3.0, minlen=1.5):

    # Split signal with overlap
    sig_splits = []
    for i in range(0, len(sig), int((seconds - overlap) * rate)):
        split = sig[i:i + int(seconds * rate)]

        # End of signal?
        if len(split) < int(minlen * rate):
            break

        # Signal chunk too short? Fill with zeros.
        if len(split) < int(rate * seconds):
            temp = np.zeros((int(rate * seconds)))
            temp[:len(split)] = split
            split = temp

        sig_splits.append(split)

    return sig_splits

def readAudioData(path, overlap, sample_rate=48000):

    print('READING AUDIO DATA...', end=' ', flush=True)

    # Open file with librosa (uses ffmpeg or libav)
    sig, rate = librosa.load(path, sr=sample_rate, mono=True, res_type='kaiser_fast')

    # Split audio into 3-second chunks
    chunks = splitSignal(sig, rate, overlap)

    print('DONE! READ', str(len(chunks)), 'CHUNKS.')

    return chunks

def convertMetadata(m):

    # Convert week to cosine
def custom_sigmoid(x, sensitivity=1.0):
    return 1 / (1.0 + np.exp(-sensitivity * x))

def predict(sample, interpreter, sensitivity):

    # Make a prediction
    interpreter.set_tensor(INPUT_LAYER_INDEX, np.array(sample[0], dtype='float32'))
    interpreter.set_tensor(MDATA_INPUT_INDEX, np.array(sample[1], dtype='float32'))
    interpreter.invoke()
    prediction = interpreter.get_tensor(OUTPUT_LAYER_INDEX)[0]

    # Apply custom sigmoid
    p_sigmoid = custom_sigmoid(prediction, sensitivity)

    # Get label and scores for pooled predictions
    p_labels = dict(zip(CLASSES, p_sigmoid))

    # Sort by score
    p_sorted = sorted(p_labels.items(), key=operator.itemgetter(1), reverse=True)

    # Remove species that are on the blacklist
    for i in range(min(10, len(p_sorted))):
        if p_sorted[i][0] in ['Human_Human', 'Non-bird_Non-bird', 'Noise_Noise']:
            p_sorted[i] = (p_sorted[i][0], 0.0)

    # Only return the top ten results
    return p_sorted[:10]

def analyzeAudioData(chunks, lat, lon, week, sensitivity, overlap, interpreter):

    detections = {}
    start = time.time()
    print('ANALYZING AUDIO...', end=' ', flush=True)

    # Convert and prepare metadata
    mdata = convertMetadata(np.array([lat, lon, week]))
    mdata = np.expand_dims(mdata, 0)

    # Parse every chunk
    pred_start = 0.0
    for c in chunks:

        # Prepare as input signal
        sig = np.expand_dims(c, 0)

        # Make prediction
        p = predict([sig, mdata], interpreter, sensitivity)

        # Save result and timestamp
        pred_end = pred_start + 3.0
        detections[str(pred_start) + ';' + str(pred_end)] = p
        pred_start = pred_end - overlap

    print('DONE! Time', int((time.time() - start) * 10) / 10.0, 'SECONDS')

    return detections

def writeResultsToFile(detections, min_conf, path):

    print('WRITING RESULTS TO', path, '...', end=' ')
    rcnt = 0
    with open(path, 'w') as rfile:
        rfile.write('Start (s);End (s);Scientific name;Common name;Confidence\n')
        for d in detections:
            for entry in detections[d]:
                if entry[1] >= min_conf and (entry[0] in WHITE_LIST or len(WHITE_LIST) == 0):
                    rfile.write(d + ';' + entry[0].replace('_', ';') + ';' + str(entry[1]) + '\n')
                    rcnt += 1

    print('DONE! WROTE', rcnt, 'RESULTS.')

def main():

    global WHITE_LIST

    # Parse passed arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('--i', help='Path to input file.')
    parser.add_argument('--o', default='result.csv', help='Path to output file. Defaults to result.csv.')
    parser.add_argument('--lat', type=float, default=-1, help='Recording location latitude. Set -1 to ignore.')
    parser.add_argument('--lon', type=float, default=-1, help='Recording location longitude. Set -1 to ignore.')
    parser.add_argument('--week', type=int, default=-1, help='Week of the year when the recording was made. Values in [1, 48] (4 weeks per month). Set -1 to ignore.')
    parser.add_argument('--overlap', type=float, default=0.0, help='Overlap in seconds between extracted spectrograms. Values in [0.0, 2.9]. Defaults to 0.0.')
    parser.add_argument('--sensitivity', type=float, default=1.0, help='Detection sensitivity; Higher values result in higher sensitivity. Values in [0.5, 1.5]. Defaults to 1.0.')
    parser.add_argument('--min_conf', type=float, default=0.1, help='Minimum confidence threshold. Values in [0.01, 0.99]. Defaults to 0.1.')
    parser.add_argument('--custom_list', default='', help='Path to text file containing a list of species. '
                        'Not used if not provided.')
    parser.add_argument('--birdweather_id', default='99999', help='Private Station ID for BirdWeather.')

    args = parser.parse_args()

    # Load model
    interpreter = loadModel()

    # Load custom species list
    if not args.custom_list == '':
        WHITE_LIST = loadCustomSpeciesList(args.custom_list)
    else:
        WHITE_LIST = []

    birdweather_id = args.birdweather_id

    # Read audio data
    audioData = readAudioData(args.i, args.overlap)

    # Get Date/Time from filename in case Pi gets behind
    #now = datetime.now()
    full_file_name = args.i
    file_name = Path(full_file_name).stem
    file_date = file_name.split('-birdnet-')[0]
    file_time = file_name.split('-birdnet-')[1]
    date_time_str = file_date + ' ' + file_time
    date_time_obj = datetime.datetime.strptime(date_time_str, '%Y-%m-%d %H:%M:%S')
    #print('Date:', date_time_obj.date())
    #print('Time:', date_time_obj.time())
    print('Date-time:', date_time_obj)
    now = date_time_obj
    current_date = now.strftime("%Y/%m/%d")
    current_time = now.strftime("%H:%M:%S")
    current_iso8601 = now.astimezone(get_localzone()).isoformat()

    week_number = int(now.strftime("%V"))
    week = max(1, min(week_number, 48))

    sensitivity = max(0.5, min(1.0 - (args.sensitivity - 1.0), 1.5))

    # Process audio data and get detections
    detections = analyzeAudioData(audioData, args.lat, args.lon, week, sensitivity, args.overlap, interpreter)

    # Write detections to output file
    min_conf = max(0.01, min(args.min_conf, 0.99))
    writeResultsToFile(detections, min_conf, args.o)

###############################################################################
###############################################################################

    soundscape_uploaded = False

    # Write detections to Database
    for i in detections:
        print("\n", detections[i][0], "\n")

    with open('BirdDB.txt', 'a') as rfile:
        for d in detections:
            print("\n", "Database Entry", "\n")
            for entry in detections[d]:
                if entry[1] >= min_conf and (entry[0] in WHITE_LIST or len(WHITE_LIST) == 0):
                    rfile.write(str(current_date) + ';' + str(current_time) + ';' + entry[0].replace('_', ';') + ';'
                                + str(entry[1]) + ";" + str(args.lat) + ';' + str(args.lon) + ';' + str(min_conf) + ';' + str(week) + ';'
                                + str(sensitivity) + ';' + str(args.overlap) + '\n')

                    # Helper that writes one detection row; a fresh MySQL
                    # connection is opened and closed per call.
                    def insert_variables_into_table(Date, Time, Sci_Name, Com_Name, Confidence, Lat, Lon, Cutoff, Week, Sens, Overlap):
                        connection = None
                        try:
                            connection = mysql.connector.connect(host='localhost',
                                                                 database='birds',
                                                                 user='birder',
                                                                 password='birdnet')
                            cursor = connection.cursor()
                            mySql_insert_query = """INSERT INTO detections
                                                    (Date, Time, Sci_Name, Com_Name, Confidence, Lat, Lon, Cutoff, Week, Sens, Overlap)
                                                    VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) """
                            record = (Date, Time, Sci_Name, Com_Name, Confidence, Lat, Lon, Cutoff, Week, Sens, Overlap)
                            cursor.execute(mySql_insert_query, record)
                            connection.commit()
                            print("Record inserted successfully into detections table")
                        except mysql.connector.Error as error:
                            print("Failed to insert record into detections table {}".format(error))
                        finally:
                            # Guard against the connect call itself having failed
                            if connection is not None and connection.is_connected():
                                connection.close()
                                print("MySQL connection is closed")

                    species = entry[0]
                    sci_name,com_name = species.split('_')
                    insert_variables_into_table(str(current_date), str(current_time), sci_name, com_name,
                                                str(entry[1]), str(args.lat), str(args.lon), str(min_conf), str(week),
                                                str(args.sensitivity), str(args.overlap))

                    print(str(current_date) + ';' + str(current_time) + ';' + entry[0].replace('_', ';') + ';'
                          + str(entry[1]) + ";" + str(args.lat) + ';' + str(args.lon) + ';' + str(min_conf) + ';' + str(week) + ';' + str(args.sensitivity) + ';'
                          + str(args.overlap) + '\n')

                    if birdweather_id != "99999":

                        if soundscape_uploaded is False:
                            # POST soundscape to server
                            soundscape_url = "https://app.birdweather.com/api/v1/stations/" + birdweather_id + "/soundscapes" + "?timestamp=" + current_iso8601
                            with open(args.i, 'rb') as f:
                                wav_data = f.read()
                            response = requests.post(url=soundscape_url, data=wav_data, headers={'Content-Type': 'application/octet-stream'})
                            print("Soundscape POST Response Status - ", response.status_code)
                            sdata = response.json()
                            soundscape_id = sdata['soundscape']['id']
                            soundscape_uploaded = True

                        # POST detection to server; the payload is built as a
                        # dict so requests handles the JSON encoding and escaping
                        detection_url = "https://app.birdweather.com/api/v1/stations/" + birdweather_id + "/detections"
                        start_time = d.split(';')[0]
                        end_time = d.split(';')[1]
                        now_p_start = now + datetime.timedelta(seconds=float(start_time))
                        current_iso8601 = now_p_start.astimezone(get_localzone()).isoformat()
                        post_json = {
                            "timestamp": current_iso8601,
                            "lat": args.lat,
                            "lon": args.lon,
                            "soundscapeId": soundscape_id,
                            "soundscapeStartTime": float(start_time),
                            "soundscapeEndTime": float(end_time),
                            "commonName": entry[0].split('_')[1],
                            "scientificName": entry[0].split('_')[0],
                            "algorithm": "alpha",
                            "confidence": float(entry[1]),
                        }
                        print(post_json)
                        response = requests.post(detection_url, json=post_json)
                        print("Detection POST Response Status - ", response.status_code)
                        #time.sleep(3)

###############################################################################
###############################################################################

if __name__ == '__main__':

    main()

    # Example calls
    # python3 analyze.py --i 'example/XC558716 - Soundscape.mp3' --lat 35.4244 --lon -120.7463 --week 18
    # python3 analyze.py --i 'example/XC563936 - Soundscape.mp3' --lat 47.6766 --lon -122.294 --week 11 --overlap 1.5 --min_conf 0.25 --sensitivity 1.25 --custom_list 'example/custom_species_list.txt'
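###############################################################################
# A worked note on the sensitivity remapping in main() (values illustrative):
# the user-facing --sensitivity s is converted into the sigmoid slope
# clamp(1.0 - (s - 1.0)) = clamp(2.0 - s) on [0.5, 1.5], so
#
#   --sensitivity 0.5  ->  slope 1.5 (steeper sigmoid, fewer detections)
#   --sensitivity 1.0  ->  slope 1.0
#   --sensitivity 1.5  ->  slope 0.5 (flatter sigmoid; scores are drawn
#                          toward 0.5, so in effect more borderline
#                          detections clear --min_conf)
###############################################################################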
if m[2] >= 1 and m[2] <= 48: m[2] = math.cos(math.radians(m[2] * 7.5)) + 1 else: m[2] = -1 # Add binary mask mask = np.ones((3,)) if m[0] == -1 or m[1] == -1: mask = np.zeros((3,)) if m[2] == -1: mask[2] = 0.0 return np.concatenate([m, mask])
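# Quick illustrative check of convertMetadata (not part of the original
# script): with lat/lon supplied and week 18, 18 * 7.5 = 135 degrees and
# cos(135 deg) + 1 ~ 0.2929, while the mask stays all ones; approximately:
#
#   >>> convertMetadata(np.array([35.4244, -120.7463, 18.0]))
#   array([ 35.4244, -120.7463, 0.2929, 1., 1., 1.])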