file_name | prefix | suffix | middle
---|---|---|---|
db.py | """
db.py
"""
from pymongo import MongoClient
class Db: # pylint: disable=too-few-public-methods
"""
Database.
Singleton pattern, from Bruce Eckel
"""
class __Db: # pylint: disable=invalid-name
def __init__(self, dbname):
self.val = dbname
self.client = MongoClient('mongodb://localhost:27017/')
self.conn = self.client['leadreader_' + dbname]
def __str__(self):
return repr(self) + self.val
instance = None
def __init__(self, dbname='prod'):
if not Db.instance:
|
else:
Db.instance.val = dbname
def __getattr__(self, name):
return getattr(self.instance, name)
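# Usage sketch (hypothetical; assumes MongoDB running on localhost:27017):
#     a = Db('test')   # first call creates the shared __Db for 'leadreader_test'
#     b = Db()         # later calls reuse it, only updating .val (here to 'prod')
#     assert a.conn is b.conn  # attribute access proxies to the single instance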
| Db.instance = Db.__Db(dbname) |
lib.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Support code for rustc's built-in unit-test and micro-benchmarking
//! framework.
//!
//! Almost all user code will only be interested in `Bencher` and
//! `black_box`. All other interactions (such as writing tests and
//! benchmarks themselves) should be done via the `#[test]` and
//! `#[bench]` attributes.
//!
//! See the [Testing Guide](../guide-testing.html) for more details.
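//!
//! A minimal sketch of the attribute-based usage described above (the test
//! names and bodies are illustrative, not part of this crate):
//!
//! ```ignore
//! #[test]
//! fn arithmetic_works() { assert_eq!(2 + 2, 4); }
//!
//! #[bench]
//! fn bench_something(b: &mut test::Bencher) {
//!     b.iter(|| {
//!         // code under measurement goes here
//!     });
//! }
//! ```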
// Currently, not much of this is meant for users. It is intended to
// support the simplest interface possible for representing and
// running tests while providing a base that other test frameworks may
// build off of.
#![crate_name = "test"]
#![unstable]
#![staged_api]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "http://www.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/nightly/")]
#![allow(unknown_features)]
#![feature(asm, slicing_syntax)]
#![feature(box_syntax)]
#![feature(int_uint)]
#![allow(unstable)]
extern crate getopts;
extern crate serialize;
extern crate "serialize" as rustc_serialize;
extern crate term;
pub use self::TestFn::*;
pub use self::ColorConfig::*;
pub use self::TestResult::*;
pub use self::TestName::*;
use self::TestEvent::*;
use self::NamePadding::*;
use self::OutputLocation::*;
use stats::Stats;
use getopts::{OptGroup, optflag, optopt};
use serialize::Encodable;
use term::Terminal;
use term::color::{Color, RED, YELLOW, GREEN, CYAN};
use std::any::Any;
use std::cmp;
use std::collections::BTreeMap;
use std::fmt;
use std::io::stdio::StdWriter;
use std::io::{File, ChanReader, ChanWriter};
use std::io;
use std::iter::repeat;
use std::num::{Float, Int};
use std::os;
use std::str::FromStr;
use std::sync::mpsc::{channel, Sender};
use std::thread::{self, Thread};
use std::thunk::{Thunk, Invoke};
use std::time::Duration;
// to be used by rustc to compile tests in libtest
pub mod test {
pub use {Bencher, TestName, TestResult, TestDesc,
TestDescAndFn, TestOpts, TrFailed, TrIgnored, TrOk,
Metric, MetricMap,
StaticTestFn, StaticTestName, DynTestName, DynTestFn,
run_test, test_main, test_main_static, filter_tests,
parse_opts, StaticBenchFn, ShouldFail};
}
pub mod stats;
// The name of a test. By convention this follows the rules for rust
// paths; i.e. it should be a series of identifiers separated by double
// colons. This way if some test runner wants to arrange the tests
// hierarchically it may.
#[derive(Clone, PartialEq, Eq, Hash, Show)]
pub enum TestName {
StaticTestName(&'static str),
DynTestName(String)
}
impl TestName {
fn as_slice<'a>(&'a self) -> &'a str {
match *self {
StaticTestName(s) => s,
DynTestName(ref s) => s.as_slice()
}
}
}
impl fmt::Display for TestName {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(self.as_slice(), f)
}
}
#[derive(Clone, Copy)]
enum NamePadding {
PadNone,
PadOnLeft,
PadOnRight,
}
impl TestDesc {
fn padded_name(&self, column_count: uint, align: NamePadding) -> String {
let mut name = String::from_str(self.name.as_slice());
let fill = column_count.saturating_sub(name.len());
let mut pad = repeat(" ").take(fill).collect::<String>();
match align {
PadNone => name,
PadOnLeft => {
pad.push_str(name.as_slice());
pad
}
PadOnRight => {
name.push_str(pad.as_slice());
name
}
}
}
}
/// Represents a benchmark function.
pub trait TDynBenchFn {
fn run(&self, harness: &mut Bencher);
}
// A function that runs a test. If the function returns successfully,
// the test succeeds; if the function panics then the test fails. We
// may need to come up with a more clever definition of test in order
// to support isolation of tests into tasks.
pub enum TestFn {
StaticTestFn(fn()),
StaticBenchFn(fn(&mut Bencher)),
StaticMetricFn(fn(&mut MetricMap)),
DynTestFn(Thunk),
DynMetricFn(Box<for<'a> Invoke<&'a mut MetricMap>+'static>),
DynBenchFn(Box<TDynBenchFn+'static>)
}
impl TestFn {
fn padding(&self) -> NamePadding {
match self {
&StaticTestFn(..) => PadNone,
&StaticBenchFn(..) => PadOnRight,
&StaticMetricFn(..) => PadOnRight,
&DynTestFn(..) => PadNone,
&DynMetricFn(..) => PadOnRight,
&DynBenchFn(..) => PadOnRight,
}
}
}
impl fmt::Debug for TestFn {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(match *self {
StaticTestFn(..) => "StaticTestFn(..)",
StaticBenchFn(..) => "StaticBenchFn(..)",
StaticMetricFn(..) => "StaticMetricFn(..)",
DynTestFn(..) => "DynTestFn(..)",
DynMetricFn(..) => "DynMetricFn(..)",
DynBenchFn(..) => "DynBenchFn(..)"
})
}
}
/// Manager of the benchmarking runs.
///
/// This is fed into functions marked with `#[bench]` to allow for
/// set-up & tear-down before running a piece of code repeatedly via a
/// call to `iter`.
#[derive(Copy)]
pub struct Bencher {
iterations: u64,
dur: Duration,
pub bytes: u64,
}
#[derive(Copy, Clone, Show, PartialEq, Eq, Hash)]
pub enum ShouldFail {
No,
Yes(Option<&'static str>)
}
// The definition of a single test. A test runner will run a list of
// these.
#[derive(Clone, Show, PartialEq, Eq, Hash)]
pub struct TestDesc {
pub name: TestName,
pub ignore: bool,
pub should_fail: ShouldFail,
}
unsafe impl Send for TestDesc {}
#[derive(Show)]
pub struct TestDescAndFn {
pub desc: TestDesc,
pub testfn: TestFn,
}
#[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Show, Copy)]
pub struct Metric {
value: f64,
noise: f64
}
impl Metric {
pub fn new(value: f64, noise: f64) -> Metric {
Metric {value: value, noise: noise}
}
}
#[derive(PartialEq)]
pub struct MetricMap(BTreeMap<String,Metric>);
impl Clone for MetricMap {
fn clone(&self) -> MetricMap {
let MetricMap(ref map) = *self;
MetricMap(map.clone())
}
}
// The default console test runner. It accepts the command line
// arguments and a vector of test_descs.
pub fn test_main(args: &[String], tests: Vec<TestDescAndFn> ) {
let opts =
match parse_opts(args) {
Some(Ok(o)) => o,
Some(Err(msg)) => panic!("{:?}", msg),
None => return
};
match run_tests_console(&opts, tests) {
Ok(true) => {}
Ok(false) => panic!("Some tests failed"),
Err(e) => panic!("io error when running tests: {:?}", e),
}
}
// A variant optimized for invocation with a static test vector.
// This will panic (intentionally) when fed any dynamic tests, because
// it is copying the static values out into a dynamic vector and cannot
// copy dynamic values. It is doing this because from this point on
// a ~[TestDescAndFn] is used in order to effect ownership-transfer
// semantics into parallel test runners, which in turn requires a ~[]
// rather than a &[].
pub fn test_main_static(args: &[String], tests: &[TestDescAndFn]) {
let owned_tests = tests.iter().map(|t| {
match t.testfn {
StaticTestFn(f) => TestDescAndFn { testfn: StaticTestFn(f), desc: t.desc.clone() },
StaticBenchFn(f) => TestDescAndFn { testfn: StaticBenchFn(f), desc: t.desc.clone() },
_ => panic!("non-static tests passed to test::test_main_static")
}
}).collect();
test_main(args, owned_tests)
}
#[derive(Copy)]
pub enum ColorConfig {
AutoColor,
AlwaysColor,
NeverColor,
}
pub struct TestOpts {
pub filter: Option<String>,
pub run_ignored: bool,
pub run_tests: bool,
pub run_benchmarks: bool,
pub logfile: Option<Path>,
pub nocapture: bool,
pub color: ColorConfig,
}
impl TestOpts {
#[cfg(test)]
fn new() -> TestOpts {
TestOpts {
filter: None,
run_ignored: false,
run_tests: false,
run_benchmarks: false,
logfile: None,
nocapture: false,
color: AutoColor,
}
}
}
/// Result of parsing the options.
pub type OptRes = Result<TestOpts, String>;
fn optgroups() -> Vec<getopts::OptGroup> {
vec!(getopts::optflag("", "ignored", "Run ignored tests"),
getopts::optflag("", "test", "Run tests and not benchmarks"),
getopts::optflag("", "bench", "Run benchmarks instead of tests"),
getopts::optflag("h", "help", "Display this message (longer with --help)"),
getopts::optopt("", "logfile", "Write logs to the specified file instead \
of stdout", "PATH"),
getopts::optflag("", "nocapture", "don't capture stdout/stderr of each \
task, allow printing directly"),
getopts::optopt("", "color", "Configure coloring of output:
auto = colorize if stdout is a tty and tests are run serially (default);
always = always colorize output;
never = never colorize output;", "auto|always|never"))
}
fn usage(binary: &str) {
let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
println!(r#"{usage}
The FILTER regex is tested against the name of all tests to run, and
only those tests that match are run.
By default, all tests are run in parallel. This can be altered with the
RUST_TEST_TASKS environment variable when running tests (set it to 1).
All tests have their standard output and standard error captured by default.
This can be overridden with the --nocapture flag or the RUST_TEST_NOCAPTURE=1
environment variable. Logging is not captured by default.
Test Attributes:
#[test] - Indicates a function is a test to be run. This function
takes no arguments.
#[bench] - Indicates a function is a benchmark to be run. This
function takes one argument (test::Bencher).
#[should_fail] - This function (also labeled with #[test]) will only pass if
the code causes a failure (an assertion failure or panic!)
A message may be provided, which the failure string must
contain: #[should_fail(expected = "foo")].
#[ignore] - When applied to a function which is already attributed as a
test, then the test runner will ignore these tests during
normal test runs. Running with --ignored will run these
tests."#,
usage = getopts::usage(message.as_slice(),
optgroups().as_slice()));
}
// Parses command line arguments into test options
pub fn parse_opts(args: &[String]) -> Option<OptRes> {
let args_ = args.tail();
let matches =
match getopts::getopts(args_.as_slice(), optgroups().as_slice()) {
Ok(m) => m,
Err(f) => return Some(Err(f.to_string()))
};
if matches.opt_present("h") { usage(args[0].as_slice()); return None; }
let filter = if matches.free.len() > 0 {
Some(matches.free[0].clone())
} else {
None
};
let run_ignored = matches.opt_present("ignored");
let logfile = matches.opt_str("logfile");
let logfile = logfile.map(|s| Path::new(s));
let run_benchmarks = matches.opt_present("bench");
let run_tests = ! run_benchmarks ||
matches.opt_present("test");
let mut nocapture = matches.opt_present("nocapture");
if !nocapture {
nocapture = os::getenv("RUST_TEST_NOCAPTURE").is_some();
}
let color = match matches.opt_str("color").as_ref().map(|s| s.as_slice()) {
Some("auto") | None => AutoColor,
Some("always") => AlwaysColor,
Some("never") => NeverColor,
Some(v) => return Some(Err(format!("argument for --color must be \
auto, always, or never (was {})",
v))),
};
let test_opts = TestOpts {
filter: filter,
run_ignored: run_ignored,
run_tests: run_tests,
run_benchmarks: run_benchmarks,
logfile: logfile,
nocapture: nocapture,
color: color,
};
Some(Ok(test_opts))
}
#[derive(Clone, PartialEq)]
pub struct BenchSamples {
ns_iter_summ: stats::Summary<f64>,
mb_s: uint,
}
#[derive(Clone, PartialEq)]
pub enum TestResult {
TrOk,
TrFailed,
TrIgnored,
TrMetrics(MetricMap),
TrBench(BenchSamples),
}
unsafe impl Send for TestResult {}
enum OutputLocation<T> {
Pretty(Box<term::Terminal<term::WriterWrapper> + Send>),
Raw(T),
}
struct ConsoleTestState<T> {
log_out: Option<File>,
out: OutputLocation<T>,
use_color: bool,
total: uint,
passed: uint,
failed: uint,
ignored: uint,
measured: uint,
metrics: MetricMap,
failures: Vec<(TestDesc, Vec<u8> )> ,
max_name_len: uint, // number of columns to fill when aligning names
}
impl<T: Writer> ConsoleTestState<T> {
pub fn new(opts: &TestOpts,
_: Option<T>) -> io::IoResult<ConsoleTestState<StdWriter>> {
let log_out = match opts.logfile {
Some(ref path) => Some(try!(File::create(path))),
None => None
};
let out = match term::stdout() {
None => Raw(io::stdio::stdout_raw()),
Some(t) => Pretty(t)
};
Ok(ConsoleTestState {
out: out,
log_out: log_out,
use_color: use_color(opts),
total: 0u,
passed: 0u,
failed: 0u,
ignored: 0u,
measured: 0u,
metrics: MetricMap::new(),
failures: Vec::new(),
max_name_len: 0u,
})
}
pub fn write_ok(&mut self) -> io::IoResult<()> {
self.write_pretty("ok", term::color::GREEN)
}
pub fn write_failed(&mut self) -> io::IoResult<()> {
self.write_pretty("FAILED", term::color::RED)
}
pub fn write_ignored(&mut self) -> io::IoResult<()> {
self.write_pretty("ignored", term::color::YELLOW)
}
pub fn write_metric(&mut self) -> io::IoResult<()> {
self.write_pretty("metric", term::color::CYAN)
}
pub fn write_bench(&mut self) -> io::IoResult<()> {
self.write_pretty("bench", term::color::CYAN)
}
pub fn write_pretty(&mut self,
word: &str,
color: term::color::Color) -> io::IoResult<()> {
match self.out {
Pretty(ref mut term) => {
if self.use_color {
try!(term.fg(color));
}
try!(term.write(word.as_bytes()));
if self.use_color {
try!(term.reset());
}
Ok(())
}
Raw(ref mut stdout) => stdout.write(word.as_bytes())
}
}
pub fn write_plain(&mut self, s: &str) -> io::IoResult<()> {
match self.out {
Pretty(ref mut term) => term.write(s.as_bytes()),
Raw(ref mut stdout) => stdout.write(s.as_bytes())
}
}
pub fn write_run_start(&mut self, len: uint) -> io::IoResult<()> {
self.total = len;
let noun = if len != 1 { "tests" } else { "test" };
self.write_plain(format!("\nrunning {} {}\n", len, noun).as_slice())
}
pub fn write_test_start(&mut self, test: &TestDesc,
align: NamePadding) -> io::IoResult<()> {
let name = test.padded_name(self.max_name_len, align);
self.write_plain(format!("test {} ... ", name).as_slice())
}
pub fn write_result(&mut self, result: &TestResult) -> io::IoResult<()> {
try!(match *result {
TrOk => self.write_ok(),
TrFailed => self.write_failed(),
TrIgnored => self.write_ignored(),
TrMetrics(ref mm) => {
try!(self.write_metric());
self.write_plain(format!(": {}", mm.fmt_metrics()).as_slice())
}
TrBench(ref bs) => {
try!(self.write_bench());
try!(self.write_plain(format!(": {}",
fmt_bench_samples(bs)).as_slice()));
Ok(())
}
});
self.write_plain("\n")
}
pub fn write_log(&mut self, test: &TestDesc,
result: &TestResult) -> io::IoResult<()> {
match self.log_out {
None => Ok(()),
Some(ref mut o) => {
let s = format!("{} {}\n", match *result {
TrOk => "ok".to_string(),
TrFailed => "failed".to_string(),
TrIgnored => "ignored".to_string(),
TrMetrics(ref mm) => mm.fmt_metrics(),
TrBench(ref bs) => fmt_bench_samples(bs)
}, test.name.as_slice());
o.write(s.as_bytes())
}
}
}
pub fn write_failures(&mut self) -> io::IoResult<()> {
try!(self.write_plain("\nfailures:\n"));
let mut failures = Vec::new();
let mut fail_out = String::new();
for &(ref f, ref stdout) in self.failures.iter() {
failures.push(f.name.to_string());
if stdout.len() > 0 {
fail_out.push_str(format!("---- {} stdout ----\n\t",
f.name.as_slice()).as_slice());
let output = String::from_utf8_lossy(stdout.as_slice());
fail_out.push_str(output.as_slice());
fail_out.push_str("\n");
}
}
if fail_out.len() > 0 {
try!(self.write_plain("\n"));
try!(self.write_plain(fail_out.as_slice()));
}
try!(self.write_plain("\nfailures:\n"));
failures.sort();
for name in failures.iter() {
try!(self.write_plain(format!(" {}\n",
name.as_slice()).as_slice()));
}
Ok(())
}
pub fn write_run_finish(&mut self) -> io::IoResult<bool> {
assert!(self.passed + self.failed + self.ignored + self.measured == self.total);
let success = self.failed == 0u;
if !success {
try!(self.write_failures());
}
try!(self.write_plain("\ntest result: "));
if success {
// There's no parallelism at this point so it's safe to use color
try!(self.write_ok());
} else {
try!(self.write_failed());
}
let s = format!(". {} passed; {} failed; {} ignored; {} measured\n\n",
self.passed, self.failed, self.ignored, self.measured);
try!(self.write_plain(s.as_slice()));
return Ok(success);
}
}
pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
if bs.mb_s != 0 {
format!("{:>9} ns/iter (+/- {}) = {} MB/s",
bs.ns_iter_summ.median as uint,
(bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint,
bs.mb_s)
} else {
format!("{:>9} ns/iter (+/- {})",
bs.ns_iter_summ.median as uint,
(bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint)
}
}
// A simple console test runner
pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn> ) -> io::IoResult<bool> {
fn callback<T: Writer>(event: &TestEvent, st: &mut ConsoleTestState<T>) -> io::IoResult<()> {
match (*event).clone() {
TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()),
TeWait(ref test, padding) => st.write_test_start(test, padding),
TeResult(test, result, stdout) => {
try!(st.write_log(&test, &result));
try!(st.write_result(&result));
match result {
TrOk => st.passed += 1,
TrIgnored => st.ignored += 1,
TrMetrics(mm) => {
let tname = test.name.as_slice();
let MetricMap(mm) = mm;
for (k,v) in mm.iter() {
st.metrics
.insert_metric(format!("{}.{}",
tname,
k).as_slice(),
v.value,
v.noise);
}
st.measured += 1
}
TrBench(bs) => {
st.metrics.insert_metric(test.name.as_slice(),
bs.ns_iter_summ.median,
bs.ns_iter_summ.max - bs.ns_iter_summ.min);
st.measured += 1
}
TrFailed => {
st.failed += 1;
st.failures.push((test, stdout));
}
}
Ok(())
}
}
}
let mut st = try!(ConsoleTestState::new(opts, None::<StdWriter>));
fn len_if_padded(t: &TestDescAndFn) -> uint {
match t.testfn.padding() {
PadNone => 0u,
PadOnLeft | PadOnRight => t.desc.name.as_slice().len(),
}
}
match tests.iter().max_by(|t|len_if_padded(*t)) {
Some(t) => {
let n = t.desc.name.as_slice();
st.max_name_len = n.len();
},
None => {}
}
try!(run_tests(opts, tests, |x| callback(&x, &mut st)));
return st.write_run_finish();
}
#[test]
fn should_sort_failures_before_printing_them() {
let test_a = TestDesc {
name: StaticTestName("a"),
ignore: false,
should_fail: ShouldFail::No
};
let test_b = TestDesc {
name: StaticTestName("b"),
ignore: false,
should_fail: ShouldFail::No
};
let mut st = ConsoleTestState {
log_out: None,
out: Raw(Vec::new()),
use_color: false,
total: 0u,
passed: 0u,
failed: 0u,
ignored: 0u,
measured: 0u,
max_name_len: 10u,
metrics: MetricMap::new(),
failures: vec!((test_b, Vec::new()), (test_a, Vec::new()))
};
st.write_failures().unwrap();
let s = match st.out {
Raw(ref m) => String::from_utf8_lossy(&m[]),
Pretty(_) => unreachable!()
};
let apos = s.find_str("a").unwrap();
let bpos = s.find_str("b").unwrap();
assert!(apos < bpos);
}
fn use_color(opts: &TestOpts) -> bool {
match opts.color {
AutoColor => get_concurrency() == 1 && io::stdout().get_ref().isatty(),
AlwaysColor => true,
NeverColor => false,
}
}
#[derive(Clone)]
enum TestEvent {
TeFiltered(Vec<TestDesc> ),
TeWait(TestDesc, NamePadding),
TeResult(TestDesc, TestResult, Vec<u8> ),
}
pub type MonitorMsg = (TestDesc, TestResult, Vec<u8> );
fn run_tests<F>(opts: &TestOpts,
tests: Vec<TestDescAndFn> ,
mut callback: F) -> io::IoResult<()> where
F: FnMut(TestEvent) -> io::IoResult<()>,
{
let filtered_tests = filter_tests(opts, tests);
let filtered_descs = filtered_tests.iter()
.map(|t| t.desc.clone())
.collect();
try!(callback(TeFiltered(filtered_descs)));
let (filtered_tests, filtered_benchs_and_metrics): (Vec<_>, _) =
filtered_tests.into_iter().partition(|e| {
match e.testfn {
StaticTestFn(_) | DynTestFn(_) => true,
_ => false
}
});
// It's tempting to just spawn all the tests at once, but since we have
// many tests that run in other processes we would be making a big mess.
let concurrency = get_concurrency();
let mut remaining = filtered_tests;
remaining.reverse();
let mut pending = 0;
let (tx, rx) = channel::<MonitorMsg>();
while pending > 0 || !remaining.is_empty() {
while pending < concurrency && !remaining.is_empty() {
let test = remaining.pop().unwrap();
if concurrency == 1 {
// We are doing one test at a time so we can print the name
// of the test before we run it. Useful for debugging tests
// that hang forever.
try!(callback(TeWait(test.desc.clone(), test.testfn.padding())));
}
run_test(opts, !opts.run_tests, test, tx.clone());
pending += 1;
}
let (desc, result, stdout) = rx.recv().unwrap();
if concurrency != 1 {
try!(callback(TeWait(desc.clone(), PadNone)));
}
try!(callback(TeResult(desc, result, stdout)));
pending -= 1;
}
// All benchmarks run at the end, in serial.
// (this includes metric fns)
for b in filtered_benchs_and_metrics.into_iter() {
try!(callback(TeWait(b.desc.clone(), b.testfn.padding())));
run_test(opts, !opts.run_benchmarks, b, tx.clone());
let (test, result, stdout) = rx.recv().unwrap();
try!(callback(TeResult(test, result, stdout)));
}
Ok(())
}
fn get_concurrency() -> uint {
use std::rt;
match os::getenv("RUST_TEST_TASKS") {
Some(s) => {
let opt_n: Option<uint> = FromStr::from_str(s.as_slice());
match opt_n {
Some(n) if n > 0 => n,
_ => panic!("RUST_TEST_TASKS is `{}`, should be a positive integer.", s)
}
}
None => {
rt::default_sched_threads()
}
}
}
pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
let mut filtered = tests;
// Remove tests that don't match the test filter
filtered = match opts.filter {
None => filtered,
Some(ref filter) => {
filtered.into_iter().filter(|test| {
test.desc.name.as_slice().contains(&filter[])
}).collect()
}
};
// Maybe pull out the ignored tests and unignore them
filtered = if !opts.run_ignored {
filtered
} else {
fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
if test.desc.ignore {
let TestDescAndFn {desc, testfn} = test;
Some(TestDescAndFn {
desc: TestDesc {ignore: false, ..desc},
testfn: testfn
})
} else {
None
}
};
filtered.into_iter().filter_map(|x| filter(x)).collect()
};
// Sort the tests alphabetically
filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
filtered
}
pub fn run_test(opts: &TestOpts,
force_ignore: bool,
test: TestDescAndFn,
monitor_ch: Sender<MonitorMsg>) {
let TestDescAndFn {desc, testfn} = test;
if force_ignore || desc.ignore {
monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
return;
}
fn run_test_inner(desc: TestDesc,
monitor_ch: Sender<MonitorMsg>,
nocapture: bool,
testfn: Thunk) {
Thread::spawn(move || {
let (tx, rx) = channel();
let mut reader = ChanReader::new(rx);
let stdout = ChanWriter::new(tx.clone());
let stderr = ChanWriter::new(tx);
let mut cfg = thread::Builder::new().name(match desc.name {
DynTestName(ref name) => name.clone().to_string(),
StaticTestName(name) => name.to_string(),
});
if nocapture {
drop((stdout, stderr));
} else {
cfg = cfg.stdout(box stdout as Box<Writer + Send>);
cfg = cfg.stderr(box stderr as Box<Writer + Send>);
}
let result_guard = cfg.scoped(move || { testfn.invoke(()) });
let stdout = reader.read_to_end().unwrap().into_iter().collect();
let test_result = calc_result(&desc, result_guard.join());
monitor_ch.send((desc.clone(), test_result, stdout)).unwrap();
});
}
match testfn {
DynBenchFn(bencher) => {
let bs = ::bench::benchmark(|harness| bencher.run(harness));
monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
return;
}
StaticBenchFn(benchfn) => |
DynMetricFn(f) => {
let mut mm = MetricMap::new();
f.invoke(&mut mm);
monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
return;
}
StaticMetricFn(f) => {
let mut mm = MetricMap::new();
f(&mut mm);
monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
return;
}
DynTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture, f),
StaticTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture,
Thunk::new(move|| f()))
}
}
fn calc_result(desc: &TestDesc, task_result: Result<(), Box<Any+Send>>) -> TestResult {
match (&desc.should_fail, task_result) {
(&ShouldFail::No, Ok(())) |
(&ShouldFail::Yes(None), Err(_)) => TrOk,
(&ShouldFail::Yes(Some(msg)), Err(ref err))
if err.downcast_ref::<String>()
.map(|e| &**e)
.or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
.map(|e| e.contains(msg))
.unwrap_or(false) => TrOk,
_ => TrFailed,
}
}
impl MetricMap {
pub fn new() -> MetricMap {
MetricMap(BTreeMap::new())
}
/// Insert a named `value` (+/- `noise`) metric into the map. The value
/// must be non-negative. The `noise` indicates the uncertainty of the
/// metric, which doubles as the "noise range" of acceptable
/// pairwise-regressions on this named value, when comparing from one
/// metric to the next using `compare_to_old`.
///
/// If `noise` is positive, then it means this metric is of a value
/// you want to see grow smaller, so a change larger than `noise` in the
/// positive direction represents a regression.
///
/// If `noise` is negative, then it means this metric is of a value
/// you want to see grow larger, so a change larger than `noise` in the
/// negative direction represents a regression.
pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
let m = Metric {
value: value,
noise: noise
};
let MetricMap(ref mut map) = *self;
map.insert(name.to_string(), m);
}
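    // Illustrative values (hypothetical metric names, not from this crate):
    // a latency you want to shrink gets a positive noise band, a throughput
    // you want to grow gets a negative one.
    //
    //     let mut mm = MetricMap::new();
    //     mm.insert_metric("decode-ns", 1200.0, 50.0);  // regression if it grows by > 50
    //     mm.insert_metric("encode-mb-s", 340.0, -5.0); // regression if it shrinks by > 5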
pub fn fmt_metrics(&self) -> String {
let MetricMap(ref mm) = *self;
let v : Vec<String> = mm.iter()
.map(|(k,v)| format!("{}: {} (+/- {})", *k,
v.value as f64, v.noise as f64))
.collect();
v.connect(", ")
}
}
// Benchmarking
/// A function that is opaque to the optimizer, to allow benchmarks to
/// pretend to use outputs to assist in avoiding dead-code
/// elimination.
///
/// This function is a no-op, and does not even read from `dummy`.
pub fn black_box<T>(dummy: T) -> T {
// we need to "use" the argument in some way LLVM can't
// introspect.
unsafe {asm!("" : : "r"(&dummy))}
dummy
}
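// Illustrative use (hypothetical benchmark body): without `black_box`, LLVM
// may const-fold the computation away, leaving the loop with nothing to measure.
//
//     b.iter(|| black_box(expensive_computation()));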
impl Bencher {
/// Callback for benchmark functions to run in their body.
pub fn iter<T, F>(&mut self, mut inner: F) where F: FnMut() -> T {
self.dur = Duration::span(|| {
let k = self.iterations;
for _ in range(0u64, k) {
black_box(inner());
}
});
}
pub fn ns_elapsed(&mut self) -> u64 {
self.dur.num_nanoseconds().unwrap() as u64
}
pub fn ns_per_iter(&mut self) -> u64 {
if self.iterations == 0 {
0
} else {
self.ns_elapsed() / cmp::max(self.iterations, 1)
}
}
pub fn bench_n<F>(&mut self, n: u64, f: F) where F: FnOnce(&mut Bencher) {
self.iterations = n;
f(self);
}
// This is a more statistics-driven benchmark algorithm
pub fn auto_bench<F>(&mut self, mut f: F) -> stats::Summary<f64> where F: FnMut(&mut Bencher) {
// Initial bench run to get ballpark figure.
let mut n = 1_u64;
self.bench_n(n, |x| f(x));
// Try to estimate iter count for 1ms falling back to 1m
// iterations if first run took < 1ns.
if self.ns_per_iter() == 0 {
n = 1_000_000;
} else {
n = 1_000_000 / cmp::max(self.ns_per_iter(), 1);
}
// if the first run took more than 1ms we don't want to just
// be left doing 0 iterations on every loop. The unfortunate
// side effect of not being able to do as many runs is
// automatically handled by the statistical analysis below
// (i.e. larger error bars).
if n == 0 { n = 1; }
let mut total_run = Duration::nanoseconds(0);
let samples : &mut [f64] = &mut [0.0_f64; 50];
loop {
let mut summ = None;
let mut summ5 = None;
let loop_run = Duration::span(|| {
for p in samples.iter_mut() {
self.bench_n(n, |x| f(x));
*p = self.ns_per_iter() as f64;
};
stats::winsorize(samples, 5.0);
summ = Some(stats::Summary::new(samples));
for p in samples.iter_mut() {
self.bench_n(5 * n, |x| f(x));
*p = self.ns_per_iter() as f64;
};
stats::winsorize(samples, 5.0);
summ5 = Some(stats::Summary::new(samples));
});
let summ = summ.unwrap();
let summ5 = summ5.unwrap();
// If we've run for 100ms and seem to have converged to a
// stable median.
if loop_run.num_milliseconds() > 100 &&
summ.median_abs_dev_pct < 1.0 &&
summ.median - summ5.median < summ5.median_abs_dev {
return summ5;
}
total_run = total_run + loop_run;
// Longest we ever run for is 3s.
if total_run.num_seconds() > 3 {
return summ5;
}
n *= 2;
}
}
}
pub mod bench {
use std::cmp;
use std::time::Duration;
use super::{Bencher, BenchSamples};
pub fn benchmark<F>(f: F) -> BenchSamples where F: FnMut(&mut Bencher) {
let mut bs = Bencher {
iterations: 0,
dur: Duration::nanoseconds(0),
bytes: 0
};
let ns_iter_summ = bs.auto_bench(f);
let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
let iter_s = 1_000_000_000 / ns_iter;
let mb_s = (bs.bytes * iter_s) / 1_000_000;
BenchSamples {
ns_iter_summ: ns_iter_summ,
mb_s: mb_s as uint
}
}
}
#[cfg(test)]
mod tests {
use test::{TrFailed, TrIgnored, TrOk, filter_tests, parse_opts,
TestDesc, TestDescAndFn, TestOpts, run_test,
Metric, MetricMap,
StaticTestName, DynTestName, DynTestFn, ShouldFail};
use std::io::TempDir;
use std::thunk::Thunk;
use std::sync::mpsc::channel;
#[test]
pub fn do_not_run_ignored_tests() {
fn f() { panic!(); }
let desc = TestDescAndFn {
desc: TestDesc {
name: StaticTestName("whatever"),
ignore: true,
should_fail: ShouldFail::No,
},
testfn: DynTestFn(Thunk::new(move|| f())),
};
let (tx, rx) = channel();
run_test(&TestOpts::new(), false, desc, tx);
let (_, res, _) = rx.recv().unwrap();
assert!(res != TrOk);
}
#[test]
pub fn ignored_tests_result_in_ignored() {
fn f() { }
let desc = TestDescAndFn {
desc: TestDesc {
name: StaticTestName("whatever"),
ignore: true,
should_fail: ShouldFail::No,
},
testfn: DynTestFn(Thunk::new(move|| f())),
};
let (tx, rx) = channel();
run_test(&TestOpts::new(), false, desc, tx);
let (_, res, _) = rx.recv().unwrap();
assert!(res == TrIgnored);
}
#[test]
fn test_should_fail() {
fn f() { panic!(); }
let desc = TestDescAndFn {
desc: TestDesc {
name: StaticTestName("whatever"),
ignore: false,
should_fail: ShouldFail::Yes(None)
},
testfn: DynTestFn(Thunk::new(move|| f())),
};
let (tx, rx) = channel();
run_test(&TestOpts::new(), false, desc, tx);
let (_, res, _) = rx.recv().unwrap();
assert!(res == TrOk);
}
#[test]
fn test_should_fail_good_message() {
fn f() { panic!("an error message"); }
let desc = TestDescAndFn {
desc: TestDesc {
name: StaticTestName("whatever"),
ignore: false,
should_fail: ShouldFail::Yes(Some("error message"))
},
testfn: DynTestFn(Thunk::new(move|| f())),
};
let (tx, rx) = channel();
run_test(&TestOpts::new(), false, desc, tx);
let (_, res, _) = rx.recv().unwrap();
assert!(res == TrOk);
}
#[test]
fn test_should_fail_bad_message() {
fn f() { panic!("an error message"); }
let desc = TestDescAndFn {
desc: TestDesc {
name: StaticTestName("whatever"),
ignore: false,
should_fail: ShouldFail::Yes(Some("foobar"))
},
testfn: DynTestFn(Thunk::new(move|| f())),
};
let (tx, rx) = channel();
run_test(&TestOpts::new(), false, desc, tx);
let (_, res, _) = rx.recv().unwrap();
assert!(res == TrFailed);
}
#[test]
fn test_should_fail_but_succeeds() {
fn f() { }
let desc = TestDescAndFn {
desc: TestDesc {
name: StaticTestName("whatever"),
ignore: false,
should_fail: ShouldFail::Yes(None)
},
testfn: DynTestFn(Thunk::new(move|| f())),
};
let (tx, rx) = channel();
run_test(&TestOpts::new(), false, desc, tx);
let (_, res, _) = rx.recv().unwrap();
assert!(res == TrFailed);
}
#[test]
fn parse_ignored_flag() {
let args = vec!("progname".to_string(),
"filter".to_string(),
"--ignored".to_string());
let opts = match parse_opts(args.as_slice()) {
Some(Ok(o)) => o,
_ => panic!("Malformed arg in parse_ignored_flag")
};
assert!((opts.run_ignored));
}
#[test]
pub fn filter_for_ignored_option() {
// When we run ignored tests the test filter should filter out all the
// unignored tests and flip the ignore flag on the rest to false
let mut opts = TestOpts::new();
opts.run_tests = true;
opts.run_ignored = true;
let tests = vec!(
TestDescAndFn {
desc: TestDesc {
name: StaticTestName("1"),
ignore: true,
should_fail: ShouldFail::No,
},
testfn: DynTestFn(Thunk::new(move|| {})),
},
TestDescAndFn {
desc: TestDesc {
name: StaticTestName("2"),
ignore: false,
should_fail: ShouldFail::No,
},
testfn: DynTestFn(Thunk::new(move|| {})),
});
let filtered = filter_tests(&opts, tests);
assert_eq!(filtered.len(), 1);
assert_eq!(filtered[0].desc.name.to_string(),
"1");
assert!(filtered[0].desc.ignore == false);
}
#[test]
pub fn sort_tests() {
let mut opts = TestOpts::new();
opts.run_tests = true;
let names =
vec!("sha1::test".to_string(),
"int::test_to_str".to_string(),
"int::test_pow".to_string(),
"test::do_not_run_ignored_tests".to_string(),
"test::ignored_tests_result_in_ignored".to_string(),
"test::first_free_arg_should_be_a_filter".to_string(),
"test::parse_ignored_flag".to_string(),
"test::filter_for_ignored_option".to_string(),
"test::sort_tests".to_string());
let tests =
{
fn testfn() { }
let mut tests = Vec::new();
for name in names.iter() {
let test = TestDescAndFn {
desc: TestDesc {
name: DynTestName((*name).clone()),
ignore: false,
should_fail: ShouldFail::No,
},
testfn: DynTestFn(Thunk::new(testfn)),
};
tests.push(test);
}
tests
};
let filtered = filter_tests(&opts, tests);
let expected =
vec!("int::test_pow".to_string(),
"int::test_to_str".to_string(),
"sha1::test".to_string(),
"test::do_not_run_ignored_tests".to_string(),
"test::filter_for_ignored_option".to_string(),
"test::first_free_arg_should_be_a_filter".to_string(),
"test::ignored_tests_result_in_ignored".to_string(),
"test::parse_ignored_flag".to_string(),
"test::sort_tests".to_string());
for (a, b) in expected.iter().zip(filtered.iter()) {
assert!(*a == b.desc.name.to_string());
}
}
#[test]
pub fn test_metricmap_compare() {
let mut m1 = MetricMap::new();
let mut m2 = MetricMap::new();
m1.insert_metric("in-both-noise", 1000.0, 200.0);
m2.insert_metric("in-both-noise", 1100.0, 200.0);
m1.insert_metric("in-first-noise", 1000.0, 2.0);
m2.insert_metric("in-second-noise", 1000.0, 2.0);
m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);
}
}
| {
let bs = ::bench::benchmark(|harness| (benchfn.clone())(harness));
monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
return;
} |
recurrent_test.py | import pytest
import numpy as np
from numpy.testing import assert_allclose
import keras
from keras.utils.test_utils import layer_test
from keras.utils.test_utils import keras_test
from keras.layers import recurrent
from keras.layers import embeddings
from keras.models import Sequential
from keras.models import Model
from keras.engine.topology import Input
from keras.layers.core import Masking
from keras import regularizers
from keras import backend as K
num_samples, timesteps, embedding_dim, units = 2, 5, 4, 3
embedding_num = 12
def rnn_test(f):
"""
All the recurrent layers share the same interface,
so we can run through them with a single function.
"""
f = keras_test(f)
return pytest.mark.parametrize('layer_class', [
recurrent.SimpleRNN,
recurrent.GRU,
recurrent.LSTM
])(f)
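# Illustrative effect of the decorator above: each wrapped test runs once per
# layer class, e.g. test_return_sequences(recurrent.SimpleRNN), then with
# recurrent.GRU, then with recurrent.LSTM.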
@rnn_test
def test_return_sequences(layer_class):
layer_test(layer_class,
kwargs={'units': units,
'return_sequences': True},
input_shape=(num_samples, timesteps, embedding_dim))
@rnn_test
def test_dynamic_behavior(layer_class):
layer = layer_class(units, input_shape=(None, embedding_dim))
model = Sequential()
model.add(layer)
model.compile('sgd', 'mse')
x = np.random.random((num_samples, timesteps, embedding_dim))
y = np.random.random((num_samples, units))
model.train_on_batch(x, y)
@rnn_test
def test_stateful_invalid_use(layer_class):
layer = layer_class(units,
stateful=True,
batch_input_shape=(num_samples,
timesteps,
embedding_dim))
model = Sequential()
model.add(layer)
model.compile('sgd', 'mse')
x = np.random.random((num_samples * 2, timesteps, embedding_dim))
y = np.random.random((num_samples * 2, units))
with pytest.raises(ValueError):
model.fit(x, y)
with pytest.raises(ValueError):
model.predict(x, batch_size=num_samples + 1)
@rnn_test
@pytest.mark.skipif((K.backend() == 'cntk'),
reason='Not yet supported.')
def test_dropout(layer_class):
for unroll in [True, False]:
layer_test(layer_class,
kwargs={'units': units,
'dropout': 0.1,
'recurrent_dropout': 0.1,
'unroll': unroll},
input_shape=(num_samples, timesteps, embedding_dim))
# Test that dropout is applied during training
x = K.ones((num_samples, timesteps, embedding_dim))
layer = layer_class(units, dropout=0.5, recurrent_dropout=0.5,
input_shape=(timesteps, embedding_dim))
y = layer(x)
assert y._uses_learning_phase
y = layer(x, training=True)
assert not getattr(y, '_uses_learning_phase')
# Test that dropout is not applied during testing
x = np.random.random((num_samples, timesteps, embedding_dim))
layer = layer_class(units, dropout=0.5, recurrent_dropout=0.5,
unroll=unroll,
input_shape=(timesteps, embedding_dim))
model = Sequential([layer])
assert model.uses_learning_phase
y1 = model.predict(x)
y2 = model.predict(x)
assert_allclose(y1, y2)
@rnn_test
def test_statefulness(layer_class):
|
@rnn_test
def test_masking_correctness(layer_class):
# Check masking: output with left padding and right padding
# should be the same.
model = Sequential()
model.add(embeddings.Embedding(embedding_num, embedding_dim,
mask_zero=True,
input_length=timesteps,
batch_input_shape=(num_samples, timesteps)))
layer = layer_class(units, return_sequences=False)
model.add(layer)
model.compile(optimizer='sgd', loss='mse')
left_padded_input = np.ones((num_samples, timesteps))
left_padded_input[0, :1] = 0
left_padded_input[1, :2] = 0
out6 = model.predict(left_padded_input)
right_padded_input = np.ones((num_samples, timesteps))
right_padded_input[0, -1:] = 0
right_padded_input[1, -2:] = 0
out7 = model.predict(right_padded_input)
assert_allclose(out7, out6, atol=1e-5)
@rnn_test
def test_implementation_mode(layer_class):
for mode in [1, 2]:
# Without dropout
layer_test(layer_class,
kwargs={'units': units,
'implementation': mode},
input_shape=(num_samples, timesteps, embedding_dim))
# With dropout
layer_test(layer_class,
kwargs={'units': units,
'implementation': mode,
'dropout': 0.1,
'recurrent_dropout': 0.1},
input_shape=(num_samples, timesteps, embedding_dim))
# Without bias
layer_test(layer_class,
kwargs={'units': units,
'implementation': mode,
'use_bias': False},
input_shape=(num_samples, timesteps, embedding_dim))
@rnn_test
def test_regularizer(layer_class):
layer = layer_class(units, return_sequences=False, weights=None,
input_shape=(timesteps, embedding_dim),
kernel_regularizer=regularizers.l1(0.01),
recurrent_regularizer=regularizers.l1(0.01),
bias_regularizer='l2')
layer.build((None, None, embedding_dim))
assert len(layer.losses) == 3
assert len(layer.cell.losses) == 3
layer = layer_class(units, return_sequences=False, weights=None,
input_shape=(timesteps, embedding_dim),
activity_regularizer='l2')
assert layer.activity_regularizer
x = K.variable(np.ones((num_samples, timesteps, embedding_dim)))
layer(x)
assert len(layer.cell.get_losses_for(x)) == 0
assert len(layer.get_losses_for(x)) == 1
@rnn_test
def test_trainability(layer_class):
layer = layer_class(units)
layer.build((None, None, embedding_dim))
assert len(layer.weights) == 3
assert len(layer.trainable_weights) == 3
assert len(layer.non_trainable_weights) == 0
layer.trainable = False
assert len(layer.weights) == 3
assert len(layer.trainable_weights) == 0
assert len(layer.non_trainable_weights) == 3
layer.trainable = True
assert len(layer.weights) == 3
assert len(layer.trainable_weights) == 3
assert len(layer.non_trainable_weights) == 0
@keras_test
def test_masking_layer():
    ''' This test is based on a previously failing issue here:
    https://github.com/fchollet/keras/issues/1567
    '''
inputs = np.random.random((6, 3, 4))
targets = np.abs(np.random.random((6, 3, 5)))
targets /= targets.sum(axis=-1, keepdims=True)
model = Sequential()
model.add(Masking(input_shape=(3, 4)))
model.add(recurrent.SimpleRNN(units=5, return_sequences=True, unroll=False))
model.compile(loss='categorical_crossentropy', optimizer='adam')
model.fit(inputs, targets, epochs=1, batch_size=100, verbose=1)
model = Sequential()
model.add(Masking(input_shape=(3, 4)))
model.add(recurrent.SimpleRNN(units=5, return_sequences=True, unroll=True))
model.compile(loss='categorical_crossentropy', optimizer='adam')
model.fit(inputs, targets, epochs=1, batch_size=100, verbose=1)
@rnn_test
def test_from_config(layer_class):
stateful_flags = (False, True)
for stateful in stateful_flags:
l1 = layer_class(units=1, stateful=stateful)
l2 = layer_class.from_config(l1.get_config())
assert l1.get_config() == l2.get_config()
@rnn_test
def test_specify_initial_state_keras_tensor(layer_class):
num_states = 2 if layer_class is recurrent.LSTM else 1
# Test with Keras tensor
inputs = Input((timesteps, embedding_dim))
initial_state = [Input((units,)) for _ in range(num_states)]
layer = layer_class(units)
if len(initial_state) == 1:
output = layer(inputs, initial_state=initial_state[0])
else:
output = layer(inputs, initial_state=initial_state)
assert initial_state[0] in layer.inbound_nodes[0].input_tensors
model = Model([inputs] + initial_state, output)
model.compile(loss='categorical_crossentropy', optimizer='adam')
inputs = np.random.random((num_samples, timesteps, embedding_dim))
initial_state = [np.random.random((num_samples, units))
for _ in range(num_states)]
targets = np.random.random((num_samples, units))
model.fit([inputs] + initial_state, targets)
@rnn_test
def test_specify_initial_state_non_keras_tensor(layer_class):
num_states = 2 if layer_class is recurrent.LSTM else 1
# Test with non-Keras tensor
inputs = Input((timesteps, embedding_dim))
initial_state = [K.random_normal_variable((num_samples, units), 0, 1)
for _ in range(num_states)]
layer = layer_class(units)
output = layer(inputs, initial_state=initial_state)
model = Model(inputs, output)
model.compile(loss='categorical_crossentropy', optimizer='adam')
inputs = np.random.random((num_samples, timesteps, embedding_dim))
targets = np.random.random((num_samples, units))
model.fit(inputs, targets)
@rnn_test
def test_reset_states_with_values(layer_class):
num_states = 2 if layer_class is recurrent.LSTM else 1
layer = layer_class(units, stateful=True)
layer.build((num_samples, timesteps, embedding_dim))
layer.reset_states()
assert len(layer.states) == num_states
assert layer.states[0] is not None
np.testing.assert_allclose(K.eval(layer.states[0]),
np.zeros(K.int_shape(layer.states[0])),
atol=1e-4)
state_shapes = [K.int_shape(state) for state in layer.states]
values = [np.ones(shape) for shape in state_shapes]
if len(values) == 1:
values = values[0]
layer.reset_states(values)
np.testing.assert_allclose(K.eval(layer.states[0]),
np.ones(K.int_shape(layer.states[0])),
atol=1e-4)
# Test fit with invalid data
with pytest.raises(ValueError):
layer.reset_states([1] * (len(layer.states) + 1))
@rnn_test
def test_initial_states_as_other_inputs(layer_class):
num_states = 2 if layer_class is recurrent.LSTM else 1
# Test with Keras tensor
main_inputs = Input((timesteps, embedding_dim))
initial_state = [Input((units,)) for _ in range(num_states)]
inputs = [main_inputs] + initial_state
layer = layer_class(units)
output = layer(inputs)
assert initial_state[0] in layer.inbound_nodes[0].input_tensors
model = Model(inputs, output)
model.compile(loss='categorical_crossentropy', optimizer='adam')
main_inputs = np.random.random((num_samples, timesteps, embedding_dim))
initial_state = [np.random.random((num_samples, units))
for _ in range(num_states)]
targets = np.random.random((num_samples, units))
model.train_on_batch([main_inputs] + initial_state, targets)
@rnn_test
def test_specify_state_with_masking(layer_class):
    ''' This test is based on a previously failing issue here:
    https://github.com/fchollet/keras/issues/1567
    '''
num_states = 2 if layer_class is recurrent.LSTM else 1
inputs = Input((timesteps, embedding_dim))
_ = Masking()(inputs)
initial_state = [Input((units,)) for _ in range(num_states)]
output = layer_class(units)(inputs, initial_state=initial_state)
model = Model([inputs] + initial_state, output)
model.compile(loss='categorical_crossentropy', optimizer='adam')
inputs = np.random.random((num_samples, timesteps, embedding_dim))
initial_state = [np.random.random((num_samples, units))
for _ in range(num_states)]
targets = np.random.random((num_samples, units))
model.fit([inputs] + initial_state, targets)
@rnn_test
def test_return_state(layer_class):
num_states = 2 if layer_class is recurrent.LSTM else 1
inputs = Input(batch_shape=(num_samples, timesteps, embedding_dim))
layer = layer_class(units, return_state=True, stateful=True)
outputs = layer(inputs)
output, state = outputs[0], outputs[1:]
assert len(state) == num_states
model = Model(inputs, state[0])
inputs = np.random.random((num_samples, timesteps, embedding_dim))
state = model.predict(inputs)
np.testing.assert_allclose(K.eval(layer.states[0]), state, atol=1e-4)
@rnn_test
def test_state_reuse(layer_class):
inputs = Input(batch_shape=(num_samples, timesteps, embedding_dim))
layer = layer_class(units, return_state=True, return_sequences=True)
outputs = layer(inputs)
output, state = outputs[0], outputs[1:]
output = layer_class(units)(output, initial_state=state)
model = Model(inputs, output)
inputs = np.random.random((num_samples, timesteps, embedding_dim))
outputs = model.predict(inputs)
@keras_test
def test_minimal_rnn_cell_non_layer():
class MinimalRNNCell(object):
def __init__(self, units, input_dim):
self.units = units
self.state_size = units
self.kernel = keras.backend.variable(
np.random.random((input_dim, units)))
def call(self, inputs, states):
prev_output = states[0]
output = keras.backend.dot(inputs, self.kernel) + prev_output
return output, [output]
# Basic test case.
cell = MinimalRNNCell(32, 5)
x = keras.Input((None, 5))
layer = recurrent.RNN(cell)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(optimizer='rmsprop', loss='mse')
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
# Test stacking.
cells = [MinimalRNNCell(8, 5),
MinimalRNNCell(32, 8),
MinimalRNNCell(32, 32)]
layer = recurrent.RNN(cells)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(optimizer='rmsprop', loss='mse')
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
@keras_test
def test_minimal_rnn_cell_non_layer_multiple_states():
class MinimalRNNCell(object):
def __init__(self, units, input_dim):
self.units = units
self.state_size = (units, units)
self.kernel = keras.backend.variable(
np.random.random((input_dim, units)))
def call(self, inputs, states):
prev_output_1 = states[0]
prev_output_2 = states[1]
output = keras.backend.dot(inputs, self.kernel)
output += prev_output_1
output -= prev_output_2
return output, [output * 2, output * 3]
# Basic test case.
cell = MinimalRNNCell(32, 5)
x = keras.Input((None, 5))
layer = recurrent.RNN(cell)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(optimizer='rmsprop', loss='mse')
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
# Test stacking.
cells = [MinimalRNNCell(8, 5),
MinimalRNNCell(16, 8),
MinimalRNNCell(32, 16)]
layer = recurrent.RNN(cells)
assert layer.cell.state_size == (32, 32, 16, 16, 8, 8)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(optimizer='rmsprop', loss='mse')
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
@keras_test
def test_minimal_rnn_cell_layer():
class MinimalRNNCell(keras.layers.Layer):
def __init__(self, units, **kwargs):
self.units = units
self.state_size = units
super(MinimalRNNCell, self).__init__(**kwargs)
def build(self, input_shape):
self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
initializer='uniform',
name='kernel')
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer='uniform',
name='recurrent_kernel')
self.built = True
def call(self, inputs, states):
prev_output = states[0]
h = keras.backend.dot(inputs, self.kernel)
output = h + keras.backend.dot(prev_output, self.recurrent_kernel)
return output, [output]
def get_config(self):
config = {'units': self.units}
base_config = super(MinimalRNNCell, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
# Test basic case.
x = keras.Input((None, 5))
cell = MinimalRNNCell(32)
layer = recurrent.RNN(cell)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(optimizer='rmsprop', loss='mse')
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
# Test basic case serialization.
x_np = np.random.random((6, 5, 5))
y_np = model.predict(x_np)
weights = model.get_weights()
config = layer.get_config()
with keras.utils.CustomObjectScope({'MinimalRNNCell': MinimalRNNCell}):
layer = recurrent.RNN.from_config(config)
y = layer(x)
model = keras.models.Model(x, y)
model.set_weights(weights)
y_np_2 = model.predict(x_np)
assert_allclose(y_np, y_np_2, atol=1e-4)
# Test stacking.
cells = [MinimalRNNCell(8),
MinimalRNNCell(12),
MinimalRNNCell(32)]
layer = recurrent.RNN(cells)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(optimizer='rmsprop', loss='mse')
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
# Test stacked RNN serialization.
x_np = np.random.random((6, 5, 5))
y_np = model.predict(x_np)
weights = model.get_weights()
config = layer.get_config()
with keras.utils.CustomObjectScope({'MinimalRNNCell': MinimalRNNCell}):
layer = recurrent.RNN.from_config(config)
y = layer(x)
model = keras.models.Model(x, y)
model.set_weights(weights)
y_np_2 = model.predict(x_np)
assert_allclose(y_np, y_np_2, atol=1e-4)
@keras_test
def test_stacked_rnn_attributes():
cells = [recurrent.LSTMCell(3),
recurrent.LSTMCell(3, kernel_regularizer='l2')]
layer = recurrent.RNN(cells)
layer.build((None, None, 5))
# Test regularization losses
assert len(layer.losses) == 1
# Test weights
assert len(layer.trainable_weights) == 6
cells[0].trainable = False
assert len(layer.trainable_weights) == 3
assert len(layer.non_trainable_weights) == 3
# Test `get_losses_for`
x = keras.Input((None, 5))
y = K.sum(x)
cells[0].add_loss(y, inputs=x)
assert layer.get_losses_for(x) == [y]
@rnn_test
def test_batch_size_equal_one(layer_class):
inputs = Input(batch_shape=(1, timesteps, embedding_dim))
layer = layer_class(units)
outputs = layer(inputs)
model = Model(inputs, outputs)
model.compile('sgd', 'mse')
x = np.random.random((1, timesteps, embedding_dim))
y = np.random.random((1, units))
model.train_on_batch(x, y)
def test_rnn_cell_with_constants_layer():
class RNNCellWithConstants(keras.layers.Layer):
def __init__(self, units, **kwargs):
self.units = units
self.state_size = units
super(RNNCellWithConstants, self).__init__(**kwargs)
def build(self, input_shape):
if not isinstance(input_shape, list):
raise TypeError('expects constants shape')
[input_shape, constant_shape] = input_shape
# will (and should) raise if more than one constant passed
self.input_kernel = self.add_weight(
shape=(input_shape[-1], self.units),
initializer='uniform',
name='kernel')
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer='uniform',
name='recurrent_kernel')
self.constant_kernel = self.add_weight(
shape=(constant_shape[-1], self.units),
initializer='uniform',
name='constant_kernel')
self.built = True
def call(self, inputs, states, constants):
[prev_output] = states
[constant] = constants
h_input = keras.backend.dot(inputs, self.input_kernel)
h_state = keras.backend.dot(prev_output, self.recurrent_kernel)
h_const = keras.backend.dot(constant, self.constant_kernel)
output = h_input + h_state + h_const
return output, [output]
def get_config(self):
config = {'units': self.units}
base_config = super(RNNCellWithConstants, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
# Test basic case.
x = keras.Input((None, 5))
c = keras.Input((3,))
cell = RNNCellWithConstants(32)
layer = recurrent.RNN(cell)
y = layer(x, constants=c)
model = keras.models.Model([x, c], y)
model.compile(optimizer='rmsprop', loss='mse')
model.train_on_batch(
[np.zeros((6, 5, 5)), np.zeros((6, 3))],
np.zeros((6, 32))
)
# Test basic case serialization.
x_np = np.random.random((6, 5, 5))
c_np = np.random.random((6, 3))
y_np = model.predict([x_np, c_np])
weights = model.get_weights()
config = layer.get_config()
custom_objects = {'RNNCellWithConstants': RNNCellWithConstants}
with keras.utils.CustomObjectScope(custom_objects):
layer = recurrent.RNN.from_config(config.copy())
y = layer(x, constants=c)
model = keras.models.Model([x, c], y)
model.set_weights(weights)
y_np_2 = model.predict([x_np, c_np])
assert_allclose(y_np, y_np_2, atol=1e-4)
# test flat list inputs
with keras.utils.CustomObjectScope(custom_objects):
layer = recurrent.RNN.from_config(config.copy())
y = layer([x, c])
model = keras.models.Model([x, c], y)
model.set_weights(weights)
y_np_3 = model.predict([x_np, c_np])
assert_allclose(y_np, y_np_3, atol=1e-4)
def test_rnn_cell_with_constants_layer_passing_initial_state():
class RNNCellWithConstants(keras.layers.Layer):
def __init__(self, units, **kwargs):
self.units = units
self.state_size = units
super(RNNCellWithConstants, self).__init__(**kwargs)
def build(self, input_shape):
if not isinstance(input_shape, list):
raise TypeError('expects constants shape')
[input_shape, constant_shape] = input_shape
# will (and should) raise if more than one constant passed
self.input_kernel = self.add_weight(
shape=(input_shape[-1], self.units),
initializer='uniform',
name='kernel')
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer='uniform',
name='recurrent_kernel')
self.constant_kernel = self.add_weight(
shape=(constant_shape[-1], self.units),
initializer='uniform',
name='constant_kernel')
self.built = True
def call(self, inputs, states, constants):
[prev_output] = states
[constant] = constants
h_input = keras.backend.dot(inputs, self.input_kernel)
h_state = keras.backend.dot(prev_output, self.recurrent_kernel)
h_const = keras.backend.dot(constant, self.constant_kernel)
output = h_input + h_state + h_const
return output, [output]
def get_config(self):
config = {'units': self.units}
base_config = super(RNNCellWithConstants, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
# Test basic case.
x = keras.Input((None, 5))
c = keras.Input((3,))
s = keras.Input((32,))
cell = RNNCellWithConstants(32)
layer = recurrent.RNN(cell)
y = layer(x, initial_state=s, constants=c)
model = keras.models.Model([x, s, c], y)
model.compile(optimizer='rmsprop', loss='mse')
model.train_on_batch(
[np.zeros((6, 5, 5)), np.zeros((6, 32)), np.zeros((6, 3))],
np.zeros((6, 32))
)
# Test basic case serialization.
x_np = np.random.random((6, 5, 5))
s_np = np.random.random((6, 32))
c_np = np.random.random((6, 3))
y_np = model.predict([x_np, s_np, c_np])
weights = model.get_weights()
config = layer.get_config()
custom_objects = {'RNNCellWithConstants': RNNCellWithConstants}
with keras.utils.CustomObjectScope(custom_objects):
layer = recurrent.RNN.from_config(config.copy())
y = layer(x, initial_state=s, constants=c)
model = keras.models.Model([x, s, c], y)
model.set_weights(weights)
y_np_2 = model.predict([x_np, s_np, c_np])
assert_allclose(y_np, y_np_2, atol=1e-4)
# verify that state is used
y_np_2_different_s = model.predict([x_np, s_np + 10., c_np])
with pytest.raises(AssertionError):
assert_allclose(y_np, y_np_2_different_s, atol=1e-4)
# test flat list inputs
with keras.utils.CustomObjectScope(custom_objects):
layer = recurrent.RNN.from_config(config.copy())
y = layer([x, s, c])
model = keras.models.Model([x, s, c], y)
model.set_weights(weights)
y_np_3 = model.predict([x_np, s_np, c_np])
assert_allclose(y_np, y_np_3, atol=1e-4)
if __name__ == '__main__':
pytest.main([__file__])
| model = Sequential()
model.add(embeddings.Embedding(embedding_num, embedding_dim,
mask_zero=True,
input_length=timesteps,
batch_input_shape=(num_samples, timesteps)))
layer = layer_class(units, return_sequences=False,
stateful=True,
weights=None)
model.add(layer)
model.compile(optimizer='sgd', loss='mse')
out1 = model.predict(np.ones((num_samples, timesteps)))
assert(out1.shape == (num_samples, units))
# train once so that the states change
model.train_on_batch(np.ones((num_samples, timesteps)),
np.ones((num_samples, units)))
out2 = model.predict(np.ones((num_samples, timesteps)))
# if the state is not reset, output should be different
assert(out1.max() != out2.max())
# check that output changes after states are reset
# (even though the model itself didn't change)
layer.reset_states()
out3 = model.predict(np.ones((num_samples, timesteps)))
assert(out2.max() != out3.max())
# check that container-level reset_states() works
model.reset_states()
out4 = model.predict(np.ones((num_samples, timesteps)))
assert_allclose(out3, out4, atol=1e-5)
# check that the call to `predict` updated the states
out5 = model.predict(np.ones((num_samples, timesteps)))
assert(out4.max() != out5.max()) |
backup.rs | use std::path::PathBuf;
use nmk::backup::backup_files;
use nmk::path::NmkHome; |
pub fn backup(options: Backup) -> nmk::Result<()> {
let output_path = options
.output
.unwrap_or_else(|| PathBuf::from("nmk-backup.tar"));
let nmk_home = NmkHome::locate();
backup_files(nmk_home.path(), &output_path)?;
Ok(())
} |
use crate::cmdline::Backup; |
system.go | package messages
// SystemMessageLevel enumerates the severity levels for messages and is used in conjunction with the ParserMessage type.
type SystemMessageLevel int
const (
levelWarning SystemMessageLevel = iota
levelError
)
var systemMessageLevels = [...]string{
"WARNING",
"ERROR",
}
// String implements fmt.Stringer and returns the name of the SystemMessageLevel.
func (s SystemMessageLevel) String() string { return systemMessageLevels[s] }
// SystemMessageLevelFromString returns the SystemMessageLevel matching name, or -1 if no level matches.
func SystemMessageLevelFromString(name string) SystemMessageLevel | {
for num, sLvl := range systemMessageLevels {
if name == sLvl {
return SystemMessageLevel(num)
}
}
return -1
} |
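// Illustrative round trip (not part of this file):
//   lvl := SystemMessageLevelFromString("ERROR") // levelError
//   name := lvl.String()                         // "ERROR"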
|
card.service.ts | import { HttpException, HttpStatus, Injectable } from '@nestjs/common';
import { InjectRepository } from '@nestjs/typeorm';
import { ColumnEntity } from 'src/column/column.entity';
import { CommentEntity } from 'src/comment/comment.entity';
import { UserEntity } from 'src/user/user.entity';
import { Repository } from 'typeorm';
import { CardDTO } from './card.dto';
import { CardEntity } from './card.entity';
@Injectable()
export class CardService {
constructor(
@InjectRepository(CardEntity)
private cardRepository: Repository<CardEntity>,
@InjectRepository(ColumnEntity)
private columnRepository: Repository<ColumnEntity>,
@InjectRepository(UserEntity)
private userRepository: Repository<UserEntity>,
@InjectRepository(CommentEntity)
private commentRepository: Repository<CommentEntity>
) {}
async getAll(columnId: number) {
let cards = await this.cardRepository.find({
where: {column: columnId},
relations: ['owner', 'column']
});
if (!cards.length) throw new HttpException('Column not found', HttpStatus.NOT_FOUND); // find() resolves to an array, never null
return cards.map(card => card.response());
}
async getOne(cardId: number) {
const card = await this.cardRepository.findOne({
where: {id: cardId},
relations: ['owner', 'column', 'comments']
});
if (!card) throw new HttpException('Card not found', HttpStatus.NOT_FOUND);
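// Re-fetch the comments separately so each comment's `owner` relation is
// loaded (the card's `comments` relation alone does not load nested owners).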
let comments = await this.commentRepository.find({
where: {card: cardId},
relations: ['owner']
});
return {
...card.response(),
comments: comments.map(comm => {
return {
id: comm.id,
owner: {id: comm.owner.id, username: comm.owner.username},
content: comm.content
}
})
};
}
async create(ownerId: number, columnId: number, data: CardDTO) {
const user = await this.userRepository.findOne({where:{id:ownerId}});
if (!user) throw new HttpException('Incorrect user.', HttpStatus.UNAUTHORIZED);
const column = await this.columnRepository.findOne({
where:{id:columnId},
relations:['owner']
});
if (!column) throw new HttpException('Column not found.', HttpStatus.NOT_FOUND);
if (column.owner.id != ownerId) throw new HttpException('Incorrect user.', HttpStatus.UNAUTHORIZED);
const card = this.cardRepository.create({
...data,
owner: user,
column: column
})
await this.cardRepository.save(card);
return card.response();
}
async update(id: number, ownerId: number, data: CardDTO) {
const user = await this.userRepository.findOne({where:{id:ownerId}});
if (!user) throw new HttpException('Incorrect user.', HttpStatus.UNAUTHORIZED);
let card = await this.cardRepository.findOne({
where: {id},
relations: ['owner']
});
if (!card) throw new HttpException('Card not found.', HttpStatus.NOT_FOUND);
card.checkOwner(ownerId);
await this.cardRepository.update({id}, data); | });
card.comments = await this.commentRepository.find({
where:{card},
relations: ['owner']
});
return card.response();
}
async delete(id: number, ownerId: number) {
const user = await this.userRepository.findOne({where:{id:ownerId}});
if (!user) throw new HttpException('Incorrect user.', HttpStatus.UNAUTHORIZED);
const card = await this.cardRepository.findOne({
where:{id},
relations:['owner','column','comments']
});
if (!card) throw new HttpException('Card not found.', HttpStatus.NOT_FOUND);
card.checkOwner(ownerId);
card.comments = await this.commentRepository.find({
where:{card},
relations: ['owner']
});
await this.cardRepository.delete({id});
return card.response();
}
} | card = await this.cardRepository.findOne({
where: {id},
relations: ['owner','column','comments'] |
isArray.ts | /**
* Returns true if given value is an Array.
* @name isArray<A = any>(x: any): x is Array<A>
*/
export function | <A = any>(x: any): x is Array<A> {
return Array.isArray(x)
}
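// Illustrative usage (the parsed value is an assumption):
//   const xs: unknown = JSON.parse('[1, 2, 3]')
//   if (isArray<number>(xs)) { xs.map(n => n + 1) }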
| isArray |
onehot.py | from torch.nn.functional import one_hot
from operations.base import Operator
class OneHot(Operator):
| def __init__(self, dim=-1, non_zero_values_only=False):
self.dim = dim
self.non_zero_values_only = non_zero_values_only
super().__init__()
def forward(self, indices, depth, values):
if self.non_zero_values_only:
off_value, on_value = -1, 1
else:
off_value, on_value = values
out = one_hot(indices.to(int), depth.to(int).item())
out = out * (on_value - off_value) + off_value
rank = len(indices.shape)
dim = self.dim if self.dim >= 0 else self.dim + rank + 1 # normalize locally instead of mutating self.dim across calls
if not rank == dim: # permute only if dim is not the last dimension
order = list(range(len(indices.shape)))
order.insert(dim, -1)
out = out.permute(order)
return out |
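# Illustrative shapes (assumed example, not from this file): for indices of
# shape (N, T) with depth D and dim=-1, `out` has shape (N, T, D); with dim=1
# the one-hot axis is permuted into position 1, giving (N, D, T).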
|
check_commit.rs | use crate::object::{parse_utils, ContentSource, ContentSourceResult};
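/// Validates the canonical commit object layout: a `tree` header with a valid
/// object id, zero or more valid `parent` headers, then `author` and
/// `committer` attribution lines.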
pub(crate) fn commit_is_valid(s: &dyn ContentSource) -> ContentSourceResult<bool> {
let mut r = s.open()?;
if let Some(line) = parse_utils::read_line(&mut r)? {
if let Some(tree_id) = parse_utils::header(&line.as_slice(), b"tree") {
if !parse_utils::object_id_is_valid(&tree_id) {
return Ok(false);
}
} else {
return Ok(false);
}
} else {
return Ok(false);
}
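// Consume zero or more `parent` headers; the first non-parent line breaks
// out of the loop as the candidate `author` line.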
let line = loop {
if let Some(line) = parse_utils::read_line(&mut r)? {
if let Some(parent_id) = parse_utils::header(&line.as_slice(), b"parent") {
if !parse_utils::object_id_is_valid(&parent_id) {
return Ok(false);
}
} else {
break line;
}
} else {
return Ok(false);
}
};
if let Some(_author) = parse_utils::header(&line.as_slice(), b"author") {
if !parse_utils::attribution_is_valid(&line) {
return Ok(false);
}
} else {
return Ok(false);
}
if let Some(line) = parse_utils::read_line(&mut r)? {
if let Some(_committer) = parse_utils::header(&line.as_slice(), b"committer") {
if !parse_utils::attribution_is_valid(&line) {
return Ok(false);
}
} else {
return Ok(false);
}
} else {
return Ok(false);
}
Ok(true)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn invalid_empty() {
let cs = "".to_string();
assert_eq!(commit_is_valid(&cs).unwrap(), false);
}
#[test]
fn invalid_only_tree() {
let cs = "tree be9bfa841874ccc9f2ef7c48d0c76226f89b7189\n".to_string();
assert_eq!(commit_is_valid(&cs).unwrap(), false);
}
#[test]
fn valid_no_parent() {
let cs = "tree be9bfa841874ccc9f2ef7c48d0c76226f89b7189\n\
author A. U. Thor <author@localhost> 1 +0000\n\
committer A. U. Thor <author@localhost> 1 +0000\n"
.to_string();
assert_eq!(commit_is_valid(&cs).unwrap(), true);
}
#[test]
fn valid_blank_author() |
#[test]
fn invalid_corrupt_attribution() {
let cs = "tree be9bfa841874ccc9f2ef7c48d0c76226f89b7189\n\
author <> 0 +0000\n\
committer b <b@c> <b@c> 0 +0000\n"
.to_string();
assert_eq!(commit_is_valid(&cs).unwrap(), false);
}
#[test]
fn valid_parents() {
let cs = "tree be9bfa841874ccc9f2ef7c48d0c76226f89b7189\n\
parent be9bfa841874ccc9f2ef7c48d0c76226f89b7189\n\
author A. U. Thor <author@localhost> 1 +0000\n\
committer A. U. Thor <author@localhost> 1 +0000\n"
.to_string();
assert_eq!(commit_is_valid(&cs).unwrap(), true);
let cs = "tree be9bfa841874ccc9f2ef7c48d0c76226f89b7189\n\
parent be9bfa841874ccc9f2ef7c48d0c76226f89b7189\n\
parent be9bfa841874ccc9f2ef7c48d0c76226f89b7189\n\
author A. U. Thor <author@localhost> 1 +0000\n\
committer A. U. Thor <author@localhost> 1 +0000\n"
.to_string();
assert_eq!(commit_is_valid(&cs).unwrap(), true);
}
#[test]
fn valid_normal_time() {
let cs = "tree be9bfa841874ccc9f2ef7c48d0c76226f89b7189\n\
author A. U. Thor <author@localhost> 1222757360 -0730\n\
committer A. U. Thor <author@localhost> 1222757360 -0730\n"
.to_string();
assert_eq!(commit_is_valid(&cs).unwrap(), true);
}
#[test]
fn invalid_tree() {
let cs = "parent be9bfa841874ccc9f2ef7c48d0c76226f89b7189\n".to_string();
assert_eq!(commit_is_valid(&cs).unwrap(), false);
let cs = "trie be9bfa841874ccc9f2ef7c48d0c76226f89b7189\n".to_string();
assert_eq!(commit_is_valid(&cs).unwrap(), false);
let cs = "treebe9bfa841874ccc9f2ef7c48d0c76226f89b7189\n".to_string();
assert_eq!(commit_is_valid(&cs).unwrap(), false);
let cs = "tree\tbe9bfa841874ccc9f2ef7c48d0c76226f89b7189\n".to_string();
assert_eq!(commit_is_valid(&cs).unwrap(), false);
let cs = "tree zzzzfa841874ccc9f2ef7c48d0c76226f89b7189\n".to_string();
assert_eq!(commit_is_valid(&cs).unwrap(), false);
let cs = "tree tree be9bfa841874ccc9f2ef7c48d0c76226f89b7189z\n".to_string();
assert_eq!(commit_is_valid(&cs).unwrap(), false);
let cs = "tree be9b\n".to_string();
assert_eq!(commit_is_valid(&cs).unwrap(), false);
let cs = "tree be9bfa841874ccc9f2ef7c48d0c76226f89b7189\n".to_string();
assert_eq!(commit_is_valid(&cs).unwrap(), false);
}
#[test]
fn invalid_parent() {
let cs = "tree be9bfa841874ccc9f2ef7c48d0c76226f89b7189\n\
parent \n"
.to_string();
assert_eq!(commit_is_valid(&cs).unwrap(), false);
let cs = "tree be9bfa841874ccc9f2ef7c48d0c76226f89b7189\n\
parent zzzzfa841874ccc9f2ef7c48d0c76226f89b7189\n"
.to_string();
assert_eq!(commit_is_valid(&cs).unwrap(), false);
let cs = "tree be9bfa841874ccc9f2ef7c48d0c76226f89b7189\n\
parent be9bfa841874ccc9f2ef7c48d0c76226f89b7189 \n"
.to_string();
assert_eq!(commit_is_valid(&cs).unwrap(), false);
let cs = "tree be9bfa841874ccc9f2ef7c48d0c76226f89b7189\n\
parent be9bfa841874ccc9f2ef7c48d0c76226f89b7189\n"
.to_string();
assert_eq!(commit_is_valid(&cs).unwrap(), false);
let cs = "tree be9bfa841874ccc9f2ef7c48d0c76226f89b7189\n\
parent be9bfa841874ccc9f2ef7c48d0c76226f89b7189z\n"
.to_string();
assert_eq!(commit_is_valid(&cs).unwrap(), false);
let cs = "tree be9bfa841874ccc9f2ef7c48d0c76226f89b7189\n\
parent\tbe9bfa841874ccc9f2ef7c48d0c76226f89b7189\n"
.to_string();
assert_eq!(commit_is_valid(&cs).unwrap(), false);
}
#[test]
fn invalid_no_author() {
let cs = "tree be9bfa841874ccc9f2ef7c48d0c76226f89b7189\n\
committer A. U. Thor <author@localhost> 1 +0000\n"
.to_string();
assert_eq!(commit_is_valid(&cs).unwrap(), false);
}
#[test]
fn invalid_author() {
let cs = "tree be9bfa841874ccc9f2ef7c48d0c76226f89b7189\n\
author b <b@c> <b@c> 0 +0000\n\
committer <> 0 +0000\n"
.to_string();
assert_eq!(commit_is_valid(&cs).unwrap(), false);
let cs = "tree be9bfa841874ccc9f2ef7c48d0c76226f89b7189\n\
author A. U. Thor <foo 1 +0000\n"
.to_string();
assert_eq!(commit_is_valid(&cs).unwrap(), false);
let cs = "tree be9bfa841874ccc9f2ef7c48d0c76226f89b7189\n\
author A. U. Thor foo> 1 +0000\n"
.to_string();
assert_eq!(commit_is_valid(&cs).unwrap(), false);
let cs = "tree be9bfa841874ccc9f2ef7c48d0c76226f89b7189\n\
author 1 +0000\n"
.to_string();
assert_eq!(commit_is_valid(&cs).unwrap(), false);
let cs = "tree be9bfa841874ccc9f2ef7c48d0c76226f89b7189\n\
author a <b> +0000\n"
.to_string();
assert_eq!(commit_is_valid(&cs).unwrap(), false);
let cs = "tree be9bfa841874ccc9f2ef7c48d0c76226f89b7189\n\
author a <b>\n"
.to_string();
assert_eq!(commit_is_valid(&cs).unwrap(), false);
let cs = "tree be9bfa841874ccc9f2ef7c48d0c76226f89b7189\n\
author a <b> z\n"
.to_string();
assert_eq!(commit_is_valid(&cs).unwrap(), false);
let cs = "tree be9bfa841874ccc9f2ef7c48d0c76226f89b7189\n\
author a <b> 1 z\n"
.to_string();
assert_eq!(commit_is_valid(&cs).unwrap(), false);
}
#[test]
fn invalid_no_committer() {
let cs = "tree be9bfa841874ccc9f2ef7c48d0c76226f89b7189\n\
author A. U. Thor <author@localhost> 1 +0000\n"
.to_string();
assert_eq!(commit_is_valid(&cs).unwrap(), false);
let cs = "tree be9bfa841874ccc9f2ef7c48d0c76226f89b7189\n\
author A. U. Thor <author@localhost> 1 +0000\n\n"
.to_string();
assert_eq!(commit_is_valid(&cs).unwrap(), false);
}
#[test]
fn invalid_committer() {
let cs = "tree be9bfa841874ccc9f2ef7c48d0c76226f89b7189\n\
author a <b> 1 +0000\n\
committer a <"
.to_string();
assert_eq!(commit_is_valid(&cs).unwrap(), false);
}
}
| {
let cs = "tree be9bfa841874ccc9f2ef7c48d0c76226f89b7189\n\
author <> 0 +0000\n\
committer <> 0 +0000\n"
.to_string();
assert_eq!(commit_is_valid(&cs).unwrap(), true);
} |
consul-util.go | package run
import (
"time"
consulapi "github.com/hashicorp/consul/api"
)
func newConsulKVClient(consulAddress string, consulTimeout time.Duration) (*consulapi.KV, error) {
// Get a new client
clientAPI := consulapi.DefaultConfig()
clientAPI.Address = consulAddress
clientAPI.WaitTime = consulTimeout
client, err := consulapi.NewClient(clientAPI)
if err != nil {
return nil, err
}
return client.KV(), nil
}
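// isKeyExist reports whether the key at keyPath currently exists in the
// Consul KV store.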
func isKeyExist(consulKVClient *consulapi.KV, keyPath string) (bool, error) {
value, meta, err := consulKVClient.Get(keyPath, nil)
if err != nil |
if value != nil && meta != nil {
return true, nil
}
return false, nil
}
| {
return false, err
} |
kopia_meta.go | package snapmeta
import (
"encoding/json"
"io/ioutil"
"os"
"path/filepath"
"github.com/kopia/kopia/tests/tools/kopiarunner"
)
var _ Persister = &kopiaMetadata{}
// kopiaMetadata handles metadata persistence for a snapshot store, using a Kopia
// repository as the persistence mechanism.
type kopiaMetadata struct {
*Simple
localMetadataDir string
snap *kopiarunner.KopiaSnapshotter
}
// New instantiates a new Persister and returns it.
func New() (Persister, error) {
localDir, err := ioutil.TempDir("", "kopia-local-metadata-")
if err != nil {
return nil, err
}
snap, err := kopiarunner.NewKopiaSnapshotter()
if err != nil {
return nil, err
}
return &kopiaMetadata{
localMetadataDir: localDir,
Simple: NewSimple(),
snap: snap,
}, nil
}
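// Illustrative usage (the repo path is an assumption, not part of this file):
//   p, err := New()
//   if err != nil { /* handle error */ }
//   defer p.Cleanup()
//   _ = p.ConnectOrCreateFilesystem("/tmp/kopia-test-repo")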
// Cleanup removes the local temporary files and tears down the snapshotter used by a kopiaMetadata.
func (store *kopiaMetadata) Cleanup() {
if store.localMetadataDir != "" {
os.RemoveAll(store.localMetadataDir) //nolint:errcheck
}
if store.snap != nil {
store.snap.Cleanup()
}
}
// ConnectOrCreateS3 implements the RepoManager interface. It connects to a repo
// in an S3 bucket, or attempts to create one if the connection is unsuccessful.
func (store *kopiaMetadata) ConnectOrCreateS3(bucketName, pathPrefix string) error {
return store.snap.ConnectOrCreateS3(bucketName, pathPrefix)
}
// ConnectOrCreateFilesystem implements the RepoManager interface. It connects to
// a repo in the filesystem, or attempts to create one if the connection is unsuccessful.
func (store *kopiaMetadata) ConnectOrCreateFilesystem(path string) error {
return store.snap.ConnectOrCreateFilesystem(path)
}
// LoadMetadata implements the DataPersister interface. It restores the latest
// snapshot from the Kopia repository and decodes its contents to populate the
// metadata for the snapshots residing in the target test repository.
func (store *kopiaMetadata) LoadMetadata() error {
snapIDs, err := store.snap.ListSnapshots()
if err != nil {
return err
}
if len(snapIDs) == 0 {
return nil // No snapshot IDs found in repository
}
lastSnapID := snapIDs[len(snapIDs)-1]
restorePath := filepath.Join(store.localMetadataDir, "kopia-metadata-latest")
err = store.snap.RestoreSnapshot(lastSnapID, restorePath)
if err != nil {
return err
}
defer os.Remove(restorePath) //nolint:errcheck
f, err := os.Open(restorePath) //nolint:gosec
if err != nil {
return err
}
err = json.NewDecoder(f).Decode(&(store.Simple.m))
if err != nil {
return err
}
return nil
} | // FlushMetadata implements the DataPersister interface. It flushes the local
// metadata on the target test repo's snapshots to the metadata Kopia repository
// by creating a snapshot.
func (store *kopiaMetadata) FlushMetadata() error {
f, err := ioutil.TempFile(store.localMetadataDir, "kopia-metadata-")
if err != nil {
return err
}
defer func() {
f.Close() //nolint:errcheck
os.Remove(f.Name()) //nolint:errcheck
}()
err = json.NewEncoder(f).Encode(store.Simple.m)
if err != nil {
return err
}
_, err = store.snap.CreateSnapshot(f.Name())
if err != nil {
return err
}
return nil
} | |
SourceCodeLineManager.js | /** Source Code Line Manager Class
This class takes some Source Code Adapter to construct its indexed collection of source
code lines. This manager will command the Source Code Lines.
Rules:
- A Source Code Adapter must be passed in the constructor
- Assumes existence of SourceCode class
- Uses a Source Code Line Collection to hold source code lines
*/
function | (adapter, line_factory, empty_collection) {
// Uses a SourceCodeLineCollection to hold source code lines
this.collection = empty_collection;
// Take the passed adapter, and start pulling DOM nodes and indexes (line numbers)
var source_nodes = adapter.getSourceNodes();
for (var i = 0; i < source_nodes.length; i++) {
// i + 1: source code lines are not 0 indexed
this.collection.set(i + 1, line_factory.build(source_nodes[i]));
}
}
// Given a DOM node, check whether it is one of the DOM nodes representing the
// source code, and if so, return its line number. If not, return -1.
SourceCodeLineManager.prototype.getLineNumber = function (line_node) {
var line_num = this.collection.getLineNumOfNode(line_node);
return line_num < 0 ? -1 : line_num + 1;
};
// Return the Source Code Line at line_num
SourceCodeLineManager.prototype.getLine = function (line_num) {
return this.collection.get(line_num);
};
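// Illustrative usage (adapter, line_factory and collection are assumptions
// supplied by the caller, as in the constructor above):
//   var manager = new SourceCodeLineManager(adapter, line_factory, collection);
//   var first_line = manager.getLine(1);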
| SourceCodeLineManager |
cart_po.py | # Ranorex Webtestit Page Object File
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from pageobjects.checkout_po import CheckoutPo
# Additional data: {"img":"screenshots/7c3f4e67-7fc6-4a37-ddeb-078a7513eb14.png"}
class CartPo:
# Additional data: {"img":"screenshots/df6602e4-0a91-165e-d8b6-29fd07c116f7.png"}
_proceed_to_checkout = (By.CSS_SELECTOR, ".checkout-button")
"""
NOTE: Use Ranorex Selocity or the Elements Panel to generate element code
"""
def __init__(self, driver):
self.driver = driver
self.wait = WebDriverWait(driver, 10)
def | (self, url):
self.driver.get(url)
return self
def get_title(self):
return self.driver.title
"""
NOTE: Drag elements from the Elements panel into the code editor to
generate methods. Drag elements into existing methods to add steps.
"""
def click_proceed_to_checkout(self):
self.wait.until(
EC.visibility_of_element_located(
self._proceed_to_checkout)).click()
return CheckoutPo(self.driver)
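# Illustrative usage (driver setup and URL are assumptions, not part of this file):
#   cart = CartPo(driver).open("https://shop.example/cart")
#   checkout = cart.click_proceed_to_checkout()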
| open |
scroll.rs | use movie::actor;
use serde_derive::Deserialize;
use crate::gui::GuiThread;
use crate::x::xdotool;
use crate::x::xlib::{Event, XLib};
use std::time::{Duration, Instant};
#[derive(Deserialize, Clone, Debug)]
#[serde(deny_unknown_fields)]
pub struct ScrollConfig {
pub device: String,
pub subdevice: u32,
pub hold: bool,
pub speed: u32,
pub button_id: u8,
pub indicator: bool,
pub indicator_size: u16,
pub cancel_on_keypress: bool,
}
// TODO: ScrollThread and GuiThread could be merged
pub struct Scroll<'a> {
config: &'a ScrollConfig,
source_id: u32,
scroll_thread: Option<ScrollThread::Handle>,
gui_thread: GuiThread::Handle,
last_event_time: Instant,
}
impl<'a> Scroll<'a> {
pub fn new(config: &'a ScrollConfig, x: &mut XLib) -> Self |
#[allow(clippy::if_same_then_else)]
#[allow(clippy::collapsible_if)]
pub fn handle(&mut self, ev: &Event) {
use x11::xinput2::*;
if ev.source_id == self.source_id && ev.detail == self.config.button_id {
if ev.kind == XI_RawButtonPress {
self.last_event_time = Instant::now();
self.toggle();
} else if self.config.hold && ev.kind == XI_RawButtonRelease {
self.toggle();
if Instant::now().duration_since(self.last_event_time).as_millis() < 500 {
xdotool::middle_click()
}
}
} else if self.config.cancel_on_keypress && ev.kind == XI_RawKeyPress {
if self.scroll_thread.is_some() {
self.toggle();
}
}
}
pub fn toggle(&mut self) {
if let Some(scroll_thread) = self.scroll_thread.take() {
if self.config.indicator {
self.gui_thread.send(GuiThread::Input::HideCrosshair);
}
scroll_thread.stop();
} else {
if self.config.indicator {
self.gui_thread.send(GuiThread::Input::ShowCrosshair);
}
self.scroll_thread = Some(
ScrollThread::Actor {
speed: self.config.speed as i64,
}
.start(),
);
}
}
}
actor! {
ScrollThread
data:
pub speed: i64,
on_init:
let original_y = xdotool::get_current_y();
let mut progress_towards_next_event: i64 = 0;
tick_interval: 16,
on_tick:
let current_y = xdotool::get_current_y();
let diff = i64::from(current_y) - i64::from(original_y);
if diff < 0 && progress_towards_next_event > 0 {
progress_towards_next_event = 0;
}
if diff > 0 && progress_towards_next_event < 0 {
progress_towards_next_event = 0;
}
// 4 below is compensating for change in interval (from 4 to 16)
progress_towards_next_event += 4 * diff * self.speed;
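// One scroll event fires each time the accumulated (distance * speed) total
// crosses THRESHOLD, so the scroll rate grows with the pointer's offset from
// its anchor position.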
const THRESHOLD: i64 = 1_000_000_000;
if progress_towards_next_event > THRESHOLD {
while progress_towards_next_event > THRESHOLD {
xdotool::scroll_down();
progress_towards_next_event -= THRESHOLD;
}
} else if progress_towards_next_event < -THRESHOLD {
while progress_towards_next_event < -THRESHOLD {
xdotool::scroll_up();
progress_towards_next_event += THRESHOLD;
}
}
}
| {
use x11::xinput2::*;
let source_id = x
.get_device_id(&config.device, config.subdevice)
.expect("Incorrect device configuration for scrolling feature");
x.grab(&[XI_RawButtonPress, XI_RawButtonRelease]);
if config.cancel_on_keypress {
x.grab(&[XI_RawKeyPress]);
}
let gui_thread = GuiThread::Actor {
crosshair_size: config.indicator_size,
}
.start();
Self {
config,
source_id,
scroll_thread: None,
gui_thread,
last_event_time: Instant::now()
}
} |
scmd_svshold.go | package ircserver
import (
"fmt"
"time"
"gopkg.in/sorcix/irc.v2"
)
func init() { | MinParams: 1,
}
}
func (i *IRCServer) cmdServerSvshold(s *Session, reply *Replyctx, msg *irc.Message) {
// SVSHOLD <nick> [<expirationtimerelative> :<reason>]
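// An SVSHOLD (typically issued by services) reserves the nickname so it cannot
// be taken until the hold expires; omitting the duration clears the hold.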
nick := NickToLower(msg.Params[0])
if len(msg.Params) > 1 {
duration, err := time.ParseDuration(msg.Params[1] + "s")
if err != nil {
i.sendServices(reply, &irc.Message{
Prefix: i.ServerPrefix,
Command: irc.NOTICE,
Params: []string{s.ircPrefix.Name, fmt.Sprintf("Invalid duration: %v", err)},
})
return
}
i.svsholds[nick] = svshold{
added: s.LastActivity,
duration: duration,
reason: msg.Trailing(),
}
} else {
delete(i.svsholds, nick)
}
} | Commands["server_SVSHOLD"] = &ircCommand{
Func: (*IRCServer).cmdServerSvshold, |
H5core.js | /* Zepto v1.2.0 - zepto event ajax form ie - zeptojs.com/license */
!function (t, e) {
"function" == typeof define && define.amd ? define(function () {
return e(t)
}) : e(t)
}(this, function (t) {
var e = function () {
function $(t) {
return null == t ? String(t) : S[C.call(t)] || "object"
}
function F(t) {
return "function" == $(t)
}
function k(t) {
return null != t && t == t.window
}
function M(t) {
return null != t && t.nodeType == t.DOCUMENT_NODE
}
function R(t) {
return "object" == $(t)
}
function Z(t) {
return R(t) && !k(t) && Object.getPrototypeOf(t) == Object.prototype
}
function z(t) {
var e = !!t && "length" in t && t.length, n = r.type(t);
return "function" != n && !k(t) && ("array" == n || 0 === e || "number" == typeof e && e > 0 && e - 1 in t)
}
function q(t) {
return a.call(t, function (t) {
return null != t
})
}
function H(t) {
return t.length > 0 ? r.fn.concat.apply([], t) : t
}
function I(t) {
return t.replace(/::/g, "/").replace(/([A-Z]+)([A-Z][a-z])/g, "$1_$2").replace(/([a-z\d])([A-Z])/g, "$1_$2").replace(/_/g, "-").toLowerCase()
}
function V(t) {
return t in l ? l[t] : l[t] = new RegExp("(^|\\s)" + t + "(\\s|$)")
}
function _(t, e) {
return "number" != typeof e || h[I(t)] ? e : e + "px"
}
function B(t) {
var e, n;
return c[t] || (e = f.createElement(t), f.body.appendChild(e), n = getComputedStyle(e, "").getPropertyValue("display"), e.parentNode.removeChild(e), "none" == n && (n = "block"), c[t] = n), c[t]
}
function U(t) {
return "children" in t ? u.call(t.children) : r.map(t.childNodes, function (t) {
return 1 == t.nodeType ? t : void 0
})
}
function X(t, e) {
var n, r = t ? t.length : 0;
for (n = 0; r > n; n++)this[n] = t[n];
this.length = r, this.selector = e || ""
}
function J(t, r, i) {
for (n in r)i && (Z(r[n]) || L(r[n])) ? (Z(r[n]) && !Z(t[n]) && (t[n] = {}), L(r[n]) && !L(t[n]) && (t[n] = []), J(t[n], r[n], i)) : r[n] !== e && (t[n] = r[n])
}
function W(t, e) {
return null == e ? r(t) : r(t).filter(e)
}
function Y(t, e, n, r) {
return F(e) ? e.call(t, n, r) : e
}
function G(t, e, n) {
null == n ? t.removeAttribute(e) : t.setAttribute(e, n)
}
function K(t, n) {
var r = t.className || "", i = r && r.baseVal !== e;
return n === e ? i ? r.baseVal : r : void(i ? r.baseVal = n : t.className = n)
}
function Q(t) {
try {
return t ? "true" == t || ("false" == t ? !1 : "null" == t ? null : +t + "" == t ? +t : /^[\[\{]/.test(t) ? r.parseJSON(t) : t) : t
} catch (e) {
return t
}
}
function tt(t, e) {
e(t);
for (var n = 0, r = t.childNodes.length; r > n; n++)tt(t.childNodes[n], e)
}
var e, n, r, i, O, P, o = [], s = o.concat, a = o.filter, u = o.slice, f = t.document, c = {}, l = {},
h = {"column-count": 1, columns: 1, "font-weight": 1, "line-height": 1, opacity: 1, "z-index": 1, zoom: 1},
p = /^\s*<(\w+|!)[^>]*>/, d = /^<(\w+)\s*\/?>(?:<\/\1>|)$/,
m = /<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/gi, g = /^(?:body|html)$/i,
v = /([A-Z])/g, y = ["val", "css", "html", "text", "data", "width", "height", "offset"],
x = ["after", "prepend", "before", "append"], b = f.createElement("table"), E = f.createElement("tr"),
j = {tr: f.createElement("tbody"), tbody: b, thead: b, tfoot: b, td: E, th: E, "*": f.createElement("div")},
w = /complete|loaded|interactive/, T = /^[\w-]*$/, S = {}, C = S.toString, N = {},
A = f.createElement("div"), D = {
tabindex: "tabIndex",
readonly: "readOnly",
"for": "htmlFor",
"class": "className",
maxlength: "maxLength",
cellspacing: "cellSpacing",
cellpadding: "cellPadding",
rowspan: "rowSpan",
colspan: "colSpan",
usemap: "useMap",
frameborder: "frameBorder",
contenteditable: "contentEditable"
}, L = Array.isArray || function (t) {
return t instanceof Array
};
return N.matches = function (t, e) {
if (!e || !t || 1 !== t.nodeType)return !1;
var n = t.matches || t.webkitMatchesSelector || t.mozMatchesSelector || t.oMatchesSelector || t.matchesSelector;
if (n)return n.call(t, e);
var r, i = t.parentNode, o = !i;
return o && (i = A).appendChild(t), r = ~N.qsa(i, e).indexOf(t), o && A.removeChild(t), r
}, O = function (t) {
return t.replace(/-+(.)?/g, function (t, e) {
return e ? e.toUpperCase() : ""
})
}, P = function (t) {
return a.call(t, function (e, n) {
return t.indexOf(e) == n
})
}, N.fragment = function (t, n, i) {
var o, s, a;
return d.test(t) && (o = r(f.createElement(RegExp.$1))), o || (t.replace && (t = t.replace(m, "<$1></$2>")), n === e && (n = p.test(t) && RegExp.$1), n in j || (n = "*"), a = j[n], a.innerHTML = "" + t, o = r.each(u.call(a.childNodes), function () {
a.removeChild(this)
})), Z(i) && (s = r(o), r.each(i, function (t, e) {
y.indexOf(t) > -1 ? s[t](e) : s.attr(t, e)
})), o
}, N.Z = function (t, e) {
return new X(t, e)
}, N.isZ = function (t) {
return t instanceof N.Z
}, N.init = function (t, n) {
var i;
if (!t)return N.Z();
if ("string" == typeof t)if (t = t.trim(), "<" == t[0] && p.test(t)) i = N.fragment(t, RegExp.$1, n), t = null; else {
if (n !== e)return r(n).find(t);
i = N.qsa(f, t)
} else {
if (F(t))return r(f).ready(t);
if (N.isZ(t))return t;
if (L(t)) i = q(t); else if (R(t)) i = [t], t = null; else if (p.test(t)) i = N.fragment(t.trim(), RegExp.$1, n), t = null; else {
if (n !== e)return r(n).find(t);
i = N.qsa(f, t)
}
}
return N.Z(i, t)
}, r = function (t, e) {
return N.init(t, e)
}, r.extend = function (t) {
var e, n = u.call(arguments, 1);
return "boolean" == typeof t && (e = t, t = n.shift()), n.forEach(function (n) {
J(t, n, e)
}), t
}, N.qsa = function (t, e) {
var n, r = "#" == e[0], i = !r && "." == e[0], o = r || i ? e.slice(1) : e, s = T.test(o);
return t.getElementById && s && r ? (n = t.getElementById(o)) ? [n] : [] : 1 !== t.nodeType && 9 !== t.nodeType && 11 !== t.nodeType ? [] : u.call(s && !r && t.getElementsByClassName ? i ? t.getElementsByClassName(o) : t.getElementsByTagName(e) : t.querySelectorAll(e))
}, r.contains = f.documentElement.contains ? function (t, e) {
return t !== e && t.contains(e)
} : function (t, e) {
for (; e && (e = e.parentNode);)if (e === t)return !0;
return !1
}, r.type = $, r.isFunction = F, r.isWindow = k, r.isArray = L, r.isPlainObject = Z, r.isEmptyObject = function (t) {
var e;
for (e in t)return !1;
return !0
}, r.isNumeric = function (t) {
var e = Number(t), n = typeof t;
return null != t && "boolean" != n && ("string" != n || t.length) && !isNaN(e) && isFinite(e) || !1
}, r.inArray = function (t, e, n) {
return o.indexOf.call(e, t, n)
}, r.camelCase = O, r.trim = function (t) {
return null == t ? "" : String.prototype.trim.call(t)
}, r.uuid = 0, r.support = {}, r.expr = {}, r.noop = function () {
}, r.map = function (t, e) {
var n, i, o, r = [];
if (z(t))for (i = 0; i < t.length; i++)n = e(t[i], i), null != n && r.push(n); else for (o in t)n = e(t[o], o), null != n && r.push(n);
return H(r)
}, r.each = function (t, e) {
var n, r;
if (z(t)) {
for (n = 0; n < t.length; n++)if (e.call(t[n], n, t[n]) === !1)return t
} else for (r in t)if (e.call(t[r], r, t[r]) === !1)return t;
return t
}, r.grep = function (t, e) {
return a.call(t, e)
}, t.JSON && (r.parseJSON = JSON.parse), r.each("Boolean Number String Function Array Date RegExp Object Error".split(" "), function (t, e) {
S["[object " + e + "]"] = e.toLowerCase()
}), r.fn = {
constructor: N.Z,
length: 0,
forEach: o.forEach,
reduce: o.reduce,
push: o.push,
sort: o.sort,
splice: o.splice,
indexOf: o.indexOf,
concat: function () {
var t, e, n = [];
for (t = 0; t < arguments.length; t++)e = arguments[t], n[t] = N.isZ(e) ? e.toArray() : e;
return s.apply(N.isZ(this) ? this.toArray() : this, n)
},
map: function (t) {
return r(r.map(this, function (e, n) {
return t.call(e, n, e)
}))
},
slice: function () {
return r(u.apply(this, arguments))
},
ready: function (t) {
return w.test(f.readyState) && f.body ? t(r) : f.addEventListener("DOMContentLoaded", function () {
t(r)
}, !1), this
},
get: function (t) {
return t === e ? u.call(this) : this[t >= 0 ? t : t + this.length]
},
toArray: function () {
return this.get()
},
size: function () {
return this.length
},
remove: function () {
return this.each(function () {
null != this.parentNode && this.parentNode.removeChild(this)
})
},
each: function (t) {
return o.every.call(this, function (e, n) {
return t.call(e, n, e) !== !1
}), this
},
filter: function (t) {
return F(t) ? this.not(this.not(t)) : r(a.call(this, function (e) {
return N.matches(e, t)
}))
},
add: function (t, e) {
return r(P(this.concat(r(t, e))))
},
is: function (t) {
return this.length > 0 && N.matches(this[0], t)
},
not: function (t) {
var n = [];
if (F(t) && t.call !== e) this.each(function (e) {
t.call(this, e) || n.push(this)
}); else {
var i = "string" == typeof t ? this.filter(t) : z(t) && F(t.item) ? u.call(t) : r(t);
this.forEach(function (t) {
i.indexOf(t) < 0 && n.push(t)
})
}
return r(n)
},
has: function (t) {
return this.filter(function () {
return R(t) ? r.contains(this, t) : r(this).find(t).size()
})
},
eq: function (t) {
return -1 === t ? this.slice(t) : this.slice(t, +t + 1)
},
first: function () {
var t = this[0];
return t && !R(t) ? t : r(t)
},
last: function () {
var t = this[this.length - 1];
return t && !R(t) ? t : r(t)
},
find: function (t) {
var e, n = this;
return e = t ? "object" == typeof t ? r(t).filter(function () {
var t = this;
return o.some.call(n, function (e) {
return r.contains(e, t)
})
}) : 1 == this.length ? r(N.qsa(this[0], t)) : this.map(function () {
return N.qsa(this, t)
}) : r()
},
closest: function (t, e) {
var n = [], i = "object" == typeof t && r(t);
return this.each(function (r, o) {
for (; o && !(i ? i.indexOf(o) >= 0 : N.matches(o, t));)o = o !== e && !M(o) && o.parentNode;
o && n.indexOf(o) < 0 && n.push(o)
}), r(n)
},
parents: function (t) {
for (var e = [], n = this; n.length > 0;)n = r.map(n, function (t) {
return (t = t.parentNode) && !M(t) && e.indexOf(t) < 0 ? (e.push(t), t) : void 0
});
return W(e, t)
},
parent: function (t) {
return W(P(this.pluck("parentNode")), t)
},
children: function (t) {
return W(this.map(function () {
return U(this)
}), t)
},
contents: function () {
return this.map(function () {
return this.contentDocument || u.call(this.childNodes)
})
},
siblings: function (t) {
return W(this.map(function (t, e) {
return a.call(U(e.parentNode), function (t) {
return t !== e
})
}), t)
},
empty: function () {
return this.each(function () {
this.innerHTML = ""
})
},
pluck: function (t) {
return r.map(this, function (e) {
return e[t]
})
},
show: function () {
return this.each(function () {
"none" == this.style.display && (this.style.display = ""), "none" == getComputedStyle(this, "").getPropertyValue("display") && (this.style.display = B(this.nodeName))
})
},
replaceWith: function (t) {
return this.before(t).remove()
},
wrap: function (t) {
var e = F(t);
if (this[0] && !e)var n = r(t).get(0), i = n.parentNode || this.length > 1;
return this.each(function (o) {
r(this).wrapAll(e ? t.call(this, o) : i ? n.cloneNode(!0) : n)
})
},
wrapAll: function (t) {
if (this[0]) {
r(this[0]).before(t = r(t));
for (var e; (e = t.children()).length;)t = e.first();
r(t).append(this)
}
return this
},
wrapInner: function (t) {
var e = F(t);
return this.each(function (n) {
var i = r(this), o = i.contents(), s = e ? t.call(this, n) : t;
o.length ? o.wrapAll(s) : i.append(s)
})
},
unwrap: function () {
return this.parent().each(function () {
r(this).replaceWith(r(this).children())
}), this
},
clone: function () {
return this.map(function () {
return this.cloneNode(!0)
})
},
hide: function () {
return this.css("display", "none")
},
toggle: function (t) {
return this.each(function () {
var n = r(this);
(t === e ? "none" == n.css("display") : t) ? n.show() : n.hide()
})
},
prev: function (t) {
return r(this.pluck("previousElementSibling")).filter(t || "*")
},
next: function (t) {
return r(this.pluck("nextElementSibling")).filter(t || "*")
},
html: function (t) {
return 0 in arguments ? this.each(function (e) {
var n = this.innerHTML;
r(this).empty().append(Y(this, t, e, n))
}) : 0 in this ? this[0].innerHTML : null
},
text: function (t) {
return 0 in arguments ? this.each(function (e) {
var n = Y(this, t, e, this.textContent);
this.textContent = null == n ? "" : "" + n
}) : 0 in this ? this.pluck("textContent").join("") : null
},
attr: function (t, r) {
var i;
return "string" != typeof t || 1 in arguments ? this.each(function (e) {
if (1 === this.nodeType)if (R(t))for (n in t)G(this, n, t[n]); else G(this, t, Y(this, r, e, this.getAttribute(t)))
}) : 0 in this && 1 == this[0].nodeType && null != (i = this[0].getAttribute(t)) ? i : e
},
removeAttr: function (t) {
return this.each(function () {
1 === this.nodeType && t.split(" ").forEach(function (t) {
G(this, t)
}, this)
})
},
prop: function (t, e) {
return t = D[t] || t, 1 in arguments ? this.each(function (n) {
this[t] = Y(this, e, n, this[t])
}) : this[0] && this[0][t]
},
removeProp: function (t) {
return t = D[t] || t, this.each(function () {
delete this[t]
})
},
data: function (t, n) {
var r = "data-" + t.replace(v, "-$1").toLowerCase(),
i = 1 in arguments ? this.attr(r, n) : this.attr(r);
return null !== i ? Q(i) : e
},
val: function (t) {
return 0 in arguments ? (null == t && (t = ""), this.each(function (e) {
this.value = Y(this, t, e, this.value)
})) : this[0] && (this[0].multiple ? r(this[0]).find("option").filter(function () {
return this.selected
}).pluck("value") : this[0].value)
},
offset: function (e) {
if (e)return this.each(function (t) {
var n = r(this), i = Y(this, e, t, n.offset()), o = n.offsetParent().offset(),
s = {top: i.top - o.top, left: i.left - o.left};
"static" == n.css("position") && (s.position = "relative"), n.css(s)
});
if (!this.length)return null;
if (f.documentElement !== this[0] && !r.contains(f.documentElement, this[0]))return {top: 0, left: 0};
var n = this[0].getBoundingClientRect();
return {
left: n.left + t.pageXOffset,
top: n.top + t.pageYOffset,
width: Math.round(n.width),
height: Math.round(n.height)
}
},
css: function (t, e) {
if (arguments.length < 2) {
var i = this[0];
if ("string" == typeof t) {
if (!i)return;
return i.style[O(t)] || getComputedStyle(i, "").getPropertyValue(t)
}
if (L(t)) {
if (!i)return;
var o = {}, s = getComputedStyle(i, "");
return r.each(t, function (t, e) {
o[e] = i.style[O(e)] || s.getPropertyValue(e)
}), o
}
}
var a = "";
if ("string" == $(t)) e || 0 === e ? a = I(t) + ":" + _(t, e) : this.each(function () {
this.style.removeProperty(I(t))
}); else for (n in t)t[n] || 0 === t[n] ? a += I(n) + ":" + _(n, t[n]) + ";" : this.each(function () {
this.style.removeProperty(I(n))
});
return this.each(function () {
this.style.cssText += ";" + a
})
},
index: function (t) {
return t ? this.indexOf(r(t)[0]) : this.parent().children().indexOf(this[0])
},
hasClass: function (t) {
return t ? o.some.call(this, function (t) {
return this.test(K(t))
}, V(t)) : !1
},
addClass: function (t) {
return t ? this.each(function (e) {
if ("className" in this) {
i = [];
var n = K(this), o = Y(this, t, e, n);
o.split(/\s+/g).forEach(function (t) {
r(this).hasClass(t) || i.push(t)
}, this), i.length && K(this, n + (n ? " " : "") + i.join(" "))
}
}) : this
},
removeClass: function (t) {
return this.each(function (n) {
if ("className" in this) {
if (t === e)return K(this, "");
i = K(this), Y(this, t, n, i).split(/\s+/g).forEach(function (t) {
i = i.replace(V(t), " ")
}), K(this, i.trim())
}
})
},
toggleClass: function (t, n) {
return t ? this.each(function (i) {
var o = r(this), s = Y(this, t, i, K(this));
s.split(/\s+/g).forEach(function (t) {
(n === e ? !o.hasClass(t) : n) ? o.addClass(t) : o.removeClass(t)
})
}) : this
},
scrollTop: function (t) {
if (this.length) {
var n = "scrollTop" in this[0];
return t === e ? n ? this[0].scrollTop : this[0].pageYOffset : this.each(n ? function () {
this.scrollTop = t
} : function () {
this.scrollTo(this.scrollX, t)
})
}
},
scrollLeft: function (t) {
if (this.length) {
var n = "scrollLeft" in this[0];
return t === e ? n ? this[0].scrollLeft : this[0].pageXOffset : this.each(n ? function () {
this.scrollLeft = t
} : function () {
this.scrollTo(t, this.scrollY)
})
}
},
position: function () {
if (this.length) {
var t = this[0], e = this.offsetParent(), n = this.offset(),
i = g.test(e[0].nodeName) ? {top: 0, left: 0} : e.offset();
return n.top -= parseFloat(r(t).css("margin-top")) || 0, n.left -= parseFloat(r(t).css("margin-left")) || 0, i.top += parseFloat(r(e[0]).css("border-top-width")) || 0, i.left += parseFloat(r(e[0]).css("border-left-width")) || 0, {
top: n.top - i.top,
left: n.left - i.left
}
}
},
offsetParent: function () {
return this.map(function () {
for (var t = this.offsetParent || f.body; t && !g.test(t.nodeName) && "static" == r(t).css("position");)t = t.offsetParent;
return t
})
}
}, r.fn.detach = r.fn.remove, ["width", "height"].forEach(function (t) {
var n = t.replace(/./, function (t) {
return t[0].toUpperCase()
});
r.fn[t] = function (i) {
var o, s = this[0];
return i === e ? k(s) ? s["inner" + n] : M(s) ? s.documentElement["scroll" + n] : (o = this.offset()) && o[t] : this.each(function (e) {
s = r(this), s.css(t, Y(this, i, e, s[t]()))
})
}
}), x.forEach(function (n, i) {
var o = i % 2;
r.fn[n] = function () {
var n, a, s = r.map(arguments, function (t) {
var i = [];
return n = $(t), "array" == n ? (t.forEach(function (t) {
return t.nodeType !== e ? i.push(t) : r.zepto.isZ(t) ? i = i.concat(t.get()) : void(i = i.concat(N.fragment(t)))
}), i) : "object" == n || null == t ? t : N.fragment(t)
}), u = this.length > 1;
return s.length < 1 ? this : this.each(function (e, n) {
a = o ? n : n.parentNode, n = 0 == i ? n.nextSibling : 1 == i ? n.firstChild : 2 == i ? n : null;
var c = r.contains(f.documentElement, a);
s.forEach(function (e) {
if (u) e = e.cloneNode(!0); else if (!a)return r(e).remove();
a.insertBefore(e, n), c && tt(e, function (e) {
if (!(null == e.nodeName || "SCRIPT" !== e.nodeName.toUpperCase() || e.type && "text/javascript" !== e.type || e.src)) {
var n = e.ownerDocument ? e.ownerDocument.defaultView : t;
n.eval.call(n, e.innerHTML)
}
})
})
})
}, r.fn[o ? n + "To" : "insert" + (i ? "Before" : "After")] = function (t) {
return r(t)[n](this), this
}
}), N.Z.prototype = X.prototype = r.fn, N.uniq = P, N.deserializeValue = Q, r.zepto = N, r
}();
return t.Zepto = e, void 0 === t.$ && (t.$ = e), function (e) {
function h(t) {
return t._zid || (t._zid = n++)
}
function p(t, e, n, r) {
if (e = d(e), e.ns)var i = m(e.ns);
return (a[h(t)] || []).filter(function (t) {
return t && (!e.e || t.e == e.e) && (!e.ns || i.test(t.ns)) && (!n || h(t.fn) === h(n)) && (!r || t.sel == r)
})
}
function d(t) {
var e = ("" + t).split(".");
return {e: e[0], ns: e.slice(1).sort().join(" ")}
}
function m(t) {
return new RegExp("(?:^| )" + t.replace(" ", " .* ?") + "(?: |$)")
}
function g(t, e) {
return t.del && !f && t.e in c || !!e
}
function v(t) {
return l[t] || f && c[t] || t
}
function y(t, n, i, o, s, u, f) {
var c = h(t), p = a[c] || (a[c] = []);
n.split(/\s/).forEach(function (n) {
if ("ready" == n)return e(document).ready(i);
var a = d(n);
a.fn = i, a.sel = s, a.e in l && (i = function (t) {
var n = t.relatedTarget;
return !n || n !== this && !e.contains(this, n) ? a.fn.apply(this, arguments) : void 0
}), a.del = u;
var c = u || i;
a.proxy = function (e) {
if (e = T(e), !e.isImmediatePropagationStopped()) {
e.data = o;
var n = c.apply(t, e._args == r ? [e] : [e].concat(e._args));
return n === !1 && (e.preventDefault(), e.stopPropagation()), n
}
}, a.i = p.length, p.push(a), "addEventListener" in t && t.addEventListener(v(a.e), a.proxy, g(a, f))
})
}
function x(t, e, n, r, i) {
var o = h(t);
(e || "").split(/\s/).forEach(function (e) {
p(t, e, n, r).forEach(function (e) {
delete a[o][e.i], "removeEventListener" in t && t.removeEventListener(v(e.e), e.proxy, g(e, i))
})
})
}
function T(t, n) {
return (n || !t.isDefaultPrevented) && (n || (n = t), e.each(w, function (e, r) {
var i = n[e];
t[e] = function () {
return this[r] = b, i && i.apply(n, arguments)
}, t[r] = E
}), t.timeStamp || (t.timeStamp = Date.now()), (n.defaultPrevented !== r ? n.defaultPrevented : "returnValue" in n ? n.returnValue === !1 : n.getPreventDefault && n.getPreventDefault()) && (t.isDefaultPrevented = b)), t
}
function S(t) {
var e, n = {originalEvent: t};
for (e in t)j.test(e) || t[e] === r || (n[e] = t[e]);
return T(n, t)
}
var r, n = 1, i = Array.prototype.slice, o = e.isFunction, s = function (t) {
return "string" == typeof t
}, a = {}, u = {}, f = "onfocusin" in t, c = {focus: "focusin", blur: "focusout"},
l = {mouseenter: "mouseover", mouseleave: "mouseout"};
u.click = u.mousedown = u.mouseup = u.mousemove = "MouseEvents", e.event = {
add: y,
remove: x
}, e.proxy = function (t, n) {
var r = 2 in arguments && i.call(arguments, 2);
if (o(t)) {
var a = function () {
return t.apply(n, r ? r.concat(i.call(arguments)) : arguments)
};
return a._zid = h(t), a
}
if (s(n))return r ? (r.unshift(t[n], t), e.proxy.apply(null, r)) : e.proxy(t[n], t);
throw new TypeError("expected function")
}, e.fn.bind = function (t, e, n) {
return this.on(t, e, n)
}, e.fn.unbind = function (t, e) {
return this.off(t, e)
}, e.fn.one = function (t, e, n, r) {
return this.on(t, e, n, r, 1)
};
var b = function () {
return !0
}, E = function () {
return !1
}, j = /^([A-Z]|returnValue$|layer[XY]$|webkitMovement[XY]$)/, w = {
preventDefault: "isDefaultPrevented",
stopImmediatePropagation: "isImmediatePropagationStopped",
stopPropagation: "isPropagationStopped"
};
e.fn.delegate = function (t, e, n) {
return this.on(e, t, n)
}, e.fn.undelegate = function (t, e, n) {
return this.off(e, t, n)
}, e.fn.live = function (t, n) {
return e(document.body).delegate(this.selector, t, n), this
}, e.fn.die = function (t, n) {
return e(document.body).undelegate(this.selector, t, n), this
}, e.fn.on = function (t, n, a, u, f) {
var c, l, h = this;
return t && !s(t) ? (e.each(t, function (t, e) {
h.on(t, n, a, e, f)
}), h) : (s(n) || o(u) || u === !1 || (u = a, a = n, n = r), (u === r || a === !1) && (u = a, a = r), u === !1 && (u = E), h.each(function (r, o) {
f && (c = function (t) {
return x(o, t.type, u), u.apply(this, arguments)
}), n && (l = function (t) {
var r, s = e(t.target).closest(n, o).get(0);
return s && s !== o ? (r = e.extend(S(t), {
currentTarget: s,
liveFired: o
}), (c || u).apply(s, [r].concat(i.call(arguments, 1)))) : void 0
}), y(o, t, u, a, n, l || c)
}))
}, e.fn.off = function (t, n, i) {
var a = this;
return t && !s(t) ? (e.each(t, function (t, e) {
a.off(t, n, e)
}), a) : (s(n) || o(i) || i === !1 || (i = n, n = r), i === !1 && (i = E), a.each(function () {
x(this, t, i, n)
}))
}, e.fn.trigger = function (t, n) {
return t = s(t) || e.isPlainObject(t) ? e.Event(t) : T(t), t._args = n, this.each(function () {
t.type in c && "function" == typeof this[t.type] ? this[t.type]() : "dispatchEvent" in this ? this.dispatchEvent(t) : e(this).triggerHandler(t, n)
})
}, e.fn.triggerHandler = function (t, n) {
var r, i;
return this.each(function (o, a) {
r = S(s(t) ? e.Event(t) : t), r._args = n, r.target = a, e.each(p(a, t.type || t), function (t, e) {
return i = e.proxy(r), r.isImmediatePropagationStopped() ? !1 : void 0
})
}), i
}, "focusin focusout focus blur load resize scroll unload click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select keydown keypress keyup error".split(" ").forEach(function (t) {
e.fn[t] = function (e) {
return 0 in arguments ? this.bind(t, e) : this.trigger(t)
}
}), e.Event = function (t, e) {
s(t) || (e = t, t = e.type);
var n = document.createEvent(u[t] || "Events"), r = !0;
if (e)for (var i in e)"bubbles" == i ? r = !!e[i] : n[i] = e[i];
return n.initEvent(t, r, !0), T(n)
}
}(e), function (e) {
function p(t, n, r) {
var i = e.Event(n);
return e(t).trigger(i, r), !i.isDefaultPrevented()
}
function d(t, e, n, i) {
return t.global ? p(e || r, n, i) : void 0
}
function m(t) {
t.global && 0 === e.active++ && d(t, null, "ajaxStart")
}
function g(t) {
t.global && !--e.active && d(t, null, "ajaxStop")
}
function v(t, e) {
var n = e.context;
return e.beforeSend.call(n, t, e) === !1 || d(e, n, "ajaxBeforeSend", [t, e]) === !1 ? !1 : void d(e, n, "ajaxSend", [t, e])
}
function y(t, e, n, r) {
var i = n.context, o = "success";
n.success.call(i, t, o, e), r && r.resolveWith(i, [t, o, e]), d(n, i, "ajaxSuccess", [e, n, t]), b(o, e, n)
}
function x(t, e, n, r, i) {
var o = r.context;
r.error.call(o, n, e, t), i && i.rejectWith(o, [n, e, t]), d(r, o, "ajaxError", [n, r, t || e]), b(e, n, r)
}
function b(t, e, n) {
var r = n.context;
n.complete.call(r, e, t), d(n, r, "ajaxComplete", [e, n]), g(n)
}
function E(t, e, n) {
if (n.dataFilter == j)return t;
var r = n.context;
return n.dataFilter.call(r, t, e)
}
function j() {
}
function w(t) {
return t && (t = t.split(";", 2)[0]), t && (t == c ? "html" : t == f ? "json" : a.test(t) ? "script" : u.test(t) && "xml") || "text"
}
function T(t, e) {
return "" == e ? t : (t + "&" + e).replace(/[&?]{1,2}/, "?")
}
function S(t) {
t.processData && t.data && "string" != e.type(t.data) && (t.data = e.param(t.data, t.traditional)), !t.data || t.type && "GET" != t.type.toUpperCase() && "jsonp" != t.dataType || (t.url = T(t.url, t.data), t.data = void 0)
}
function C(t, n, r, i) {
return e.isFunction(n) && (i = r, r = n, n = void 0), e.isFunction(r) || (i = r, r = void 0), {
url: t,
data: n,
success: r,
dataType: i
}
}
function O(t, n, r, i) {
var o, s = e.isArray(n), a = e.isPlainObject(n);
e.each(n, function (n, u) {
o = e.type(u), i && (n = r ? i : i + "[" + (a || "object" == o || "array" == o ? n : "") + "]"), !i && s ? t.add(u.name, u.value) : "array" == o || !r && "object" == o ? O(t, u, r, n) : t.add(n, u)
})
}
var i, o, n = +new Date, r = t.document, s = /<script\b[^<]*(?:(?!<\/script>)<[^<]*)*<\/script>/gi,
a = /^(?:text|application)\/javascript/i, u = /^(?:text|application)\/xml/i, f = "application/json",
c = "text/html", l = /^\s*$/, h = r.createElement("a");
h.href = t.location.href, e.active = 0, e.ajaxJSONP = function (i, o) {
if (!("type" in i))return e.ajax(i);
var c, p, s = i.jsonpCallback, a = (e.isFunction(s) ? s() : s) || "Zepto" + n++,
u = r.createElement("script"), f = t[a], l = function (t) {
e(u).triggerHandler("error", t || "abort")
}, h = {abort: l};
return o && o.promise(h), e(u).on("load error", function (n, r) {
clearTimeout(p), e(u).off().remove(), "error" != n.type && c ? y(c[0], h, i, o) : x(null, r || "error", h, i, o), t[a] = f, c && e.isFunction(f) && f(c[0]), f = c = void 0
}), v(h, i) === !1 ? (l("abort"), h) : (t[a] = function () {
c = arguments
}, u.src = i.url.replace(/\?(.+)=\?/, "?$1=" + a), r.head.appendChild(u), i.timeout > 0 && (p = setTimeout(function () {
l("timeout")
}, i.timeout)), h)
}, e.ajaxSettings = {
type: "GET",
beforeSend: j,
success: j,
error: j,
complete: j,
context: null,
global: !0,
xhr: function () {
return new t.XMLHttpRequest
},
accepts: {
script: "text/javascript, application/javascript, application/x-javascript",
json: f,
xml: "application/xml, text/xml",
html: c,
text: "text/plain"
},
crossDomain: !1,
timeout: 0,
processData: !0,
cache: !0,
dataFilter: j
}, e.ajax = function (n) {
var u, f, s = e.extend({}, n || {}), a = e.Deferred && e.Deferred();
for (i in e.ajaxSettings)void 0 === s[i] && (s[i] = e.ajaxSettings[i]);
m(s), s.crossDomain || (u = r.createElement("a"), u.href = s.url, u.href = u.href, s.crossDomain = h.protocol + "//" + h.host != u.protocol + "//" + u.host), s.url || (s.url = t.location.toString()), (f = s.url.indexOf("#")) > -1 && (s.url = s.url.slice(0, f)), S(s);
var c = s.dataType, p = /\?.+=\?/.test(s.url);
if (p && (c = "jsonp"), s.cache !== !1 && (n && n.cache === !0 || "script" != c && "jsonp" != c) || (s.url = T(s.url, "_=" + Date.now())), "jsonp" == c)return p || (s.url = T(s.url, s.jsonp ? s.jsonp + "=?" : s.jsonp === !1 ? "" : "callback=?")), e.ajaxJSONP(s, a);
var P, d = s.accepts[c], g = {}, b = function (t, e) {
g[t.toLowerCase()] = [t, e]
}, C = /^([\w-]+:)\/\//.test(s.url) ? RegExp.$1 : t.location.protocol, N = s.xhr(), O = N.setRequestHeader;
if (a && a.promise(N), s.crossDomain || b("X-Requested-With", "XMLHttpRequest"), b("Accept", d || "*/*"), (d = s.mimeType || d) && (d.indexOf(",") > -1 && (d = d.split(",", 2)[0]), N.overrideMimeType && N.overrideMimeType(d)), (s.contentType || s.contentType !== !1 && s.data && "GET" != s.type.toUpperCase()) && b("Content-Type", s.contentType || "application/x-www-form-urlencoded"), s.headers)for (o in s.headers)b(o, s.headers[o]);
if (N.setRequestHeader = b, N.onreadystatechange = function () {
if (4 == N.readyState) {
N.onreadystatechange = j, clearTimeout(P);
var t, n = !1;
if (N.status >= 200 && N.status < 300 || 304 == N.status || 0 == N.status && "file:" == C) {
if (c = c || w(s.mimeType || N.getResponseHeader("content-type")), "arraybuffer" == N.responseType || "blob" == N.responseType) t = N.response; else {
t = N.responseText;
try {
t = E(t, c, s), "script" == c ? (1, eval)(t) : "xml" == c ? t = N.responseXML : "json" == c && (t = l.test(t) ? null : e.parseJSON(t))
} catch (r) {
n = r
}
if (n)return x(n, "parsererror", N, s, a)
}
y(t, N, s, a)
} else x(N.statusText || null, N.status ? "error" : "abort", N, s, a)
}
}, v(N, s) === !1)return N.abort(), x(null, "abort", N, s, a), N;
var A = "async" in s ? s.async : !0;
if (N.open(s.type, s.url, A, s.username, s.password), s.xhrFields)for (o in s.xhrFields)N[o] = s.xhrFields[o];
for (o in g)O.apply(N, g[o]);
return s.timeout > 0 && (P = setTimeout(function () {
N.onreadystatechange = j, N.abort(), x(null, "timeout", N, s, a)
}, s.timeout)), N.send(s.data ? s.data : null), N
}, e.get = function () {
return e.ajax(C.apply(null, arguments))
}, e.post = function () {
var t = C.apply(null, arguments);
return t.type = "POST", e.ajax(t)
}, e.getJSON = function () {
var t = C.apply(null, arguments);
return t.dataType = "json", e.ajax(t)
}, e.fn.load = function (t, n, r) {
if (!this.length)return this;
var a, i = this, o = t.split(/\s/), u = C(t, n, r), f = u.success;
return o.length > 1 && (u.url = o[0], a = o[1]), u.success = function (t) {
i.html(a ? e("<div>").html(t.replace(s, "")).find(a) : t), f && f.apply(i, arguments)
}, e.ajax(u), this
};
var N = encodeURIComponent;
e.param = function (t, n) {
var r = [];
return r.add = function (t, n) {
e.isFunction(n) && (n = n()), null == n && (n = ""), this.push(N(t) + "=" + N(n))
}, O(r, t, n), r.join("&").replace(/%20/g, "+")
}
}(e), function (t) {
t.fn.serializeArray = function () {
var e, n, r = [], i = function (t) {
return t.forEach ? t.forEach(i) : void r.push({name: e, value: t})
};
return this[0] && t.each(this[0].elements, function (r, o) {
n = o.type, e = o.name, e && "fieldset" != o.nodeName.toLowerCase() && !o.disabled && "submit" != n && "reset" != n && "button" != n && "file" != n && ("radio" != n && "checkbox" != n || o.checked) && i(t(o).val())
}), r
}, t.fn.serialize = function () {
var t = [];
return this.serializeArray().forEach(function (e) {
t.push(encodeURIComponent(e.name) + "=" + encodeURIComponent(e.value))
}), t.join("&")
}, t.fn.submit = function (e) {
if (0 in arguments) this.bind("submit", e); else if (this.length) {
var n = t.Event("submit");
this.eq(0).trigger(n), n.isDefaultPrevented() || this.get(0).submit()
}
return this
}
}(e), function () {
try {
getComputedStyle(void 0)
} catch (e) {
var n = getComputedStyle;
t.getComputedStyle = function (t, e) {
try {
return n(t, e)
} catch (r) {
return null
} |
//############################# Underscore ############################################
// Underscore.js 1.8.3
// http://underscorejs.org
// (c) 2009-2015 Jeremy Ashkenas, DocumentCloud and Investigative Reporters & Editors
// Underscore may be freely distributed under the MIT license.
(function () {
function n(n) {
function t(t, r, e, u, i, o) {
for (; i >= 0 && o > i; i += n) {
var a = u ? u[i] : i;
e = r(e, t[a], a, t)
}
return e
}
return function (r, e, u, i) {
e = b(e, i, 4);
var o = !k(r) && m.keys(r), a = (o || r).length, c = n > 0 ? 0 : a - 1;
return arguments.length < 3 && (u = r[o ? o[c] : c], c += n), t(r, e, u, o, c, a)
}
}
function t(n) {
return function (t, r, e) {
r = x(r, e);
for (var u = O(t), i = n > 0 ? 0 : u - 1; i >= 0 && u > i; i += n)if (r(t[i], i, t))return i;
return -1
}
}
function r(n, t, r) {
return function (e, u, i) {
var o = 0, a = O(e);
if ("number" == typeof i) n > 0 ? o = i >= 0 ? i : Math.max(i + a, o) : a = i >= 0 ? Math.min(i + 1, a) : i + a + 1; else if (r && i && a)return i = r(e, u), e[i] === u ? i : -1;
if (u !== u)return i = t(l.call(e, o, a), m.isNaN), i >= 0 ? i + o : -1;
for (i = n > 0 ? o : a - 1; i >= 0 && a > i; i += n)if (e[i] === u)return i;
return -1
}
}
function e(n, t) {
var r = I.length, e = n.constructor, u = m.isFunction(e) && e.prototype || a, i = "constructor";
for (m.has(n, i) && !m.contains(t, i) && t.push(i); r--;)i = I[r], i in n && n[i] !== u[i] && !m.contains(t, i) && t.push(i)
}
var u = this, i = u._, o = Array.prototype, a = Object.prototype, c = Function.prototype, f = o.push, l = o.slice,
s = a.toString, p = a.hasOwnProperty, h = Array.isArray, v = Object.keys, g = c.bind, y = Object.create,
d = function () {
}, m = function (n) {
return n instanceof m ? n : this instanceof m ? void(this._wrapped = n) : new m(n)
};
"undefined" != typeof exports ? ("undefined" != typeof module && module.exports && (exports = module.exports = m), exports._ = m) : u._ = m, m.VERSION = "1.8.3";
var b = function (n, t, r) {
if (t === void 0)return n;
switch (null == r ? 3 : r) {
case 1:
return function (r) {
return n.call(t, r)
};
case 2:
return function (r, e) {
return n.call(t, r, e)
};
case 3:
return function (r, e, u) {
return n.call(t, r, e, u)
};
case 4:
return function (r, e, u, i) {
return n.call(t, r, e, u, i)
}
}
return function () {
return n.apply(t, arguments)
}
}, x = function (n, t, r) {
return null == n ? m.identity : m.isFunction(n) ? b(n, t, r) : m.isObject(n) ? m.matcher(n) : m.property(n)
};
m.iteratee = function (n, t) {
return x(n, t, 1 / 0)
};
var _ = function (n, t) {
return function (r) {
var e = arguments.length;
if (2 > e || null == r)return r;
for (var u = 1; e > u; u++)for (var i = arguments[u], o = n(i), a = o.length, c = 0; a > c; c++) {
var f = o[c];
t && r[f] !== void 0 || (r[f] = i[f])
}
return r
}
}, j = function (n) {
if (!m.isObject(n))return {};
if (y)return y(n);
d.prototype = n;
var t = new d;
return d.prototype = null, t
}, w = function (n) {
return function (t) {
return null == t ? void 0 : t[n]
}
}, A = Math.pow(2, 53) - 1, O = w("length"), k = function (n) {
var t = O(n);
return "number" == typeof t && t >= 0 && A >= t
};
m.each = m.forEach = function (n, t, r) {
t = b(t, r);
var e, u;
if (k(n))for (e = 0, u = n.length; u > e; e++)t(n[e], e, n); else {
var i = m.keys(n);
for (e = 0, u = i.length; u > e; e++)t(n[i[e]], i[e], n)
}
return n
}, m.map = m.collect = function (n, t, r) {
t = x(t, r);
for (var e = !k(n) && m.keys(n), u = (e || n).length, i = Array(u), o = 0; u > o; o++) {
var a = e ? e[o] : o;
i[o] = t(n[a], a, n)
}
return i
}, m.reduce = m.foldl = m.inject = n(1), m.reduceRight = m.foldr = n(-1), m.find = m.detect = function (n, t, r) {
var e;
return e = k(n) ? m.findIndex(n, t, r) : m.findKey(n, t, r), e !== void 0 && e !== -1 ? n[e] : void 0
}, m.filter = m.select = function (n, t, r) {
var e = [];
return t = x(t, r), m.each(n, function (n, r, u) {
t(n, r, u) && e.push(n)
}), e
}, m.reject = function (n, t, r) {
return m.filter(n, m.negate(x(t)), r)
}, m.every = m.all = function (n, t, r) {
t = x(t, r);
for (var e = !k(n) && m.keys(n), u = (e || n).length, i = 0; u > i; i++) {
var o = e ? e[i] : i;
if (!t(n[o], o, n))return !1
}
return !0
}, m.some = m.any = function (n, t, r) {
t = x(t, r);
for (var e = !k(n) && m.keys(n), u = (e || n).length, i = 0; u > i; i++) {
var o = e ? e[i] : i;
if (t(n[o], o, n))return !0
}
return !1
}, m.contains = m.includes = m.include = function (n, t, r, e) {
return k(n) || (n = m.values(n)), ("number" != typeof r || e) && (r = 0), m.indexOf(n, t, r) >= 0
}, m.invoke = function (n, t) {
var r = l.call(arguments, 2), e = m.isFunction(t);
return m.map(n, function (n) {
var u = e ? t : n[t];
return null == u ? u : u.apply(n, r)
})
}, m.pluck = function (n, t) {
return m.map(n, m.property(t))
}, m.where = function (n, t) {
return m.filter(n, m.matcher(t))
}, m.findWhere = function (n, t) {
return m.find(n, m.matcher(t))
}, m.max = function (n, t, r) {
var e, u, i = -1 / 0, o = -1 / 0;
if (null == t && null != n) {
n = k(n) ? n : m.values(n);
for (var a = 0, c = n.length; c > a; a++)e = n[a], e > i && (i = e)
} else t = x(t, r), m.each(n, function (n, r, e) {
u = t(n, r, e), (u > o || u === -1 / 0 && i === -1 / 0) && (i = n, o = u)
});
return i
}, m.min = function (n, t, r) {
var e, u, i = 1 / 0, o = 1 / 0;
if (null == t && null != n) {
n = k(n) ? n : m.values(n);
for (var a = 0, c = n.length; c > a; a++)e = n[a], i > e && (i = e)
} else t = x(t, r), m.each(n, function (n, r, e) {
u = t(n, r, e), (o > u || 1 / 0 === u && 1 / 0 === i) && (i = n, o = u)
});
return i
}, m.shuffle = function (n) {
for (var t, r = k(n) ? n : m.values(n), e = r.length, u = Array(e), i = 0; e > i; i++)t = m.random(0, i), t !== i && (u[i] = u[t]), u[t] = r[i];
return u
}, m.sample = function (n, t, r) {
return null == t || r ? (k(n) || (n = m.values(n)), n[m.random(n.length - 1)]) : m.shuffle(n).slice(0, Math.max(0, t))
}, m.sortBy = function (n, t, r) {
return t = x(t, r), m.pluck(m.map(n, function (n, r, e) {
return {value: n, index: r, criteria: t(n, r, e)}
}).sort(function (n, t) {
var r = n.criteria, e = t.criteria;
if (r !== e) {
if (r > e || r === void 0)return 1;
if (e > r || e === void 0)return -1
}
return n.index - t.index
}), "value")
};
var F = function (n) {
return function (t, r, e) {
var u = {};
return r = x(r, e), m.each(t, function (e, i) {
var o = r(e, i, t);
n(u, e, o)
}), u
}
};
m.groupBy = F(function (n, t, r) {
m.has(n, r) ? n[r].push(t) : n[r] = [t]
}), m.indexBy = F(function (n, t, r) {
n[r] = t
}), m.countBy = F(function (n, t, r) {
m.has(n, r) ? n[r]++ : n[r] = 1
}), m.toArray = function (n) {
return n ? m.isArray(n) ? l.call(n) : k(n) ? m.map(n, m.identity) : m.values(n) : []
}, m.size = function (n) {
return null == n ? 0 : k(n) ? n.length : m.keys(n).length
}, m.partition = function (n, t, r) {
t = x(t, r);
var e = [], u = [];
return m.each(n, function (n, r, i) {
(t(n, r, i) ? e : u).push(n)
}), [e, u]
}, m.first = m.head = m.take = function (n, t, r) {
return null == n ? void 0 : null == t || r ? n[0] : m.initial(n, n.length - t)
}, m.initial = function (n, t, r) {
return l.call(n, 0, Math.max(0, n.length - (null == t || r ? 1 : t)))
}, m.last = function (n, t, r) {
return null == n ? void 0 : null == t || r ? n[n.length - 1] : m.rest(n, Math.max(0, n.length - t))
}, m.rest = m.tail = m.drop = function (n, t, r) {
return l.call(n, null == t || r ? 1 : t)
}, m.compact = function (n) {
return m.filter(n, m.identity)
};
var S = function (n, t, r, e) {
for (var u = [], i = 0, o = e || 0, a = O(n); a > o; o++) {
var c = n[o];
if (k(c) && (m.isArray(c) || m.isArguments(c))) {
t || (c = S(c, t, r));
var f = 0, l = c.length;
for (u.length += l; l > f;)u[i++] = c[f++]
} else r || (u[i++] = c)
}
return u
};
m.flatten = function (n, t) {
return S(n, t, !1)
}, m.without = function (n) {
return m.difference(n, l.call(arguments, 1))
}, m.uniq = m.unique = function (n, t, r, e) {
m.isBoolean(t) || (e = r, r = t, t = !1), null != r && (r = x(r, e));
for (var u = [], i = [], o = 0, a = O(n); a > o; o++) {
var c = n[o], f = r ? r(c, o, n) : c;
t ? (o && i === f || u.push(c), i = f) : r ? m.contains(i, f) || (i.push(f), u.push(c)) : m.contains(u, c) || u.push(c)
}
return u
}, m.union = function () {
return m.uniq(S(arguments, !0, !0))
}, m.intersection = function (n) {
for (var t = [], r = arguments.length, e = 0, u = O(n); u > e; e++) {
var i = n[e];
if (!m.contains(t, i)) {
for (var o = 1; r > o && m.contains(arguments[o], i); o++);
o === r && t.push(i)
}
}
return t
}, m.difference = function (n) {
var t = S(arguments, !0, !0, 1);
return m.filter(n, function (n) {
return !m.contains(t, n)
})
}, m.zip = function () {
return m.unzip(arguments)
}, m.unzip = function (n) {
for (var t = n && m.max(n, O).length || 0, r = Array(t), e = 0; t > e; e++)r[e] = m.pluck(n, e);
return r
}, m.object = function (n, t) {
for (var r = {}, e = 0, u = O(n); u > e; e++)t ? r[n[e]] = t[e] : r[n[e][0]] = n[e][1];
return r
}, m.findIndex = t(1), m.findLastIndex = t(-1), m.sortedIndex = function (n, t, r, e) {
r = x(r, e, 1);
for (var u = r(t), i = 0, o = O(n); o > i;) {
var a = Math.floor((i + o) / 2);
r(n[a]) < u ? i = a + 1 : o = a
}
return i
}, m.indexOf = r(1, m.findIndex, m.sortedIndex), m.lastIndexOf = r(-1, m.findLastIndex), m.range = function (n, t, r) {
null == t && (t = n || 0, n = 0), r = r || 1;
for (var e = Math.max(Math.ceil((t - n) / r), 0), u = Array(e), i = 0; e > i; i++, n += r)u[i] = n;
return u
};
var E = function (n, t, r, e, u) {
if (!(e instanceof t))return n.apply(r, u);
var i = j(n.prototype), o = n.apply(i, u);
return m.isObject(o) ? o : i
};
m.bind = function (n, t) {
if (g && n.bind === g)return g.apply(n, l.call(arguments, 1));
if (!m.isFunction(n))throw new TypeError("Bind must be called on a function");
var r = l.call(arguments, 2), e = function () {
return E(n, e, t, this, r.concat(l.call(arguments)))
};
return e
}, m.partial = function (n) {
var t = l.call(arguments, 1), r = function () {
for (var e = 0, u = t.length, i = Array(u), o = 0; u > o; o++)i[o] = t[o] === m ? arguments[e++] : t[o];
for (; e < arguments.length;)i.push(arguments[e++]);
return E(n, r, this, this, i)
};
return r
}, m.bindAll = function (n) {
var t, r, e = arguments.length;
if (1 >= e)throw new Error("bindAll must be passed function names");
for (t = 1; e > t; t++)r = arguments[t], n[r] = m.bind(n[r], n);
return n
}, m.memoize = function (n, t) {
var r = function (e) {
var u = r.cache, i = "" + (t ? t.apply(this, arguments) : e);
return m.has(u, i) || (u[i] = n.apply(this, arguments)), u[i]
};
return r.cache = {}, r
}, m.delay = function (n, t) {
var r = l.call(arguments, 2);
return setTimeout(function () {
return n.apply(null, r)
}, t)
}, m.defer = m.partial(m.delay, m, 1), m.throttle = function (n, t, r) {
var e, u, i, o = null, a = 0;
r || (r = {});
var c = function () {
a = r.leading === !1 ? 0 : m.now(), o = null, i = n.apply(e, u), o || (e = u = null)
};
return function () {
var f = m.now();
a || r.leading !== !1 || (a = f);
var l = t - (f - a);
return e = this, u = arguments, 0 >= l || l > t ? (o && (clearTimeout(o), o = null), a = f, i = n.apply(e, u), o || (e = u = null)) : o || r.trailing === !1 || (o = setTimeout(c, l)), i
}
}, m.debounce = function (n, t, r) {
var e, u, i, o, a, c = function () {
var f = m.now() - o;
t > f && f >= 0 ? e = setTimeout(c, t - f) : (e = null, r || (a = n.apply(i, u), e || (i = u = null)))
};
return function () {
i = this, u = arguments, o = m.now();
var f = r && !e;
return e || (e = setTimeout(c, t)), f && (a = n.apply(i, u), i = u = null), a
}
}, m.wrap = function (n, t) {
return m.partial(t, n)
}, m.negate = function (n) {
return function () {
return !n.apply(this, arguments)
}
}, m.compose = function () {
var n = arguments, t = n.length - 1;
return function () {
for (var r = t, e = n[t].apply(this, arguments); r--;)e = n[r].call(this, e);
return e
}
}, m.after = function (n, t) {
return function () {
return --n < 1 ? t.apply(this, arguments) : void 0
}
}, m.before = function (n, t) {
var r;
return function () {
return --n > 0 && (r = t.apply(this, arguments)), 1 >= n && (t = null), r
}
}, m.once = m.partial(m.before, 2);
var M = !{toString: null}.propertyIsEnumerable("toString"),
I = ["valueOf", "isPrototypeOf", "toString", "propertyIsEnumerable", "hasOwnProperty", "toLocaleString"];
m.keys = function (n) {
if (!m.isObject(n))return [];
if (v)return v(n);
var t = [];
for (var r in n)m.has(n, r) && t.push(r);
return M && e(n, t), t
}, m.allKeys = function (n) {
if (!m.isObject(n))return [];
var t = [];
for (var r in n)t.push(r);
return M && e(n, t), t
}, m.values = function (n) {
for (var t = m.keys(n), r = t.length, e = Array(r), u = 0; r > u; u++)e[u] = n[t[u]];
return e
}, m.mapObject = function (n, t, r) {
t = x(t, r);
for (var e, u = m.keys(n), i = u.length, o = {}, a = 0; i > a; a++)e = u[a], o[e] = t(n[e], e, n);
return o
}, m.pairs = function (n) {
for (var t = m.keys(n), r = t.length, e = Array(r), u = 0; r > u; u++)e[u] = [t[u], n[t[u]]];
return e
}, m.invert = function (n) {
for (var t = {}, r = m.keys(n), e = 0, u = r.length; u > e; e++)t[n[r[e]]] = r[e];
return t
}, m.functions = m.methods = function (n) {
var t = [];
for (var r in n)m.isFunction(n[r]) && t.push(r);
return t.sort()
}, m.extend = _(m.allKeys), m.extendOwn = m.assign = _(m.keys), m.findKey = function (n, t, r) {
t = x(t, r);
for (var e, u = m.keys(n), i = 0, o = u.length; o > i; i++)if (e = u[i], t(n[e], e, n))return e
}, m.pick = function (n, t, r) {
var e, u, i = {}, o = n;
if (null == o)return i;
m.isFunction(t) ? (u = m.allKeys(o), e = b(t, r)) : (u = S(arguments, !1, !1, 1), e = function (n, t, r) {
return t in r
}, o = Object(o));
for (var a = 0, c = u.length; c > a; a++) {
var f = u[a], l = o[f];
e(l, f, o) && (i[f] = l)
}
return i
}, m.omit = function (n, t, r) {
if (m.isFunction(t)) t = m.negate(t); else {
var e = m.map(S(arguments, !1, !1, 1), String);
t = function (n, t) {
return !m.contains(e, t)
}
}
return m.pick(n, t, r)
}, m.defaults = _(m.allKeys, !0), m.create = function (n, t) {
var r = j(n);
return t && m.extendOwn(r, t), r
}, m.clone = function (n) {
return m.isObject(n) ? m.isArray(n) ? n.slice() : m.extend({}, n) : n
}, m.tap = function (n, t) {
return t(n), n
}, m.isMatch = function (n, t) {
var r = m.keys(t), e = r.length;
if (null == n)return !e;
for (var u = Object(n), i = 0; e > i; i++) {
var o = r[i];
if (t[o] !== u[o] || !(o in u))return !1
}
return !0
};
var N = function (n, t, r, e) {
if (n === t)return 0 !== n || 1 / n === 1 / t;
if (null == n || null == t)return n === t;
n instanceof m && (n = n._wrapped), t instanceof m && (t = t._wrapped);
var u = s.call(n);
if (u !== s.call(t))return !1;
switch (u) {
case"[object RegExp]":
case"[object String]":
return "" + n == "" + t;
case"[object Number]":
return +n !== +n ? +t !== +t : 0 === +n ? 1 / +n === 1 / t : +n === +t;
case"[object Date]":
case"[object Boolean]":
return +n === +t
}
var i = "[object Array]" === u;
if (!i) {
if ("object" != typeof n || "object" != typeof t)return !1;
var o = n.constructor, a = t.constructor;
if (o !== a && !(m.isFunction(o) && o instanceof o && m.isFunction(a) && a instanceof a) && "constructor" in n && "constructor" in t)return !1
}
r = r || [], e = e || [];
for (var c = r.length; c--;)if (r[c] === n)return e[c] === t;
if (r.push(n), e.push(t), i) {
if (c = n.length, c !== t.length)return !1;
for (; c--;)if (!N(n[c], t[c], r, e))return !1
} else {
var f, l = m.keys(n);
if (c = l.length, m.keys(t).length !== c)return !1;
for (; c--;)if (f = l[c], !m.has(t, f) || !N(n[f], t[f], r, e))return !1
}
return r.pop(), e.pop(), !0
};
m.isEqual = function (n, t) {
return N(n, t)
}, m.isEmpty = function (n) {
return null == n ? !0 : k(n) && (m.isArray(n) || m.isString(n) || m.isArguments(n)) ? 0 === n.length : 0 === m.keys(n).length
}, m.isElement = function (n) {
return !(!n || 1 !== n.nodeType)
}, m.isArray = h || function (n) {
return "[object Array]" === s.call(n)
}, m.isObject = function (n) {
var t = typeof n;
return "function" === t || "object" === t && !!n
}, m.each(["Arguments", "Function", "String", "Number", "Date", "RegExp", "Error"], function (n) {
m["is" + n] = function (t) {
return s.call(t) === "[object " + n + "]"
}
}), m.isArguments(arguments) || (m.isArguments = function (n) {
return m.has(n, "callee")
}), "function" != typeof/./ && "object" != typeof Int8Array && (m.isFunction = function (n) {
return "function" == typeof n || !1
}), m.isFinite = function (n) {
return isFinite(n) && !isNaN(parseFloat(n))
}, m.isNaN = function (n) {
return m.isNumber(n) && n !== +n
}, m.isBoolean = function (n) {
return n === !0 || n === !1 || "[object Boolean]" === s.call(n)
}, m.isNull = function (n) {
return null === n
}, m.isUndefined = function (n) {
return n === void 0
}, m.has = function (n, t) {
return null != n && p.call(n, t)
}, m.noConflict = function () {
return u._ = i, this
}, m.identity = function (n) {
return n
}, m.constant = function (n) {
return function () {
return n
}
}, m.noop = function () {
}, m.property = w, m.propertyOf = function (n) {
return null == n ? function () {
} : function (t) {
return n[t]
}
}, m.matcher = m.matches = function (n) {
return n = m.extendOwn({}, n), function (t) {
return m.isMatch(t, n)
}
}, m.times = function (n, t, r) {
var e = Array(Math.max(0, n));
t = b(t, r, 1);
for (var u = 0; n > u; u++)e[u] = t(u);
return e
}, m.random = function (n, t) {
return null == t && (t = n, n = 0), n + Math.floor(Math.random() * (t - n + 1))
}, m.now = Date.now || function () {
return (new Date).getTime()
};
var B = {"&": "&", "<": "<", ">": ">", '"': """, "'": "'", "`": "`"}, T = m.invert(B),
R = function (n) {
var t = function (t) {
return n[t]
}, r = "(?:" + m.keys(n).join("|") + ")", e = RegExp(r), u = RegExp(r, "g");
return function (n) {
return n = null == n ? "" : "" + n, e.test(n) ? n.replace(u, t) : n
}
};
m.escape = R(B), m.unescape = R(T), m.result = function (n, t, r) {
var e = null == n ? void 0 : n[t];
return e === void 0 && (e = r), m.isFunction(e) ? e.call(n) : e
};
var q = 0;
m.uniqueId = function (n) {
var t = ++q + "";
return n ? n + t : t
}, m.templateSettings = {evaluate: /<%([\s\S]+?)%>/g, interpolate: /<%=([\s\S]+?)%>/g, escape: /<%-([\s\S]+?)%>/g};
var K = /(.)^/, z = {"'": "'", "\\": "\\", "\r": "r", "\n": "n", "\u2028": "u2028", "\u2029": "u2029"},
D = /\\|'|\r|\n|\u2028|\u2029/g, L = function (n) {
return "\\" + z[n]
};
m.template = function (n, t, r) {
!t && r && (t = r), t = m.defaults({}, t, m.templateSettings);
var e = RegExp([(t.escape || K).source, (t.interpolate || K).source, (t.evaluate || K).source].join("|") + "|$", "g"),
u = 0, i = "__p+='";
n.replace(e, function (t, r, e, o, a) {
return i += n.slice(u, a).replace(D, L), u = a + t.length, r ? i += "'+\n((__t=(" + r + "))==null?'':_.escape(__t))+\n'" : e ? i += "'+\n((__t=(" + e + "))==null?'':__t)+\n'" : o && (i += "';\n" + o + "\n__p+='"), t
}), i += "';\n", t.variable || (i = "with(obj||{}){\n" + i + "}\n"), i = "var __t,__p='',__j=Array.prototype.join," + "print=function(){__p+=__j.call(arguments,'');};\n" + i + "return __p;\n";
try {
var o = new Function(t.variable || "obj", "_", i)
} catch (a) {
throw a.source = i, a
}
var c = function (n) {
return o.call(this, n, m)
}, f = t.variable || "obj";
return c.source = "function(" + f + "){\n" + i + "}", c
}, m.chain = function (n) {
var t = m(n);
return t._chain = !0, t
};
var P = function (n, t) {
return n._chain ? m(t).chain() : t
};
m.mixin = function (n) {
m.each(m.functions(n), function (t) {
var r = m[t] = n[t];
m.prototype[t] = function () {
var n = [this._wrapped];
return f.apply(n, arguments), P(this, r.apply(m, n))
}
})
}, m.mixin(m), m.each(["pop", "push", "reverse", "shift", "sort", "splice", "unshift"], function (n) {
var t = o[n];
m.prototype[n] = function () {
var r = this._wrapped;
return t.apply(r, arguments), "shift" !== n && "splice" !== n || 0 !== r.length || delete r[0], P(this, r)
}
}), m.each(["concat", "join", "slice"], function (n) {
var t = o[n];
m.prototype[n] = function () {
return P(this, t.apply(this._wrapped, arguments))
}
}), m.prototype.value = function () {
return this._wrapped
}, m.prototype.valueOf = m.prototype.toJSON = m.prototype.value, m.prototype.toString = function () {
return "" + this._wrapped
}, "function" == typeof define && define.amd && define("underscore", [], function () {
return m
})
}).call(this);
//# sourceMappingURL=underscore-min.map
//############################# Class ############################################
/* Simple JavaScript Inheritance
* By John Resig http://ejohn.org/
* MIT Licensed.
*/
// Inspired by base2 and Prototype
var Class = (function () {
    //`initializing` solves the extra-constructor-call problem of prototype inheritance: assigning a parent instance straight to the child prototype runs the parent constructor once. The real construction work therefore lives in init(), and while we are merely building the prototype, `initializing` is true so init() is skipped.
    //`fnTest` detects whether code uses the `_super` keyword. Some browsers can turn `function(){xyz;}` into a string that exposes its body (there `/xyz/.test(function(){xyz;})` is true), so `/\b_super\b/` can be matched against it. Where that is unsupported, fall back to a match-everything regex and treat every function as if it used _super.
var initializing = false,
fnTest = /xyz/.test(function () {
xyz;
}) ? /\b_super\b/ : /.*/;
// The base Class implementation (does nothing)
    // The top-level base class
this.Class = function () {
};
// Create a new Class that inherits from this class
    // Produce a new class; the returned class carries its own extend method so inheritance can continue down the chain
Class.extend = function (prop) {
        //Keep the current class's prototype (normally the parent's)
        //`this` points at the parent class; on the very first call it is the top-level Class
var _super = this.prototype;
// Instantiate a base class (but only create the instance,
// don't run the init constructor)
        //A switch so that instantiating for the prototype does not run the real construction flow
initializing = true;
var prototype = new this();
initializing = false;
// Copy the properties over onto the new prototype
for (var name in prop) {
// Check if we're overwriting an existing function
            //This simply mixes prop's properties into the child prototype; functions get special handling
prototype[name] = typeof prop[name] == "function" &&
typeof _super[name] == "function" && fnTest.test(prop[name]) ?
(function (name, fn) {
                    //Use a closure to return a new wrapper function, so extra work can be done around the call
return function () {
var tmp = this._super;
// Add a new ._super() method that is the same method
// but on the super-class
                        // While the method runs, inject a _super onto `this` for calling the parent's method of the same name
this._super = _super[name];
// The method only need to be bound temporarily, so we
// remove it when we're done executing
                        //Because of the assignment above, fn can reach the parent's same-named method through _super
var ret = fn.apply(this, arguments);
                        //On the way out, restore the saved value
this._super = tmp;
return ret;
};
})(name, prop[name]) :
prop[name];
}
    // This is the class handed back, i.e. the subclass we return
function Class() {
// All construction is actually done in the init method
if (!initializing && this.init)
this.init.apply(this, arguments);
}
        // Wire up the prototype chain to complete the inheritance
Class.prototype = prototype;
        // Repoint the constructor reference
Class.prototype.constructor = Class;
        // Give the subclass an extend method of its own
Class.extend = arguments.callee;
return Class;
}
return Class;
})()
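// Usage sketch (illustrative, not part of the original file): subclassing
// with access to the parent implementation through _super.
// var Animal = Class.extend({
//     init: function (name) { this.name = name; },
//     speak: function () { return this.name; }
// });
// var Dog = Animal.extend({
//     speak: function () { return this._super() + " says woof"; }
// });
// new Dog("Rex").speak(); // "Rex says woof"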
//Component framework
var Base = (function () {
    var _indexOf = function (array, item) {
        if (array == null) return -1
        var i = 0,
            length = array.length
        for (; i < length; i++)
            if (array[i] === item) return i
        return -1
    }
    //Event handling
var Event = Class.extend({
        //Register a listener
on: function (key, listener) {
            //this.__events stores all the handler functions
if (!this.__events) {
this.__events = {}
}
if (!this.__events[key]) {
this.__events[key] = []
}
            if (_indexOf(this.__events[key], listener) === -1 && typeof listener === 'function') {
this.__events[key].push(listener)
}
return this
},
        //Fire an event, i.e. notify its listeners
fire: function (key) {
if (!this.__events || !this.__events[key]) return
            //Array.prototype.slice.call(arguments) converts any object with a length property into a real array
            // args is what gets passed into the handlers registered under this event name
var args = Array.prototype.slice.call(arguments, 1) || []
            var listeners = this.__events[key] // the handler list for this key
            var i = 0
            var l = listeners.length // how many handlers are registered
for (i; i < l; i++) {
listeners[i].apply(this, args)
}
return this
},
        //Remove listeners
off: function (key, listener) {
if (!key && !listener) {
this.__events = {}
}
            //If no listener function is passed, drop all listeners under this key
            if (key && !listener) {
                delete this.__events[key]
            }
            if (key && listener) {
                var listeners = this.__events[key]
                var index = _indexOf(listeners, listener)
                if (index > -1) listeners.splice(index, 1)
}
return this;
}
})
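    // Usage sketch (illustrative, not part of the original framework):
    // var bus = new Event();            // init is optional, so this works
    // var onReady = function (msg) { console.log(msg); };
    // bus.on('ready', onReady);
    // bus.fire('ready', 'hello');       // logs "hello"
    // bus.off('ready', onReady);        // removes just this handler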
var Base = Event.extend({
defaults: {},
item: {},
init: function (config) {
            //Automatically store the configuration; copy into a fresh object
            //so the shared defaults are not mutated across instances
            this._config = _.extend({}, this.defaults, config);
this.$autoWatch();
this.initialize();
},
initialize: function () {
},
get: function (key) {
return this._config[key]
},
set: function (key, value) {
this._config[key] = value;
},
bind: function () {
},
render: function () {
},
destroy: function () {
},
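        // Usage sketch (illustrative, not part of the original framework):
        // $autoWatch turns every config key into a defineProperty-backed
        // accessor that fires a "change:<key>" event when the value changes:
        //   var app = new Base({count: 0});
        //   app.on('change:count', function (v) { console.log(v); });
        //   app.count = 5; // logs 5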
$autoWatch: function () {
var Datas = this._config;
var num = 1;
var self = this
_.each(Datas, function (item, index) {
this.item = item;
var Fn = {};
var tempArr = [];
var fnString = (_.template([
"var Arr = {",
'<%= index%> : this.item,',
'_<%= index%> : this.item',
'};return Arr'
].join(" ")))({
index: index
});
var WatchFnString = (_.template([
'var _this = this;',
'Object.defineProperty(this,"<%= index%>",{',
'set:function(Val){',
'if(Val != this._<%= index%>){',
'this._<%= index%> = Val;',
'_this.fire("change:<%= index%>",Val);',
'}else{return}},',
'get:function(){',
'return this._<%= index%>',
'}})'
].join(" ")))({
index: index
})
                //Run the watch setup
tempArr = new Function(fnString).bind(this)();
_.extend(this, tempArr);
(new Function(WatchFnString)).apply(this);
num++;
}.bind(this))
}
})
return Base
})() | }
}
}(), e
}); |
fCoEGIET_template.py | from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class FCoEGIET(Base):
__slots__ = ()
_SDM_NAME = 'fCoEGIET'
_SDM_ATT_MAP = {
'FcoeHeaderVersion': 'fCoEGIET.header.fcoeHeader.version-1',
'FcoeHeaderReserved': 'fCoEGIET.header.fcoeHeader.reserved-2',
'FcoeHeaderESOF': 'fCoEGIET.header.fcoeHeader.eSOF-3',
'DeviceDataFramesDeviceDataInfo': 'fCoEGIET.header.fcHeader.rCTL.deviceDataFrames.deviceDataInfo-4',
'RCTLReserved': 'fCoEGIET.header.fcHeader.rCTL.reserved-5',
'ExtendedLinkServicesInfo': 'fCoEGIET.header.fcHeader.rCTL.extendedLinkServices.info-6',
'Fc4LinkDataInfo': 'fCoEGIET.header.fcHeader.rCTL.fc4LinkData.info-7',
'VideoDataInfo': 'fCoEGIET.header.fcHeader.rCTL.videoData.info-8',
'ExtendedHeaderInfo': 'fCoEGIET.header.fcHeader.rCTL.extendedHeader.info-9',
'BasicLinkServicesInfo': 'fCoEGIET.header.fcHeader.rCTL.basicLinkServices.info-10',
'LinkControlFramesInfo': 'fCoEGIET.header.fcHeader.rCTL.linkControlFrames.info-11',
'ExtendedRoutingInfo': 'fCoEGIET.header.fcHeader.rCTL.extendedRouting.info-12',
'FcHeaderDstId': 'fCoEGIET.header.fcHeader.dstId-13',
'FcHeaderCsCTLPriority': 'fCoEGIET.header.fcHeader.csCTLPriority-14',
'FcHeaderSrcId': 'fCoEGIET.header.fcHeader.srcId-15',
'FcHeaderType': 'fCoEGIET.header.fcHeader.type-16',
'FCTLCustom': 'fCoEGIET.header.fcHeader.fCTL.custom-17',
'BuildFCTLExchangeContext': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.exchangeContext-18',
'BuildFCTLSequenceContext': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.sequenceContext-19',
'BuildFCTLFirstSequence': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.firstSequence-20',
'BuildFCTLLastSequence': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.lastSequence-21',
'BuildFCTLEndSequence': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.endSequence-22',
'BuildFCTLEndConnection': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.endConnection-23',
'BuildFCTLCsCTLPriority': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.csCTLPriority-24',
'BuildFCTLSequenceInitiative': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.sequenceInitiative-25',
'BuildFCTLFcXIDReassigned': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.fcXIDReassigned-26',
'BuildFCTLFcInvalidateXID': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.fcInvalidateXID-27',
'BuildFCTLAckForm': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.ackForm-28',
'BuildFCTLFcDataCompression': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.fcDataCompression-29',
'BuildFCTLFcDataEncryption': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.fcDataEncryption-30',
'BuildFCTLRetransmittedSequence': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.retransmittedSequence-31',
'BuildFCTLUnidirectionalTransmit': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.unidirectionalTransmit-32',
'BuildFCTLContinueSeqCondition': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.continueSeqCondition-33',
'BuildFCTLAbortSeqCondition': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.abortSeqCondition-34',
'BuildFCTLRelativeOffsetPresent': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.relativeOffsetPresent-35',
'BuildFCTLExchangeReassembly': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.exchangeReassembly-36',
'BuildFCTLFillBytes': 'fCoEGIET.header.fcHeader.fCTL.buildFCTL.fillBytes-37',
'FcHeaderSeqID': 'fCoEGIET.header.fcHeader.seqID-38',
'FcHeaderDfCTL': 'fCoEGIET.header.fcHeader.dfCTL-39',
'FcHeaderSeqCNT': 'fCoEGIET.header.fcHeader.seqCNT-40',
'FcHeaderOxID': 'fCoEGIET.header.fcHeader.oxID-41',
'FcHeaderRxID': 'fCoEGIET.header.fcHeader.rxID-42',
'FcHeaderParameter': 'fCoEGIET.header.fcHeader.parameter-43',
'FcCTRevision': 'fCoEGIET.header.fcCT.revision-44',
'FcCTInId': 'fCoEGIET.header.fcCT.inId-45',
'FcCTGsType': 'fCoEGIET.header.fcCT.gsType-46',
'FcCTGsSubtype': 'fCoEGIET.header.fcCT.gsSubtype-47',
'FcCTOptions': 'fCoEGIET.header.fcCT.options-48',
'FcCTReserved': 'fCoEGIET.header.fcCT.reserved-49',
'FCSOpcode': 'fCoEGIET.header.FCS.opcode-50',
'FCSMaxsize': 'fCoEGIET.header.FCS.maxsize-51',
'FCSReserved': 'fCoEGIET.header.FCS.reserved-52',
'FCSInterconnectElementName': 'fCoEGIET.header.FCS.interconnectElementName-53',
'FcCRCAutoCRC': 'fCoEGIET.header.fcCRC.autoCRC-54',
'FcCRCGenerateBadCRC': 'fCoEGIET.header.fcCRC.generateBadCRC-55',
'FcTrailerEEOF': 'fCoEGIET.header.fcTrailer.eEOF-56',
'FcTrailerReserved': 'fCoEGIET.header.fcTrailer.reserved-57',
}
def __init__(self, parent, list_op=False):
super(FCoEGIET, self).__init__(parent, list_op)
@property
def FcoeHeaderVersion(self):
"""
Display Name: Version
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcoeHeaderVersion']))
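# Usage sketch (illustrative; the parent object and the Multivalue calls
# below are assumptions, not taken from this file): each property resolves
# its SDM path through _SDM_ATT_MAP and returns a Multivalue handle that
# can then be overwritten, e.g.
#   fcoe = FCoEGIET(parent_stack).add()
#   fcoe.FcoeHeaderVersion.Single(0)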
@property
def FcoeHeaderReserved(self):
"""
Display Name: Reserved
Default Value: 0x00
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcoeHeaderReserved']))
@property
def FcoeHeaderESOF(self):
"""
Display Name: E-SOF
Default Value: 54
Value Format: decimal
Available enum values: SOFf - Fabric, 40, SOFi4 - Initiate Class 4, 41, SOFi2 - Initiate Class 2, 45, SOFi3 - Initiate Class 3, 46, SOFn4 - Normal Class 4, 49, SOFn2 - Normal Class 2, 53, SOFn3 - Normal Class 3, 54, SOFc4 - Connect Class 4, 57, SOFn1 - Normal Class 1 or 6, 250
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcoeHeaderESOF']))
@property
def DeviceDataFramesDeviceDataInfo(self):
"""
Display Name: Information
Default Value: 0
Value Format: decimal
Available enum values: Uncategorized Information, 0, Solicited Data, 1, Unsolicited Control, 2, Solicited Control, 3, Unsolicited Data, 4, Data Descriptor, 5, Unsolicited Command, 6, Command Status, 7
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DeviceDataFramesDeviceDataInfo']))
@property
def RCTLReserved(self):
|
@property
def ExtendedLinkServicesInfo(self):
"""
Display Name: Information
Default Value: 33
Value Format: decimal
Available enum values: Solicited Data, 32, Request, 33, Reply, 34
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ExtendedLinkServicesInfo']))
@property
def Fc4LinkDataInfo(self):
"""
Display Name: Information
Default Value: 48
Value Format: decimal
Available enum values: Uncategorized Information, 48, Solicited Data, 49, Unsolicited Control, 50, Solicited Control, 51, Unsolicited Data, 52, Data Descriptor, 53, Unsolicited Command, 54, Command Status, 55
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Fc4LinkDataInfo']))
@property
def VideoDataInfo(self):
"""
Display Name: Information
Default Value: 68
Value Format: decimal
Available enum values: Unsolicited Data, 68
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['VideoDataInfo']))
@property
def ExtendedHeaderInfo(self):
"""
Display Name: Information
Default Value: 80
Value Format: decimal
Available enum values: Virtual Fabric Tagging Header, 80, Inter Fabric Routing Header, 81, Encapsulation Header, 82
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ExtendedHeaderInfo']))
@property
def BasicLinkServicesInfo(self):
"""
Display Name: Information
Default Value: 128
Value Format: decimal
Available enum values: No Operation, 128, Abort Sequence, 129, Remove Connection, 130, Basic Accept, 132, Basic Reject, 133, Dedicated Connection Preempted, 134
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BasicLinkServicesInfo']))
@property
def LinkControlFramesInfo(self):
"""
Display Name: Information
Default Value: 192
Value Format: decimal
Available enum values: Acknowledge_1, 128, Acknowledge_0, 129, Nx Port Reject, 130, Fabric Reject, 131, Nx Port Busy, 132, Fabric Busy to Data Frame, 133, Fabric Busy to Link Control Frame, 134, Link Credit Reset, 135, Notify, 136, End, 137
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LinkControlFramesInfo']))
@property
def ExtendedRoutingInfo(self):
"""
Display Name: Information
Default Value: 240
Value Format: decimal
Available enum values: Vendor Unique, 240
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ExtendedRoutingInfo']))
@property
def FcHeaderDstId(self):
"""
Display Name: Destination ID
Default Value: 0
Value Format: fCID
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderDstId']))
@property
def FcHeaderCsCTLPriority(self):
"""
Display Name: CS_CTL/Priority
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderCsCTLPriority']))
@property
def FcHeaderSrcId(self):
"""
Display Name: Source ID
Default Value: 0
Value Format: fCID
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderSrcId']))
@property
def FcHeaderType(self):
"""
Display Name: Type
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderType']))
@property
def FCTLCustom(self):
"""
Display Name: Custom
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCTLCustom']))
@property
def BuildFCTLExchangeContext(self):
"""
Display Name: Exchange Context
Default Value: 0
Value Format: decimal
Available enum values: Originator, 0, Recipient, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLExchangeContext']))
@property
def BuildFCTLSequenceContext(self):
"""
Display Name: Sequence Context
Default Value: 0
Value Format: decimal
Available enum values: Initiator, 0, Recipient, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLSequenceContext']))
@property
def BuildFCTLFirstSequence(self):
"""
Display Name: First Sequence
Default Value: 0
Value Format: decimal
Available enum values: Other, 0, First, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLFirstSequence']))
@property
def BuildFCTLLastSequence(self):
"""
Display Name: Last Sequence
Default Value: 0
Value Format: decimal
Available enum values: Other, 0, Last, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLLastSequence']))
@property
def BuildFCTLEndSequence(self):
"""
Display Name: End Sequence
Default Value: 0
Value Format: decimal
Available enum values: Other, 0, Last, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLEndSequence']))
@property
def BuildFCTLEndConnection(self):
"""
Display Name: End Connection
Default Value: 0
Value Format: decimal
Available enum values: Alive, 0, Pending, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLEndConnection']))
@property
def BuildFCTLCsCTLPriority(self):
"""
Display Name: CS_CTL/Priority
Default Value: 0
Value Format: decimal
Available enum values: CS_CTL, 0, Priority, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLCsCTLPriority']))
@property
def BuildFCTLSequenceInitiative(self):
"""
Display Name: Sequence Initiative
Default Value: 0
Value Format: decimal
Available enum values: Hold, 0, Transfer, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLSequenceInitiative']))
@property
def BuildFCTLFcXIDReassigned(self):
"""
Display Name: FC XID Reassigned
Default Value: 0
Value Format: decimal
Available enum values: No, 0, Yes, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLFcXIDReassigned']))
@property
def BuildFCTLFcInvalidateXID(self):
"""
Display Name: FC Invalidate XID
Default Value: 0
Value Format: decimal
Available enum values: No, 0, Yes, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLFcInvalidateXID']))
@property
def BuildFCTLAckForm(self):
"""
Display Name: ACK_Form
Default Value: 0
Value Format: decimal
Available enum values: No assistance provided, 0, ACK_1 Required, 1, reserved, 2, Ack_0 Required, 3
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLAckForm']))
@property
def BuildFCTLFcDataCompression(self):
"""
Display Name: FC Data Compression
Default Value: 0
Value Format: decimal
Available enum values: No, 0, Yes, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLFcDataCompression']))
@property
def BuildFCTLFcDataEncryption(self):
"""
Display Name: FC Data Encryption
Default Value: 0
Value Format: decimal
Available enum values: No, 0, Yes, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLFcDataEncryption']))
@property
def BuildFCTLRetransmittedSequence(self):
"""
Display Name: Retransmitted Sequence
Default Value: 0
Value Format: decimal
Available enum values: Original, 0, Retransmission, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLRetransmittedSequence']))
@property
def BuildFCTLUnidirectionalTransmit(self):
"""
Display Name: Unidirectional Transmit
Default Value: 0
Value Format: decimal
Available enum values: Bi-directional, 0, Unidirectional, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLUnidirectionalTransmit']))
@property
def BuildFCTLContinueSeqCondition(self):
"""
Display Name: Continue Sequence Condition
Default Value: 0
Value Format: decimal
Available enum values: No information, 0, Sequence to follow-immediately, 1, Sequence to follow-soon, 2, Sequence to follow-delayed, 3
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLContinueSeqCondition']))
@property
def BuildFCTLAbortSeqCondition(self):
"""
Display Name: Abort Sequence Condition
Default Value: 0
Value Format: decimal
Available enum values: 0x00, 0, 0x01, 1, 0x10, 2, 0x11, 3
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLAbortSeqCondition']))
@property
def BuildFCTLRelativeOffsetPresent(self):
"""
Display Name: Relative Offset Present
Default Value: 0
Value Format: decimal
Available enum values: Parameter field defined, 0, Relative offset, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLRelativeOffsetPresent']))
@property
def BuildFCTLExchangeReassembly(self):
"""
Display Name: Exchange Reassembly
Default Value: 0
Value Format: decimal
Available enum values: off, 0, on, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLExchangeReassembly']))
@property
def BuildFCTLFillBytes(self):
"""
Display Name: Fill Bytes
Default Value: 0
Value Format: decimal
Available enum values: 0 bytes of fill, 0, 1 bytes of fill, 1, 2 bytes of fill, 2, 3 bytes of fill, 3
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLFillBytes']))
@property
def FcHeaderSeqID(self):
"""
Display Name: SEQ_ID
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderSeqID']))
@property
def FcHeaderDfCTL(self):
"""
Display Name: DF_CTL
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderDfCTL']))
@property
def FcHeaderSeqCNT(self):
"""
Display Name: SEQ_CNT
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderSeqCNT']))
@property
def FcHeaderOxID(self):
"""
Display Name: OX_ID
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderOxID']))
@property
def FcHeaderRxID(self):
"""
Display Name: RX_ID
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderRxID']))
@property
def FcHeaderParameter(self):
"""
Display Name: Parameter
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderParameter']))
@property
def FcCTRevision(self):
"""
Display Name: Revision
Default Value: 0x01
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcCTRevision']))
@property
def FcCTInId(self):
"""
Display Name: IN_ID
Default Value: 0x000000
Value Format: fCID
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcCTInId']))
@property
def FcCTGsType(self):
"""
Display Name: GS_Type
Default Value: 250
Value Format: decimal
Available enum values: Event Service, 244, Key Distribution Service, 247, Alias Service, 248, Management Service, 250, Time Service, 251, Directory Service, 252
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcCTGsType']))
@property
def FcCTGsSubtype(self):
"""
Display Name: GS_Subtype
Default Value: 0x01
Value Format: hex
Available enum values: Fabric Configuration Server, 1, Unzoned Name Server, 2, Fabric Zone Server, 3, Lock Server, 4, Performance Server, 5, Security Policy Server, 6, Security Information Server, 7
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcCTGsSubtype']))
@property
def FcCTOptions(self):
"""
Display Name: Options
Default Value: 0x00
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcCTOptions']))
@property
def FcCTReserved(self):
"""
Display Name: Reserved
Default Value: 0x00
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcCTReserved']))
@property
def FCSOpcode(self):
"""
Display Name: Command/Response Code
Default Value: 273
Value Format: decimal
Available enum values: GTIN, 256, GIEL, 257, GIET, 273, GDID, 274, GMID, 275, GFN, 276, GIELN, 277, GMAL, 278, GIEIL, 279, GPL, 280, GPT, 289, GPPN, 290, GAPNL, 292, GPS, 294, GPSC, 295, GSES, 304, GIEAG, 320, GPAG, 321, GPLNL, 401, GPLT, 402, GPLML, 403, GPAB, 407, GNPL, 417, GPNL, 418, GPFCP, 420, GPLI, 421, GNID, 433, RIELN, 533, RPL, 640, RPLN, 657, RPLT, 658, RPLM, 659, RPAB, 664, RPFCP, 666, RPLI, 667, DPL, 896, DPLN, 913, DPLM, 914, DPLML, 915, DPLI, 916, DPAB, 917, DPALL, 927, FTR, 1024, FPNG, 1025
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCSOpcode']))
@property
def FCSMaxsize(self):
"""
Display Name: Maximum/Residual Size
Default Value: 0x0000
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCSMaxsize']))
@property
def FCSReserved(self):
"""
Display Name: Reserved
Default Value: 0x00
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCSReserved']))
@property
def FCSInterconnectElementName(self):
"""
Display Name: Interconnect Element Name
Default Value: 0x00
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCSInterconnectElementName']))
@property
def FcCRCAutoCRC(self):
"""
Display Name: Auto
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcCRCAutoCRC']))
@property
def FcCRCGenerateBadCRC(self):
"""
Display Name: Bad CRC
Default Value: 0x01
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcCRCGenerateBadCRC']))
@property
def FcTrailerEEOF(self):
"""
Display Name: E-EOF
Default Value: 65
Value Format: decimal
Available enum values: EOFn - Normal, 65, EOFt - Terminate, 66, EOFrt - Remove Terminate, 68, EOFni - Normal Invalid, 73, EOFrti - Remove Terminate Invalid, 79, EOFa - Abort, 80
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcTrailerEEOF']))
@property
def FcTrailerReserved(self):
"""
Display Name: Reserved
Default Value: 0x000000
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcTrailerReserved']))
def add(self):
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
| """
Display Name: Reserved
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RCTLReserved'])) |
sender.go | // Copyright 2015 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package client
import (
"context"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/storage/engine/enginepb"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
)
// TxnType specifies whether a transaction is the root (parent)
// transaction, or a leaf (child) in a tree of client.Txns, as
// is used in a DistSQL flow.
type TxnType int
const (
_ TxnType = iota
// RootTxn specifies this sender is the root transaction, and is
// responsible for aggregating all transactional state and
// finalizing the transaction. The root txn is responsible for
// heartbeating the transaction record.
RootTxn
// LeafTxn specifies this sender is for one of potentially many
// distributed client transactions. The state from this transaction
// must be propagated back to the root transaction and used to
// augment its state before the transaction can be finalized. Leaf
// transactions do not heartbeat the transaction record.
//
// Note: As leaves don't perform heartbeats, the transaction might
// be cleaned up while this leaf is executing an operation. We rely
// on the cleanup process poisoning the AbortSpans for all intents
// so that reads performed through a leaf txn don't miss writes
// previously performed by the transaction (at least not until the
// expiration of the GC period / abort span entry timeout).
LeafTxn
)
// Sender is implemented by modules throughout the crdb stack, on both
// the "client" and the "server", involved in passing along and
// ultimately evaluating requests (batches). The interface is now
// considered regrettable because it's too narrow and at times leaky.
// Notable implementors: client.Txn, kv.TxnCoordSender, storage.Node,
// storage.Store, storage.Replica.
type Sender interface {
// Send sends a batch for evaluation.
// The contract about whether both a response and an error can be
// returned varies between layers.
//
// The caller retains ownership of all the memory referenced by the
// BatchRequest; the callee is not allowed to hold on to any parts
// of it after the call returns (this is so that the
// client module can allocate requests from a pool and reuse
// them). For example, the DistSender makes sure that, if there are
// concurrent requests, it waits for all of them before returning,
// even in error cases.
//
// Once the request reaches the `transport` module, another
// restriction applies (particularly relevant for the case when the
// node that the transport is talking to is local, and so there's
// no gRPC marshaling/unmarshaling):
// - the callee has to treat everything inside the BatchRequest as
// read-only. This is so that the client module retains the right to
// pass pointers into its internals, like for example the
// Transaction. This wouldn't work if the server would be allowed to
// change the Transaction willy-nilly.
//
// TODO(andrei): The client does not currently use this last
// guarantee; it clones the txn for every request. Given that a
// client.Txn can be used concurrently, in order for the client to
// take advantage of this, it would need to switch to a
// copy-on-write scheme so that its updates to the txn do not race
// with the server reading it. We should do this to avoid the
// cloning allocations. And to be frank, it'd be a good idea for the
// BatchRequest/Response to generally stop carrying transactions;
// the requests usually only need a txn id and some timestamp. The
// responses would ideally contain a list of targeted instructions
// about what the client should update, as opposed to a full txn
// that the client is expected to diff with its copy and apply all
// the updates.
Send(context.Context, roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error)
}
// TxnSender is the interface used to call into a CockroachDB instance
// when sending transactional requests. In addition to the usual
// Sender interface, TxnSender facilitates marshaling of transaction
// metadata between the "root" client.Txn and "leaf" instances.
type TxnSender interface {
Sender
// AnchorOnSystemConfigRange ensures that the transaction record,
// if/when it will be created, will be created on the system config
// range. This is useful because some commit triggers only work when
// the EndTxn is evaluated on that range.
//
// An error is returned if the transaction's key has already been
// set by anything other than a previous call to this function
// (i.e. if the transaction already performed any writes).
// It is allowed to call this method multiple times.
AnchorOnSystemConfigRange() error
// GetLeafTxnInputState retrieves the input state necessary and
// sufficient to initialize a LeafTxn from the current RootTxn.
//
// If AnyTxnStatus is passed, then this function never returns
// errors.
GetLeafTxnInputState(context.Context, TxnStatusOpt) (roachpb.LeafTxnInputState, error)
// GetLeafTxnFinalState retrieves the final state of a LeafTxn
// necessary and sufficient to update a RootTxn with progress made
// on its behalf by the LeafTxn.
GetLeafTxnFinalState(context.Context, TxnStatusOpt) (roachpb.LeafTxnFinalState, error)
// UpdateRootWithLeafFinalState updates a RootTxn using the final
// state of a LeafTxn.
UpdateRootWithLeafFinalState(context.Context, *roachpb.LeafTxnFinalState)
// SetUserPriority sets the txn's priority.
SetUserPriority(roachpb.UserPriority) error
// SetDebugName sets the txn's debug name.
SetDebugName(name string)
// TxnStatus exports the txn's status.
TxnStatus() roachpb.TransactionStatus
// SetFixedTimestamp makes the transaction run in an unusual way, at
// a "fixed timestamp": Timestamp and ReadTimestamp are set to ts,
// there's no clock uncertainty, and the txn's deadline is set to ts
// such that the transaction can't be pushed to a different
// timestamp.
//
// This is used to support historical queries (AS OF SYSTEM TIME
// queries and backups). This method must be called on every
// transaction retry (but note that retries should be rare for
// read-only queries with no clock uncertainty).
SetFixedTimestamp(ctx context.Context, ts hlc.Timestamp)
// ManualRestart bumps the transactions epoch, and can upgrade the
// timestamp and priority.
// An uninitialized timestamp can be passed to leave the timestamp
// alone.
//
// Used by the SQL layer which sometimes knows that a transaction
// will not be able to commit and prefers to restart early.
// It is also used after synchronizing concurrent actors using a txn
// when a retryable error is seen.
// TODO(andrei): this second use should go away once we move to a
// TxnAttempt model.
ManualRestart(context.Context, roachpb.UserPriority, hlc.Timestamp)
// UpdateStateOnRemoteRetryableErr updates the txn in response to an
// error encountered when running a request through the txn.
UpdateStateOnRemoteRetryableErr(context.Context, *roachpb.Error) *roachpb.Error
// DisablePipelining instructs the TxnSender not to pipeline
// requests. It should rarely be necessary to call this method. It
// is only recommended for transactions that need extremely precise
// control over the request ordering, like the transaction that
// merges ranges together.
DisablePipelining() error
// ReadTimestamp returns the transaction's current read timestamp.
// Note a transaction can be internally pushed forward in time
// before committing so this is not guaranteed to be the commit
// timestamp. Use CommitTimestamp() when needed.
ReadTimestamp() hlc.Timestamp
// CommitTimestamp returns the transaction's start timestamp.
//
// This method is guaranteed to always return the same value while
// the transaction is open. To achieve this, the first call to this
// method also anchors the start timestamp and prevents the sender
// from automatically pushing transactions forward (i.e. handling
// certain forms of contention / txn conflicts automatically).
//
// In other words, using this method just once increases the
// likelihood that a retry error will bubble up to a client.
//
// See CommitTimestampFixed() below.
CommitTimestamp() hlc.Timestamp
// CommitTimestampFixed returns true if the commit timestamp has
// been fixed to the start timestamp and cannot be pushed forward.
CommitTimestampFixed() bool
// ProvisionalCommitTimestamp returns the transaction's provisional
// commit timestamp. This can move forward throughout the txn's
// lifetime. See the explanatory comments for the WriteTimestamp
// field on TxnMeta.
ProvisionalCommitTimestamp() hlc.Timestamp
// IsSerializablePushAndRefreshNotPossible returns true if the
// transaction is serializable, its timestamp has been pushed and
// there's no chance that refreshing the read spans will succeed
// later (thus allowing the transaction to commit and not be
// restarted). Used to detect whether the txn is guaranteed to get a
// retriable error later.
//
// Note that this method allows for false negatives: sometimes the
// client only figures out that it's been pushed when it sends an
// EndTxn - i.e. it's possible for the txn to have been pushed
// asynchronously by some other operation (usually, but not
// exclusively, by a high-priority txn with conflicting writes).
IsSerializablePushAndRefreshNotPossible() bool
// Active returns true iff some commands have been performed with
// this txn already.
//
// TODO(knz): Remove this, see
// https://github.com/cockroachdb/cockroach/issues/15012
Active() bool
// Epoch returns the txn's epoch.
Epoch() enginepb.TxnEpoch
// PrepareRetryableError generates a
// TransactionRetryWithProtoRefreshError with a payload initialized
// from this txn.
PrepareRetryableError(ctx context.Context, msg string) error
// TestingCloneTxn returns a clone of the transaction's current
// proto. This is for use by tests only. Use
// GetLeafTxnInputState() instead when creating leaf transactions.
TestingCloneTxn() *roachpb.Transaction
// Step creates a sequencing point in the current transaction. A
// sequencing point establishes a snapshot baseline for subsequent
// read-only operations: until the next sequencing point, read-only
// operations observe the data at the time the snapshot was
// established and ignore writes performed since.
//
// Before the first step is taken, the transaction operates as if
// there was a step after every write: each read to a key is able to
// see the latest write before it. This makes the step behavior
// opt-in and backward-compatible with existing code which does not
// need it.
// The method is idempotent.
Step() error
// DisableStepping disables the sequencing point behavior and
// ensures that every read can read the latest write. The effect
// remains disabled until the next call to Step(). The method is
// idempotent.
//
// Note that a Sender is initially in the non-stepping mode,
// i.e. uses reads-own-writes by default.
DisableStepping() error
}
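// Illustrative sketch (not part of the original file): a caller that wants
// statement-level snapshot semantics calls Step() at each statement
// boundary, making the writes so far visible to subsequent reads:
//
//	if err := txnSender.Step(); err != nil {
//		return err
//	}
//	// reads now observe a fixed snapshot until the next Step()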
// TxnStatusOpt represents options for the TxnSender state getters
// (GetLeafTxnInputState and GetLeafTxnFinalState).
type TxnStatusOpt int
const (
// AnyTxnStatus means the state getters will return the info without
// checking the txn's status.
AnyTxnStatus TxnStatusOpt = iota
// OnlyPending means the state getters will return an error if the
// transaction is not in the pending state.
// This is used when sending the txn from root to leaves so that we
// don't create leaves that start up in an aborted state - which is
// not allowed.
OnlyPending
)
// TxnSenderFactory is the interface used to create new instances
// of TxnSender.
type TxnSenderFactory interface {
// RootTransactionalSender returns a root sender to be used for
// transactional requests. txn contains the transaction whose
// requests this sender will carry.
RootTransactionalSender(
txn *roachpb.Transaction, pri roachpb.UserPriority,
) TxnSender
// LeafTransactionalSender returns a leaf sender to be used for
// transactional requests on behalf of a root.
LeafTransactionalSender(tis *roachpb.LeafTxnInputState) TxnSender
	// NonTransactionalSender returns a sender to be used for
	// non-transactional requests. Generally this is a sender that the
	// transactional senders above wrap.
NonTransactionalSender() Sender
}
// SenderFunc is an adapter to allow the use of ordinary functions as
// Senders.
type SenderFunc func(context.Context, roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error)
// Send calls f(ctx, c).
func (f SenderFunc) Send(
ctx context.Context, ba roachpb.BatchRequest,
) (*roachpb.BatchResponse, *roachpb.Error) {
return f(ctx, ba)
}
// NonTransactionalFactoryFunc is a TxnSenderFactory that cannot, in
// fact, create any transactional senders, only non-transactional
// ones.
type NonTransactionalFactoryFunc SenderFunc
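// Compile-time assertion that NonTransactionalFactoryFunc implements
// TxnSenderFactory.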
var _ TxnSenderFactory = NonTransactionalFactoryFunc(nil)
// RootTransactionalSender is part of the TxnSenderFactory.
func (f NonTransactionalFactoryFunc) RootTransactionalSender(
_ *roachpb.Transaction, _ roachpb.UserPriority,
) TxnSender {
panic("not supported")
}
// LeafTransactionalSender is part of the TxnSenderFactory.
func (f NonTransactionalFactoryFunc) LeafTransactionalSender(
_ *roachpb.LeafTxnInputState,
) TxnSender {
panic("not supported")
}
// NonTransactionalSender is part of the TxnSenderFactory.
func (f NonTransactionalFactoryFunc) NonTransactionalSender() Sender {
return SenderFunc(f)
}
// SendWrappedWith is a convenience function which wraps the request
// in a batch and sends it via the provided Sender and headers. It
// returns the unwrapped response or an error. It's valid to pass a
// `nil` context; an empty one is used in that case.
func SendWrappedWith(
ctx context.Context, sender Sender, h roachpb.Header, args roachpb.Request,
) (roachpb.Response, *roachpb.Error) {
ba := roachpb.BatchRequest{}
ba.Header = h
ba.Add(args)
br, pErr := sender.Send(ctx, ba)
if pErr != nil {
return nil, pErr
}
unwrappedReply := br.Responses[0].GetInner()
header := unwrappedReply.Header()
header.Txn = br.Txn
unwrappedReply.SetHeader(header)
return unwrappedReply, nil
}
// SendWrapped is identical to SendWrappedWith with a zero header.
// TODO(tschottdorf): should move this to testutils and merge with
// other helpers which are used, for example, in `storage`.
func SendWrapped(
ctx context.Context, sender Sender, args roachpb.Request,
) (roachpb.Response, *roachpb.Error) {
return SendWrappedWith(ctx, sender, roachpb.Header{}, args)
}
// Wrap returns a Sender which applies the given function before delegating to
// the supplied Sender.
func Wrap(sender Sender, f func(roachpb.BatchRequest) roachpb.BatchRequest) Sender {
return SenderFunc(func(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
return sender.Send(ctx, f(ba))
})
}
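// Example (illustrative sketch, not part of this package): wrapping a
// sender so that every batch is tagged before being sent. `base` is an
// assumed pre-existing Sender, and GatewayNodeID is used here only as a
// plausible header field to mutate:
//
//	tagged := Wrap(base, func(ba roachpb.BatchRequest) roachpb.BatchRequest {
//		ba.GatewayNodeID = 1
//		return ba
//	})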
raster-sld.js
angular.module('examind.components.style.editor.new.raster.sld', [])
.controller('RasterSldController', RasterSldController)
.directive('rasterSld', rasterSldDirective);
function rasterSldDirective() {
return {
restrict: "E",
templateUrl: "components/style-editor/new-style/raster-style/raster-sld/raster-sld.html",
controller: 'RasterSldController',
controllerAs: "rasterSldCtrl",
scope: {
rasterPalette: "=",
selectedDataProperties: "=",
helper: "=",
drawThresholds: "&",
displayNewStyle: "&",
generateRasterPalette: "&"
}
};
}
function RasterSldController($scope, $timeout, $modal) {
var self = this;
self.rasterPalette = $scope.rasterPalette;
self.selectedDataProperties = $scope.selectedDataProperties;
self.helper = $scope.helper;
self.drawThresholds = $scope.drawThresholds();
self.displayNewStyle = $scope.displayNewStyle();
self.generateRasterPalette = $scope.generateRasterPalette();
/**
     * Fix an rzslider bug with Angular when the band selector value changes.
*/
self.fixRZSlider = function () {
self.rasterPalette.palette.rasterMinValue = Number(self.rasterPalette.band.selected.minValue);
self.rasterPalette.palette.rasterMaxValue = Number(self.rasterPalette.band.selected.maxValue);
};
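    /**
     * Select a predefined palette image by its index.
     */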
self.choosePalette = function (index) {
self.rasterPalette.palette.img_palette = 'img/palette' + index + '.png';
self.rasterPalette.palette.index = index;
};
/**
* Remove repartition entry and apply this change on the histogram.
* @param point
*/
self.removeRepartitionEntry = function (point) {
if (self.rasterPalette.repartition) {
var dlg = $modal.open({
templateUrl: 'views/modal-confirm.html',
controller: 'ModalConfirmController',
resolve: {
'keyMsg': function () {
return "dialog.message.confirm.delete.repartitionEntry";
}
}
});
dlg.result.then(function (cfrm) {
if (cfrm) {
var indexToRemove = self.rasterPalette.repartition.indexOf(point);
if (indexToRemove > -1) {
self.rasterPalette.repartition.splice(indexToRemove, 1);
}
//remove threshold vertical line on graph.
if (point.data) {
for (var j = 0; j < self.rasterPalette.dataXArray.length; j++) {
if (self.rasterPalette.dataXArray[j] >= point.data) {
window.c3chart.xgrids.remove({value: j});
break;
}
}
} else {
self.helper.selectedRule.symbolizers[0].colorMap.function.nanColor = null;
}
}
});
}
};
/**
* Action to add new value in colorMap
*/
self.addColorMapEntry = function () {
self.rasterPalette.repartition.push({data: 255, color: '#000000'});
};
/**
* Apply RGB composition for current style and clean up colormap and rasterPalette.repartition.
*/
self.applyRGBComposition = function () {
var rgbChannels = self.rasterPalette.rgbChannels;
var isValid = true;
        //TODO: confirm against SLD conformance: is it necessary to check that each channel's band is not empty?
for (var i = 0; i < rgbChannels.length; i++) {
if (rgbChannels[i].name === '') {
isValid = false;
break;
}
}
if (!isValid) {
alert('Please select a band for all channels!');
return;
} else {
//Apply rgb channels to selected rule
self.helper.selectedRule.symbolizers[0].channelSelection = {
greyChannel: null,
rgbChannels: self.rasterPalette.rgbChannels
};
//clean colorMap for selected rule
self.rasterPalette.repartition = undefined;
self.helper.selectedRule.symbolizers[0].colorMap = undefined;
}
};
/**
* Apply grayscale channel for current style and clean up colormap and rasterPalette.repartition.
*/
self.applyGrayscaleComposition = function () {
self.helper.selectedRule.symbolizers[0].channelSelection = {
greyChannel: self.rasterPalette.greyChannel,
rgbChannels: null
};
//clean colorMap for selected rule
self.rasterPalette.repartition = undefined;
self.helper.selectedRule.symbolizers[0].colorMap = undefined;
};
/**
     * Hack to fix a color picker problem with transparent values:
     * empty means transparent, but Examind does not support an empty color,
     * so we need to replace the empty string with #00000000.
     * Afterwards we change the object reference to trigger the Angular watcher.
* @param point
* @param index
*/
self.checkColor = function (point, index) {
$timeout(function () {
point.color = !point.color ? '#00000000' : point.color;
$scope.optionsSLD.rasterPalette.repartition[index] = Object.assign({}, point);
}, 200);
};
}
sm_controller_test.go
// Copyright (c) 2021 Red Hat, Inc.
// Copyright Contributors to the Open Cluster Management project
package servicemonitor
import (
"testing"
promv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
"github.com/stolostron/multicluster-observability-operator/operators/multiclusterobservability/pkg/config"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func TestRewriteLabels(t *testing.T) {
sm := &promv1.ServiceMonitor{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: ocpMonitoringNamespace,
},
Spec: promv1.ServiceMonitorSpec{
Endpoints: []promv1.Endpoint{
{
Path: "test",
},
},
},
}
updated := rewriteLabels(sm, "")
	if len(updated.Spec.NamespaceSelector.MatchNames) == 0 || updated.Spec.NamespaceSelector.MatchNames[0] != config.GetDefaultNamespace() {
		t.Errorf("Wrong NamespaceSelector: %v", updated.Spec.NamespaceSelector)
	}
if len(updated.Spec.Endpoints[0].MetricRelabelConfigs) != 1 {
t.Errorf("Wrong MetricRelabelConfigs: %v", updated.Spec.Endpoints[0].MetricRelabelConfigs)
}
}
day5.rs
pub(super) fn run() -> Result<(), super::Error> {
let mut plane = [[false; 8]; 128];
for line in super::read_input_lines::<String>("day5")? {
let line = line?;
let (row_num, seat_num) = find_seat(&line)?;
plane[row_num][seat_num] = true;
}
{
let result =
plane.iter()
.enumerate()
.flat_map(|(row_num, row)| {
row.iter()
.enumerate()
.filter_map(move |(seat_num, &occupied)| occupied.then(|| seat_id(row_num, seat_num)))
})
.max()
.ok_or("no solution")?;
println!("5a: {}", result);
assert_eq!(result, 919);
}
	{
		let result =
			plane.iter()
			.enumerate()
			.find_map(|(row_num, row)|
				if row.iter().copied().any(std::convert::identity) {
					row.iter()
						.copied()
						.position(std::ops::Not::not)
						.map(|seat_num| seat_id(row_num, seat_num))
				}
				else {
					None
				})
			.ok_or("no solution")?;
		println!("5b: {}", result);
		assert_eq!(result, 642);
	}
Ok(())
}
fn find_seat(pass: &str) -> Result<(usize, usize), super::Error> {
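	// A boarding pass is a 10-bit number in disguise: F/L encode 0 and B/R
	// encode 1, so a fold with doubling decodes it directly
	// (e.g. "FBFBBFFRLR" -> seat id 357, see the tests below).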
let seat_id: Result<usize, super::Error> =
pass.chars().try_fold(0, |row_num, c| match c {
'F' | 'L' => Ok(row_num * 2),
'B' | 'R' => Ok(row_num * 2 + 1),
_ => Err(format!("malformed pass {:?}", pass).into()),
});
let seat_id = seat_id?;
let row_num = seat_id / 8;
let seat_num = seat_id % 8;
Ok((row_num, seat_num))
}
fn seat_id(row_num: usize, seat_num: usize) -> usize {
row_num * 8 + seat_num
}
#[cfg(test)]
mod tests {
#[test]
fn find_seat_and_id() {
fn find_seat_and_id(pass: &str) -> (usize, usize, usize) {
let (row_num, seat_num) = super::find_seat(pass).unwrap();
let seat_id = super::seat_id(row_num, seat_num);
(row_num, seat_num, seat_id)
}
assert_eq!(find_seat_and_id("FBFBBFFRLR"), (44, 5, 357));
assert_eq!(find_seat_and_id("BFFFBBFRRR"), (70, 7, 567));
assert_eq!(find_seat_and_id("FFFBBBFRRR"), (14, 7, 119));
assert_eq!(find_seat_and_id("BBFFBBFRLL"), (102, 4, 820));
}
}
ArrowCircleDownLeft.d.ts
import { IconProps } from "../lib";
declare const ArrowCircleDownLeft: (props: IconProps, ref: any) => import("solid-js").JSX.Element;
export default ArrowCircleDownLeft;
64-minimum-path-sum.py
class Solution:
def minPathSum(self, grid: List[List[int]]) -> int:
"""
[1,3,1]
[1,5,1]
[4,2,1]
time O (nm)
space O(nm)
        state -> sums[r][c] = min path sum ending at position (r, c)
        initial state -> sums[0][0…cols] = inf
                      -> sums[0…rows][0] = inf
        transition function -> sums[r][c] = min(sums[r-1][c], sums[r][c-1]) + grid[r-1][c-1]
        calculation order: 1…rows; 1…cols
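        e.g. for the grid above, the minimum path sum is 7 (1 -> 3 -> 1 -> 1 -> 1)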
"""
return self.find_min_path_sum(grid)
def find_min_path_sum(self, grid):
        rows = len(grid)
        cols = len(grid[0]) if rows else 0
        if not rows or not cols:
            return -1
sums = [[float("inf") for _ in range(cols+1)] for _ in range(rows+1)]
for r in range(1, rows+1):
for c in range(1, cols+1):
if r == 1 and c == 1:
sums[r][c] = grid[r-1][c-1]
else:
sums[r][c] = min(sums[r-1][c], sums[r][c-1]) + grid[r-1][c-1]
return sums[rows][cols]
transform.rs
use crate::common::*;
use super::{
expr::As, Aggregation, AudienceBoard, Between, BinaryOp, BinaryOperator, Column, Context,
ContextKey, Distribution, Expr, ExprMeta, ExprT, ExprTree, Function, FunctionName, GenericRel,
GenericRelTree, Hash, HashAlgorithm, Literal, LiteralValue, Noisy, Projection, Rel, RelT,
Selection, Table, TableMeta, ToContext, TryToContext, ValidateError,
};
use crate::node::Access;
use crate::opt::{ContextError, RebaseRel};
use super::privacy::*;
/// Small helper to figure out if a given context key matches any of the given field patterns
fn matches_in<'a, I: IntoIterator<Item = &'a String>>(
iter: I,
key: &'a ContextKey,
) -> Result<bool, ValidateError> {
for field in iter.into_iter() {
if key.matches(&field.parse()?) {
return Ok(true);
}
}
return Ok(false);
}
#[derive(Debug, Clone)]
pub struct Policy(pub policy::Policy);
pub struct Costly<T> {
root: T,
cost: f64,
}
impl<T> From<T> for Costly<T> {
fn from(root: T) -> Self {
Self { root, cost: 0. }
}
}
impl ExprTransform for WhitelistPolicy {
fn transform_expr(&self, expr: &ExprT) -> Result<Costly<ExprT>, Error> {
match expr.as_ref() {
Expr::Column(Column(context_key)) => {
if matches_in(self.fields.iter(), &context_key)? {
Ok(expr.clone().into())
} else {
Err(Error::NoMatch)
}
}
_ => Err(Error::NoMatch),
}
}
}
impl ExprTransform for HashPolicy {
fn transform_expr(&self, expr: &ExprT) -> Result<Costly<ExprT>, Error> {
match expr.as_ref() {
Expr::Column(Column(context_key)) => {
if matches_in(self.fields.iter(), &context_key)? {
Ok(ExprT::from(Expr::As(As {
expr: ExprT::from(Expr::Hash(Hash {
algo: HashAlgorithm::default(),
expr: expr.clone(),
salt: self.salt.clone(),
})),
alias: context_key.name().to_string(),
}))
.into())
} else {
Err(Error::NoMatch)
}
}
_ => Err(Error::NoMatch),
}
}
}
impl ExprTransform for ObfuscatePolicy {
fn transform_expr(&self, expr: &ExprT) -> Result<Costly<ExprT>, Error> {
match expr.as_ref() {
Expr::Column(Column(context_key)) => {
if matches_in(self.fields.iter(), &context_key)? {
let expr = ExprT::from(Expr::Literal(Literal(LiteralValue::Null)));
let alias = context_key.name().to_string();
Ok(ExprT::from(Expr::As(As { expr, alias })).into())
} else {
Err(Error::NoMatch)
}
}
_ => Err(Error::NoMatch),
}
}
}
impl ExprTransform for Policy {
fn transform_expr(&self, expr: &ExprT) -> Result<Costly<ExprT>, Error> {
match &self.0 {
policy::Policy::Whitelist(whitelist) => whitelist.transform_expr(expr),
policy::Policy::Hash(hash) => hash.transform_expr(expr),
policy::Policy::Obfuscate(obfuscate) => obfuscate.transform_expr(expr),
_ => Err(Error::NoMatch),
}
}
}
#[async_trait]
impl RelTransform for DifferentialPrivacyPolicy {
async fn transform_rel<A: Access>(
&self,
rel: &RelT,
access: &A,
) -> Result<Costly<RelT>, Error> {
match rel.as_ref() {
GenericRel::Aggregation(Aggregation {
attributes,
group_by,
from,
}) => {
// FIXME: This could be optimized
let getter = FlexTableMetaGetter {
primary: self.entity.clone(),
access,
};
let flex = getter.rebase(rel).await;
if let Err(err) = flex.board.as_ref() {
trace!("rebase lead to incorrect tree, dropping match: {}", err);
return Err(Error::NoMatch);
}
let (flex_attributes, flex_group_by, flex_from) = match flex.as_ref() {
GenericRel::Aggregation(Aggregation {
attributes,
group_by,
from,
}) => (attributes, group_by, from),
_ => unreachable!(),
};
let mut factor = 1.;
let mut grouping_keys = HashSet::new();
for (expr, flex_expr) in group_by.iter().zip(flex_group_by.iter()) {
if let Expr::Column(Column(column_key)) = expr.as_ref() {
grouping_keys.insert(column_key);
let col_maximum_frequency = flex_expr
.board
.as_ref()
.map_err(|e| e.clone())?
.domain_sensitivity
.maximum_frequency
.0
.ok_or(Error::NoMatch)?;
factor *= col_maximum_frequency as f64;
} else {
return Err(Error::NoMatch);
}
if flex_expr.board.as_ref().map_err(|e| e.clone())?.taint.0 {
return Err(Error::NoMatch);
}
}
let bucket_alias = "__bucket_count";
let bucket_key = ContextKey::with_name(bucket_alias);
let maximum_frequency = flex_from
.board
.as_ref()
.map_err(|e| e.clone())?
.primary
.maximum_frequency
.0
.ok_or(Error::NoMatch)?;
let threshold = (self.bucket_size * maximum_frequency) as i64;
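                // Buckets whose noised count does not exceed this threshold are
                // filtered out below, limiting the influence of rare groups.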
let one = ExprT::from(Expr::Literal(Literal(LiteralValue::Long(1))));
// this cost is per row
let mut cost = 0.;
let mut new_attributes = Vec::new();
let mut projection_attributes = Vec::new();
for (i, (expr, flex_expr)) in
attributes.iter().zip(flex_attributes.iter()).enumerate()
{
match expr.as_ref() {
Expr::Column(Column(column_key)) => {
if !grouping_keys.contains(&column_key) {
return Err(Error::NoMatch);
}
new_attributes.push(ExprT::from(Expr::As(As {
expr: expr.clone(),
alias: column_key.name().to_string(),
})));
projection_attributes.push(expr.clone());
}
Expr::Function(Function {
name,
args,
distinct,
}) => {
// assuming function is aggregation
let board = flex_expr.board.as_ref().map_err(|e| e.clone())?;
let sensitivity = board
.domain_sensitivity
.sensitivity
.0
.ok_or(Error::NoMatch)?;
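                            // Calibrate Laplace noise to sensitivity/epsilon,
                            // the classic mechanism for epsilon-differential
                            // privacy on a numeric aggregate.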
let distribution = Distribution::Laplace {
mean: 0.,
variance: sensitivity / self.epsilon,
};
cost += self.epsilon;
let alias = format!("f{}_", i);
let new_expr = ExprT::from(Expr::As(As {
expr: ExprT::from(Expr::Noisy(Noisy {
expr: expr.clone(),
distribution,
})),
alias: alias.clone(),
}));
new_attributes.push(new_expr);
let alias_as_col =
ExprT::from(Expr::Column(Column(ContextKey::with_name(&alias))));
projection_attributes.push(alias_as_col);
}
_ => return Err(Error::NoMatch),
}
}
new_attributes.push(ExprT::from(Expr::As(As {
expr: ExprT::from(Expr::Noisy(Noisy {
expr: ExprT::from(Expr::Function(Function {
name: FunctionName::Count,
args: vec![one.clone()],
distinct: false,
})),
distribution: Distribution::Laplace {
mean: 0.,
variance: 1. / self.epsilon,
},
})),
alias: bucket_alias.to_string(),
})));
let noised_root = RelT::from(GenericRel::Aggregation(Aggregation {
attributes: new_attributes,
group_by: group_by.clone(),
from: from.clone(),
}));
let where_bucket_count = ExprT::from(Expr::BinaryOp(BinaryOp {
op: BinaryOperator::Gt,
left: ExprT::from(Expr::Column(Column(bucket_key))),
right: { ExprT::from(Expr::Literal(Literal(LiteralValue::Long(threshold)))) },
}));
let new_root = RelT::from(GenericRel::Projection(Projection {
from: RelT::from(GenericRel::Selection(Selection {
from: noised_root,
where_: where_bucket_count,
})),
attributes: projection_attributes,
}));
let ctx = access.context().await.unwrap();
let new_root = RebaseRel::<'_, TableMeta>::rebase(&ctx, &new_root).await; // repair it
Ok(Costly {
root: new_root,
cost,
})
}
_ => Err(Error::NoMatch),
}
}
}
#[async_trait]
impl RelTransform for AggregationPolicy {
async fn transform_rel<A: Access>(
&self,
rel: &RelT,
access: &A,
) -> Result<Costly<RelT>, Error> {
match rel.as_ref() {
GenericRel::Aggregation(Aggregation {
attributes,
group_by,
from,
}) => {
let entity_key = ContextKey::with_name(&self.entity);
let entity_alias_str = format!("policy_{}", entity_key.name());
let entity_alias = ContextKey::with_name(&entity_alias_str);
let ctx = access.context().await.unwrap();
let rewritten: RelT = rel
.clone()
.try_fold(&mut |child| match child {
GenericRel::Table(Table(context_key)) => {
let table_meta = ctx.get(&context_key).unwrap();
let columns = table_meta.to_context();
if columns.get_column(&entity_key).is_ok() {
Ok(RelT {
root: GenericRel::Table(Table(context_key)),
board: Ok(table_meta.clone()),
})
} else {
Err(Error::NoMatch)
}
}
GenericRel::Projection(Projection { attributes, from }) => {
let mut attributes = attributes.clone();
attributes.push(ExprT::from(Expr::Column(Column(entity_key.clone()))));
Ok(RelT::from(GenericRel::Projection(Projection {
attributes,
from,
})))
}
GenericRel::Aggregation(Aggregation {
attributes,
from,
group_by,
}) => {
let mut attributes = attributes
.iter()
.cloned()
.enumerate()
.map(|(i, expr)| {
ExprT::from(Expr::As(As {
expr,
alias: format!("f{}_", i),
}))
})
.collect::<Vec<_>>();
attributes.push(ExprT::from(Expr::As(As {
expr: ExprT::from(Expr::Function(Function {
name: FunctionName::Count,
args: vec![ExprT::from(Expr::Column(Column(
entity_key.clone(),
)))],
distinct: true,
})),
alias: entity_alias.name().to_string(),
})));
Ok(RelT::from(GenericRel::Aggregation(Aggregation {
attributes,
from,
group_by,
})))
}
_ => Ok(RelT::from(child)),
})
.unwrap();
let rewritten = RebaseRel::<'_, TableMeta>::rebase(&ctx, &rewritten).await; // repair it
let board = rewritten.board.as_ref().map_err(|_| Error::NoMatch)?;
if board.to_context().get(&entity_alias).is_ok() {
let where_ = ExprT::from(Expr::BinaryOp(BinaryOp {
left: ExprT::from(Expr::Column(Column(entity_alias))),
op: BinaryOperator::Gt,
right: ExprT::from(Expr::Literal(Literal(LiteralValue::Long(
self.minimum_bucket_size as i64,
)))),
}));
let num_cols = board.columns.len();
let new_root = RelT::from(GenericRel::Projection(Projection {
from: RelT::from(GenericRel::Selection(Selection {
from: rewritten,
where_,
})),
attributes: {
(0..(num_cols - 1))
.into_iter()
.map(|i| {
let context_key = ContextKey::with_name(&format!("f{}_", i));
ExprT::from(Expr::Column(Column(context_key)))
})
.collect::<Vec<_>>()
},
}));
let new_root = RebaseRel::<'_, TableMeta>::rebase(&ctx, &new_root).await;
Ok(new_root.into())
} else {
Err(Error::NoMatch)
}
}
_ => Err(Error::NoMatch),
}
}
}
#[async_trait]
impl RelTransform for Policy {
async fn transform_rel<A: Access>(
&self,
rel: &RelT,
access: &A,
) -> Result<Costly<RelT>, Error> {
match &self.0 {
policy::Policy::DifferentialPrivacy(differential_privacy) => {
differential_privacy.transform_rel(rel, access).await
}
policy::Policy::Aggregation(aggregation) => {
aggregation.transform_rel(rel, access).await
}
_ => Err(Error::NoMatch),
}
}
}
#[derive(derive_more::From, Debug)]
pub enum Error {
NoMatch,
Validate(ValidateError),
}
pub trait ExprTransform {
fn transform_expr(&self, expr: &ExprT) -> Result<Costly<ExprT>, Error>;
}
#[async_trait]
pub trait RelTransform {
async fn transform_rel<A: Access>(&self, rel: &RelT, access: &A)
-> Result<Costly<RelT>, Error>;
}
#[derive(Clone, Debug)]
pub struct PolicyBinding {
pub policies: Vec<Policy>,
pub priority: u64,
pub budget: Option<PolicyBudget>,
}
impl PolicyBinding {
fn is_in_budget(&self, proposed: f64) -> bool {
self.budget
.as_ref()
.map(|PolicyBudget { maximum, used, .. }| used + proposed <= *maximum)
.unwrap_or(true)
}
}
pub struct RelTransformer<'a, A> {
bindings: &'a Context<PolicyBinding>,
audience: &'a BlockType,
access: &'a A,
}
impl<'a, A> RelTransformer<'a, A>
where
A: Access,
{
pub fn new(
bindings: &'a Context<PolicyBinding>,
audience: &'a BlockType,
access: &'a A,
) -> Self {
debug!(
"initializing relation transformer for {} with bindings={:?}",
audience, bindings
);
Self {
bindings,
audience,
access,
}
}
/// Filter the policy bindings that apply to the `context_key`
fn filter_bindings<'b>(&'b self, context_key: &'b ContextKey) -> Context<&'a PolicyBinding> {
debug!("sifting policies for {}", context_key);
self.bindings
.iter()
.filter_map(move |(key, binding)| {
if key.prefix_matches(context_key) {
Some((key.clone(), binding))
} else {
None
}
})
.collect()
}
pub fn transform_rel<'b>(
&'b self,
rel_t: &'b RelT,
) -> Pin<Box<dyn Future<Output = Result<Transformed<RelT>, Error>> + Send + 'b>> {
async move {
let unraveled = rel_t.root.map(&mut |child| child.as_ref());
let proposed = match unraveled {
Rel::Projection(Projection {
mut attributes,
from:
RelT {
root: Rel::Table(Table(context_key)),
board,
},
}) => {
debug!("potential expr leaf policy condition met");
let from = RelT {
root: Rel::Table(Table(context_key.clone())),
board: board.clone(),
};
let bindings = self.filter_bindings(context_key);
debug!("bindings filtered to {:?}", bindings);
let mut cost = HashMap::new();
let mut priority = 0;
let expr_transformer = ExprTransformer::new(&bindings, &self.audience);
for expr_t in attributes.iter_mut() {
match expr_transformer.transform_expr(expr_t) {
Ok(transformed) => {
debug!("successfully transformed expression");
transformed.add_to(&mut cost);
*expr_t = transformed.root;
priority = max(priority, transformed.priority);
}
Err(Error::NoMatch) => {}
Err(err) => return Err(err),
}
}
let root = RelT::from(Rel::Projection(Projection { attributes, from }));
debug!("rebuilt leaf relation node {:?}", root);
let audience = root
.board
.as_ref()
.map(|board| &board.audience)
.map_err(|e| Error::Validate(e.clone()))?;
debug!(
"after transformation of expression, audience: {:?}",
audience
);
if audience.contains(&self.audience) {
vec![Transformed {
root,
cost,
priority,
}]
} else {
vec![]
}
}
_ => {
let provenance = rel_t
.board
.as_ref()
.map_err(|e| Error::Validate(e.clone()))?
.provenance
.as_ref();
if let Some(provenance) = provenance {
let bindings = self.filter_bindings(provenance);
let mut candidates = Vec::new();
for (key, binding) in bindings.iter() {
for policy in binding.policies.iter() {
match policy.transform_rel(rel_t, self.access).await {
Ok(Costly { mut root, cost }) => {
root.board
.as_mut()
.map(|board| {
board.audience.insert(self.audience.clone())
})
.map_err(|e| Error::Validate(e.clone()))?;
let transformed =
Transformed::new(root, key, cost, binding.priority);
candidates.push(transformed);
}
Err(Error::NoMatch) => {}
Err(err) => return Err(err),
}
}
}
candidates
} else {
vec![]
}
}
};
if let Some(best) = Transformed::best_candidate(proposed) {
debug!("best candidate for relation: {:?}", best);
Ok(best)
} else {
debug!("no candidate for relation at this level");
if rel_t.is_leaf() {
debug!("leaf relation attained, no match");
return Err(Error::NoMatch);
}
let state = Mutex::new((HashMap::new(), 0u64));
let state_ref = &state;
let root = RelT::from(
rel_t
.root
.map_async(async move |child| {
self.transform_rel(child).await.map(|transformed| {
let mut state = state_ref.lock().unwrap();
transformed.add_to(&mut state.0);
state.1 = max(state.1, transformed.priority);
transformed.root
})
})
.await
.into_result()?,
);
let state_ = state.lock().unwrap();
let transformed = Transformed {
root,
cost: state_.0.clone(),
priority: state_.1,
};
debug!("from level below, got best relation tree {:?}", transformed);
Ok(transformed)
}
}
.boxed()
}
}
pub struct ExprTransformer<'a> {
bindings: &'a Context<&'a PolicyBinding>,
audience: &'a BlockType,
}
impl<'a> ExprTransformer<'a> {
fn new(bindings: &'a Context<&'a PolicyBinding>, audience: &'a BlockType) -> Self {
Self { bindings, audience }
}
fn transform_expr(&self, expr_t: &ExprT) -> Result<Transformed<ExprT>, Error> {
let mut proposed = Vec::new();
for (key, binding) in self.bindings.iter() {
let priority = binding.priority;
for policy in binding.policies.iter() {
match policy.transform_expr(expr_t) {
Ok(Costly { mut root, cost }) => {
root.board
.as_mut()
.map(|board| {
board.audience.insert(self.audience.clone());
})
.map_err(|e| Error::Validate(e.clone()))?;
let transformed = Transformed::new(root, key, cost, priority);
proposed.push(transformed);
}
Err(Error::NoMatch) => {}
Err(err) => return Err(err),
}
}
}
if let Some(best) = Transformed::best_candidate(proposed) {
// select the best strategy
Ok(best)
} else {
// no match so far, let's try deeper
if expr_t.is_leaf() {
return Err(Error::NoMatch);
}
let mut cost = HashMap::new();
let mut priority = 0;
let root = ExprT::from(
expr_t
.root
.map(&mut |child| {
self.transform_expr(child).map(|transformed| {
transformed.add_to(&mut cost);
priority = max(priority, transformed.priority);
transformed.root
})
})
.into_result()?,
);
Ok(Transformed {
root,
cost,
priority,
})
}
}
}
#[derive(Debug, Clone)]
pub struct Transformed<T> {
pub root: T,
pub cost: HashMap<ContextKey, f64>,
pub priority: u64,
}
impl<T> Transformed<T> {
pub fn default(root: T) -> Self {
Self {
root,
cost: HashMap::new(),
priority: 0,
}
}
fn new(root: T, binding_key: &ContextKey, cost: f64, priority: u64) -> Self {
Self {
root,
cost: {
let mut cost_ = HashMap::new();
cost_.insert(binding_key.clone(), cost);
cost_
},
priority,
}
}
pub fn into_inner(self) -> T {
self.root
}
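    /// Among the proposed candidates, keeps those sharing the highest
    /// priority and returns the one with the lowest total cost.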
fn best_candidate<I>(iter: I) -> Option<Self>
where
I: IntoIterator<Item = Self>,
{
let proposed: Vec<_> = iter.into_iter().collect();
let highest = proposed
.iter()
.max_by(|l, r| l.priority.cmp(&r.priority))
.map(|highest| highest.priority)?;
let candidates = proposed
.into_iter()
.filter(|t| t.priority == highest)
.collect::<Vec<_>>();
let best = candidates
.into_iter()
.min_by(|l, r| l.total_cost().partial_cmp(&r.total_cost()).unwrap())
.unwrap();
Some(best)
}
fn total_cost(&self) -> f64 {
self.cost.values().sum()
}
fn add_to(&self, costs: &mut HashMap<ContextKey, f64>) {
for (key, cost) in self.cost.iter() {
*costs.entry(key.clone()).or_default() += cost;
}
}
}
#[cfg(test)]
pub mod tests {
use super::*;
use crate::node::state::tests::read_manifest;
use crate::node::tests::mk_node;
use crate::opt::validate::Validator;
use tokio::runtime::Runtime;
use parallax_api::block_type;
fn test_transform_for(query: &str) -> Transformed<RelT> {
let random_scope = uuid::Uuid::new_v4().to_simple().to_string();
let access = Arc::new(mk_node(&random_scope));
for resource in read_manifest().into_iter() {
access.create_resource(resource).unwrap();
}
Runtime::new().unwrap().block_on(async {
let ctx = access.context().await.unwrap();
let validator = Validator::new(&ctx);
let policies = access.policies_for_group("wheel").unwrap();
let rel_t = validator.validate_str(query).unwrap();
let audience = block_type!("resource"."group"."wheel");
let transformer = RelTransformer::new(&policies, &audience, &access);
let rel_t = transformer
.transform_rel(&rel_t)
.await
.or_else(|error| match error {
super::Error::NoMatch => Ok(Transformed::default(rel_t)),
super::Error::Validate(err) => Err(err),
})
.unwrap();
rel_t
})
}
#[test]
fn transform_blocked() {
let rel_t = test_transform_for(
"\
SELECT person_id FROM patient_data.person
",
)
.into_inner();
let table_meta = rel_t.board.unwrap();
assert!(table_meta.audience.is_empty())
}
#[test]
fn transform_whitelist() {
let rel_t = test_transform_for(
"\
SELECT vocabulary_id FROM patient_data.vocabulary
",
)
.into_inner();
let table_meta = rel_t.board.unwrap();
assert!(table_meta
.audience
.contains(&block_type!("resource"."group"."wheel")))
}
use crate::opt::expr::As;
#[test]
fn transform_obfuscation() {
let rel_t = test_transform_for(
"\
SELECT address_1 FROM patient_data.location
",
)
.into_inner();
let table_meta = rel_t.board.unwrap();
assert!(table_meta
.audience
.contains(&block_type!("resource"."group"."wheel")));
match rel_t.root {
Rel::Projection(Projection { attributes, .. }) => {
match attributes[0]
.as_ref()
.map_owned(&mut |child| child.as_ref())
{
Expr::As(As {
expr: Expr::Literal(Literal(LiteralValue::Null)),
alias,
}) => assert_eq!(alias, "address_1".to_string()),
_ => panic!("`review_id` was not obfuscated"),
}
}
_ => unreachable!(),
}
}
#[test]
fn transform_hash() {
let rel_t = test_transform_for(
"\
SELECT care_site_name FROM patient_data.care_site
",
)
.into_inner();
let table_meta = rel_t.board.unwrap();
assert!(table_meta
.audience
.contains(&block_type!("resource"."group"."wheel")));
match rel_t.root {
Rel::Projection(Projection { attributes, .. }) => {
match attributes[0]
.as_ref()
.map_owned(&mut |child| child.as_ref())
{
Expr::As(As { expr, .. }) => match expr {
Expr::Hash(..) => {}
_ => panic!("`care_site_name` was not hashed"),
},
_ => panic!("`care_site_name` was not hashed"),
}
}
_ => unreachable!(),
}
}
#[test]
fn transform_diff_priv() {
let rel_t = test_transform_for(
"\
SELECT gender_concept_id, COUNT(person_id) \
FROM patient_data.person \
GROUP BY gender_concept_id
",
);
// For now this is enough in order to check that diff priv was triggered
// as it is the only policy with an associated cost
assert!(*rel_t.cost.values().next().unwrap() > 0f64);
}
#[test]
fn transform_aggregation() {
let rel_t = test_transform_for(
"\
SELECT state, COUNT(DISTINCT location_id) \
FROM patient_data.location \
GROUP BY state \
",
);
let table_meta = rel_t.root.board.unwrap();
assert!(table_meta
.audience
.contains(&block_type!("resource"."group"."wheel")));
}
}
module_b_2.test-mojom.js
// mojo/public/js/test/module_b_2.test-mojom.js is auto generated by mojom_bindings_generator.py, do not edit
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
'use strict';
(function() {
var mojomId = 'mojo/public/js/test/module_b_2.test-mojom';
if (mojo.internal.isMojomLoaded(mojomId)) {
console.warn('The following mojom is loaded multiple times: ' + mojomId);
return;
}
mojo.internal.markMojomLoaded(mojomId);
var bindings = mojo;
var associatedBindings = mojo;
var codec = mojo.internal;
var validator = mojo.internal;
var exports = mojo.internal.exposeNamespace('moduleB');
var module_a$ =
mojo.internal.exposeNamespace('moduleA');
if (mojo.config.autoLoadMojomDeps) {
mojo.internal.loadMojomIfNecessary(
'mojo/public/js/test/module_a.test-mojom', 'module_a.test-mojom.js');
}
var module_b_1$ =
mojo.internal.exposeNamespace('moduleB');
if (mojo.config.autoLoadMojomDeps) {
mojo.internal.loadMojomIfNecessary(
'mojo/public/js/test/module_b_1.test-mojom', 'module_b_1.test-mojom.js');
}
function TestStructB2(values) {
this.initDefaults_();
this.initFields_(values);
}
TestStructB2.prototype.initDefaults_ = function() {
this.a1 = null;
this.a2 = null;
this.b2 = null;
};
TestStructB2.prototype.initFields_ = function(fields) {
for(var field in fields) {
if (this.hasOwnProperty(field))
this[field] = fields[field];
}
};
TestStructB2.validate = function(messageValidator, offset) {
var err;
err = messageValidator.validateStructHeader(offset, codec.kStructHeaderSize);
if (err !== validator.validationError.NONE)
return err;
var kVersionSizes = [
{version: 0, numBytes: 32}
];
err = messageValidator.validateStructVersion(offset, kVersionSizes);
if (err !== validator.validationError.NONE)
return err;
// validate TestStructB2.a1
err = messageValidator.validateStructPointer(offset + codec.kStructHeaderSize + 0, module_a$.TestStructA1, false);
if (err !== validator.validationError.NONE)
return err;
// validate TestStructB2.a2
err = messageValidator.validateStructPointer(offset + codec.kStructHeaderSize + 8, module_a$.TestStructA2, false);
if (err !== validator.validationError.NONE)
return err;
// validate TestStructB2.b2
err = messageValidator.validateStructPointer(offset + codec.kStructHeaderSize + 16, TestStructB2, false);
if (err !== validator.validationError.NONE)
return err;
return validator.validationError.NONE;
};
TestStructB2.encodedSize = codec.kStructHeaderSize + 24;
TestStructB2.decode = function(decoder) {
var packed;
var val = new TestStructB2();
var numberOfBytes = decoder.readUint32();
var version = decoder.readUint32();
val.a1 =
decoder.decodeStructPointer(module_a$.TestStructA1);
val.a2 =
decoder.decodeStructPointer(module_a$.TestStructA2);
val.b2 =
decoder.decodeStructPointer(TestStructB2);
return val;
};
TestStructB2.encode = function(encoder, val) {
var packed;
encoder.writeUint32(TestStructB2.encodedSize);
encoder.writeUint32(0);
encoder.encodeStructPointer(module_a$.TestStructA1, val.a1);
encoder.encodeStructPointer(module_a$.TestStructA2, val.a2);
encoder.encodeStructPointer(TestStructB2, val.b2);
};
function TestInterface_PassA1_Params(values) {
this.initDefaults_();
this.initFields_(values);
}
TestInterface_PassA1_Params.prototype.initDefaults_ = function() {
this.a1 = null;
};
TestInterface_PassA1_Params.prototype.initFields_ = function(fields) {
for(var field in fields) {
if (this.hasOwnProperty(field))
this[field] = fields[field];
}
};
TestInterface_PassA1_Params.validate = function(messageValidator, offset) {
var err;
err = messageValidator.validateStructHeader(offset, codec.kStructHeaderSize);
if (err !== validator.validationError.NONE)
return err;
var kVersionSizes = [
{version: 0, numBytes: 16}
];
err = messageValidator.validateStructVersion(offset, kVersionSizes);
if (err !== validator.validationError.NONE)
return err;
// validate TestInterface_PassA1_Params.a1
err = messageValidator.validateStructPointer(offset + codec.kStructHeaderSize + 0, module_a$.TestStructA1, false);
if (err !== validator.validationError.NONE)
return err;
return validator.validationError.NONE;
};
TestInterface_PassA1_Params.encodedSize = codec.kStructHeaderSize + 8;
TestInterface_PassA1_Params.decode = function(decoder) {
var packed;
var val = new TestInterface_PassA1_Params();
var numberOfBytes = decoder.readUint32();
var version = decoder.readUint32();
val.a1 =
decoder.decodeStructPointer(module_a$.TestStructA1);
return val;
};
TestInterface_PassA1_Params.encode = function(encoder, val) {
var packed;
encoder.writeUint32(TestInterface_PassA1_Params.encodedSize);
encoder.writeUint32(0);
encoder.encodeStructPointer(module_a$.TestStructA1, val.a1);
};
function TestInterface_PassB1_Params(values) {
this.initDefaults_();
this.initFields_(values);
}
TestInterface_PassB1_Params.prototype.initDefaults_ = function() {
this.b1 = null;
};
TestInterface_PassB1_Params.prototype.initFields_ = function(fields) {
for(var field in fields) {
if (this.hasOwnProperty(field))
this[field] = fields[field];
}
};
TestInterface_PassB1_Params.validate = function(messageValidator, offset) {
var err;
err = messageValidator.validateStructHeader(offset, codec.kStructHeaderSize);
if (err !== validator.validationError.NONE)
return err;
var kVersionSizes = [
{version: 0, numBytes: 16}
];
err = messageValidator.validateStructVersion(offset, kVersionSizes);
if (err !== validator.validationError.NONE)
return err;
// validate TestInterface_PassB1_Params.b1
err = messageValidator.validateStructPointer(offset + codec.kStructHeaderSize + 0, module_b_1$.TestStructB1, false);
if (err !== validator.validationError.NONE)
return err;
return validator.validationError.NONE;
};
TestInterface_PassB1_Params.encodedSize = codec.kStructHeaderSize + 8;
TestInterface_PassB1_Params.decode = function(decoder) {
var packed;
var val = new TestInterface_PassB1_Params();
var numberOfBytes = decoder.readUint32();
var version = decoder.readUint32();
val.b1 =
decoder.decodeStructPointer(module_b_1$.TestStructB1);
return val;
};
TestInterface_PassB1_Params.encode = function(encoder, val) {
var packed;
encoder.writeUint32(TestInterface_PassB1_Params.encodedSize);
encoder.writeUint32(0);
encoder.encodeStructPointer(module_b_1$.TestStructB1, val.b1);
};
function TestInterface_PassB2_Params(values) {
this.initDefaults_();
this.initFields_(values);
}
TestInterface_PassB2_Params.prototype.initDefaults_ = function() {
this.b2 = null;
};
TestInterface_PassB2_Params.prototype.initFields_ = function(fields) {
for(var field in fields) {
if (this.hasOwnProperty(field))
this[field] = fields[field];
}
};
TestInterface_PassB2_Params.validate = function(messageValidator, offset) {
var err;
err = messageValidator.validateStructHeader(offset, codec.kStructHeaderSize);
if (err !== validator.validationError.NONE)
return err;
var kVersionSizes = [
{version: 0, numBytes: 16}
];
err = messageValidator.validateStructVersion(offset, kVersionSizes);
if (err !== validator.validationError.NONE)
return err;
// validate TestInterface_PassB2_Params.b2
err = messageValidator.validateStructPointer(offset + codec.kStructHeaderSize + 0, TestStructB2, false);
if (err !== validator.validationError.NONE)
return err;
return validator.validationError.NONE;
};
TestInterface_PassB2_Params.encodedSize = codec.kStructHeaderSize + 8;
TestInterface_PassB2_Params.decode = function(decoder) {
var packed;
var val = new TestInterface_PassB2_Params();
var numberOfBytes = decoder.readUint32();
var version = decoder.readUint32();
val.b2 =
decoder.decodeStructPointer(TestStructB2);
return val;
};
TestInterface_PassB2_Params.encode = function(encoder, val) {
var packed;
encoder.writeUint32(TestInterface_PassB2_Params.encodedSize);
encoder.writeUint32(0);
encoder.encodeStructPointer(TestStructB2, val.b2);
};
var kTestInterface_PassA1_Name = 490516208;
var kTestInterface_PassB1_Name = 2037498148;
var kTestInterface_PassB2_Name = 212424705;
function TestInterfacePtr(handleOrPtrInfo) {
this.ptr = new bindings.InterfacePtrController(TestInterface,
handleOrPtrInfo);
}
function TestInterfaceAssociatedPtr(associatedInterfacePtrInfo) {
this.ptr = new associatedBindings.AssociatedInterfacePtrController(
TestInterface, associatedInterfacePtrInfo);
}
TestInterfaceAssociatedPtr.prototype =
Object.create(TestInterfacePtr.prototype);
TestInterfaceAssociatedPtr.prototype.constructor =
TestInterfaceAssociatedPtr;
function TestInterfaceProxy(receiver) {
this.receiver_ = receiver;
}
TestInterfacePtr.prototype.passA1 = function() {
return TestInterfaceProxy.prototype.passA1
.apply(this.ptr.getProxy(), arguments);
};
TestInterfaceProxy.prototype.passA1 = function(a1) {
var params_ = new TestInterface_PassA1_Params();
params_.a1 = a1;
var builder = new codec.MessageV0Builder(
kTestInterface_PassA1_Name,
codec.align(TestInterface_PassA1_Params.encodedSize));
builder.encodeStruct(TestInterface_PassA1_Params, params_);
var message = builder.finish();
this.receiver_.accept(message);
};
TestInterfacePtr.prototype.passB1 = function() {
return TestInterfaceProxy.prototype.passB1
.apply(this.ptr.getProxy(), arguments);
};
TestInterfaceProxy.prototype.passB1 = function(b1) {
var params_ = new TestInterface_PassB1_Params();
params_.b1 = b1;
var builder = new codec.MessageV0Builder(
kTestInterface_PassB1_Name,
codec.align(TestInterface_PassB1_Params.encodedSize));
builder.encodeStruct(TestInterface_PassB1_Params, params_);
var message = builder.finish();
this.receiver_.accept(message);
};
TestInterfacePtr.prototype.passB2 = function() {
return TestInterfaceProxy.prototype.passB2
.apply(this.ptr.getProxy(), arguments);
};
TestInterfaceProxy.prototype.passB2 = function(b2) {
var params_ = new TestInterface_PassB2_Params();
params_.b2 = b2;
var builder = new codec.MessageV0Builder(
kTestInterface_PassB2_Name,
codec.align(TestInterface_PassB2_Params.encodedSize));
builder.encodeStruct(TestInterface_PassB2_Params, params_);
var message = builder.finish();
this.receiver_.accept(message);
};
function TestInterfaceStub(delegate) {
this.delegate_ = delegate;
}
TestInterfaceStub.prototype.passA1 = function(a1) {
return this.delegate_ && this.delegate_.passA1 && this.delegate_.passA1(a1);
}
TestInterfaceStub.prototype.passB1 = function(b1) {
return this.delegate_ && this.delegate_.passB1 && this.delegate_.passB1(b1);
}
TestInterfaceStub.prototype.passB2 = function(b2) {
return this.delegate_ && this.delegate_.passB2 && this.delegate_.passB2(b2);
}
TestInterfaceStub.prototype.accept = function(message) {
var reader = new codec.MessageReader(message);
switch (reader.messageName) {
case kTestInterface_PassA1_Name:
var params = reader.decodeStruct(TestInterface_PassA1_Params);
this.passA1(params.a1);
return true;
case kTestInterface_PassB1_Name:
var params = reader.decodeStruct(TestInterface_PassB1_Params);
this.passB1(params.b1);
return true;
case kTestInterface_PassB2_Name:
var params = reader.decodeStruct(TestInterface_PassB2_Params);
this.passB2(params.b2);
return true;
default:
return false;
}
};
TestInterfaceStub.prototype.acceptWithResponder =
function(message, responder) {
var reader = new codec.MessageReader(message);
switch (reader.messageName) {
default:
return false;
}
};
function validateTestInterfaceRequest(messageValidator) {
var message = messageValidator.message;
var paramsClass = null;
switch (message.getName()) {
case kTestInterface_PassA1_Name:
if (!message.expectsResponse() && !message.isResponse())
paramsClass = TestInterface_PassA1_Params;
break;
case kTestInterface_PassB1_Name:
if (!message.expectsResponse() && !message.isResponse())
paramsClass = TestInterface_PassB1_Params;
break;
case kTestInterface_PassB2_Name:
if (!message.expectsResponse() && !message.isResponse())
paramsClass = TestInterface_PassB2_Params;
break;
}
if (paramsClass === null)
return validator.validationError.NONE;
return paramsClass.validate(messageValidator, messageValidator.message.getHeaderNumBytes());
}
function validateTestInterfaceResponse(messageValidator) {
return validator.validationError.NONE;
}
var TestInterface = {
name: 'module_b.TestInterface',
kVersion: 0,
ptrClass: TestInterfacePtr,
proxyClass: TestInterfaceProxy,
stubClass: TestInterfaceStub,
validateRequest: validateTestInterfaceRequest,
validateResponse: null,
};
TestInterfaceStub.prototype.validator = validateTestInterfaceRequest;
TestInterfaceProxy.prototype.validator = null;
exports.TestStructB2 = TestStructB2;
exports.TestInterface = TestInterface;
exports.TestInterfacePtr = TestInterfacePtr;
exports.TestInterfaceAssociatedPtr = TestInterfaceAssociatedPtr;
})();
line_fitting.rs
use arrsac::Arrsac;
use levenberg_marquardt::{LeastSquaresProblem, LevenbergMarquardt};
use nalgebra::{
dimension::{U1, U2},
storage::Owned,
Dim, Dynamic, Matrix, Matrix2, OMatrix, VecStorage, Vector2,
};
use pcg_rand::Pcg64;
use rand::distributions::Uniform;
use rand::{distributions::Distribution, Rng};
use sample_consensus::{Consensus, Estimator, Model};
type F = f64;
const LINES_TO_ESTIMATE: usize = 1000;
#[derive(Debug, Clone)]
struct Line {
normal_angle: F,
c: F,
}
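// A `Line` is the locus of points p satisfying normal · p = c, where the
// unit normal is (cos(normal_angle), sin(normal_angle)); `residual` below
// is therefore the point-to-line distance.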
impl Line {
fn xy_residuals(&self, point: Vector2<F>) -> Vector2<F> {
let normal = self.normal();
(self.c - normal.dot(&point)) * normal
}
/// This takes in a point and computes the Jacobian of the vector from
/// the point projected onto the line to the point itself. The
/// Jacobian is computed in respect to the model itself.
#[rustfmt::skip]
fn jacobian(&self, point: Vector2<F>) -> Matrix2<F> {
let n = self.normal();
let nd = Vector2::new(-self.normal_angle.sin(), self.normal_angle.cos());
let c = self.c;
let dist_d_angle = (c - n.dot(&point)) * nd - n * point.dot(&nd);
Matrix2::new(
dist_d_angle[0], n[0],
dist_d_angle[1], n[1],
)
}
fn into_vec(self) -> Vector2<F> {
Vector2::new(self.normal_angle, self.c)
}
fn from_vec(v: Vector2<F>) -> Self {
Self {
normal_angle: v[0],
c: v[1],
}
}
fn norm_cosine_distance(&self, other: &Self) -> F {
1.0 - self.normal().dot(&other.normal()).abs()
}
fn normal(&self) -> Vector2<F> {
Vector2::new(self.normal_angle.cos(), self.normal_angle.sin())
}
}
impl Model<Vector2<F>> for Line {
fn residual(&self, point: &Vector2<F>) -> f64 {
(self.normal().dot(point) - self.c).abs()
}
}
struct LineEstimator;
impl Estimator<Vector2<F>> for LineEstimator {
type Model = Line;
type ModelIter = std::iter::Once<Line>;
const MIN_SAMPLES: usize = 2;
fn estimate<I>(&self, mut data: I) -> Self::ModelIter
where
I: Iterator<Item = Vector2<F>> + Clone,
{
let a = data.next().unwrap();
let b = data.next().unwrap();
let normal = Vector2::new(a.y - b.y, b.x - a.x).normalize();
let c = -normal.dot(&b);
let normal_angle = F::atan2(normal[1], normal[0]);
std::iter::once(Line { normal_angle, c })
}
}
struct LineFittingOptimizationProblem<'a> {
points: &'a [Vector2<F>],
model: Line,
}
impl<'a> LeastSquaresProblem<F, Dynamic, U2> for LineFittingOptimizationProblem<'a> {
type ParameterStorage = Owned<F, U2, U1>;
type JacobianStorage = Owned<F, Dynamic, U2>;
type ResidualStorage = VecStorage<F, Dynamic, U1>;
fn set_params(&mut self, p: &Vector2<F>) {
self.model = Line::from_vec(*p);
}
fn params(&self) -> Vector2<F> {
self.model.clone().into_vec()
}
fn residuals(&self) -> Option<Matrix<F, Dynamic, U1, Self::ResidualStorage>> {
let residual_data = self
.points
.iter()
.flat_map(|&point| {
use std::iter::once;
let vec = self.model.xy_residuals(point);
once(vec.x).chain(once(vec.y))
})
.collect();
Some(Matrix::<F, Dynamic, U1, Self::ResidualStorage>::from_vec(
residual_data,
))
}
fn jacobian(&self) -> Option<OMatrix<F, Dynamic, U2>> {
let u2 = Dim::from_usize(2);
let mut jacobian = OMatrix::zeros_generic(Dynamic::from_usize(self.points.len() * 2), u2);
for (i, point) in self.points.iter().enumerate() {
jacobian
.slice_range_mut(2 * i..2 * (i + 1), ..)
.copy_from(&self.model.jacobian(*point));
}
Some(jacobian)
}
}
#[test]
fn lines() {
let mut rng = Pcg64::new_unseeded();
// The max candidate hypotheses had to be increased dramatically to ensure all 1000 cases find a
// good-fitting line.
let mut arrsac = Arrsac::new(5.0, Pcg64::new_unseeded());
let mut would_have_failed = false;
for _ in 0..LINES_TO_ESTIMATE {
// Generate <a, b> and normalize.
let normal =
Vector2::new(rng.gen_range(-10.0..10.0), rng.gen_range(-10.0..10.0)).normalize();
let normal_angle = F::atan2(normal[1], normal[0]);
// Get parallel ray.
let ray = Vector2::new(normal.y, -normal.x);
// Generate random c.
let c = rng.gen_range(-10.0..10.0);
// Generate random number of points.
let num = rng.gen_range(100..1000);
// The points should be no more than 5.0 away from the line and be evenly distributed away from the line.
let residuals = Uniform::new(-5.0, 5.0);
// The points must be generated along the line, but the distance should be bounded to make it more difficult.
let distances = Uniform::new(-50.0, 50.0);
// Generate the points.
let points: Vec<Vector2<F>> = (0..num)
.map(|_| {
let residual: F = residuals.sample(&mut rng);
let distance: F = distances.sample(&mut rng);
let along = ray * distance;
let against = (residual - c) * normal;
along + against
})
.collect();
let model = arrsac
.model(&LineEstimator, points.iter().copied())
.expect("unable to estimate a model");
// Now perform Levenberg-Marquardt.
let problem = LineFittingOptimizationProblem {
model: model.clone(),
points: &points,
};
let (problem, report) = LevenbergMarquardt::new().minimize(problem);
assert!(report.termination.was_successful());
let real_model = Line { normal_angle, c };
would_have_failed = would_have_failed || model.norm_cosine_distance(&real_model) >= 0.01;
let new_cosine_distance = problem.model.norm_cosine_distance(&real_model);
// Check the slope using the cosine distance.
assert!(new_cosine_distance < 0.001, "slope out of expected range");
}
// test that there were initial guesses that wouldn't have been enough
assert!(would_have_failed);
}
policy_utilities.py
# coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for bandit policies."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import policy_step
from tf_agents.utils import common
class InfoFields(object):
"""Strings which can be used in the policy info fields."""
# Mean of predicted rewards (per arm).
PREDICTED_REWARDS_MEAN = 'predicted_rewards_mean'
# Samples of predicted rewards (per arm).
PREDICTED_REWARDS_SAMPLED = 'predicted_rewards_sampled'
# Type of bandit policy (see enumerations in `BanditPolicyType`).
BANDIT_POLICY_TYPE = 'bandit_policy_type'
# Used to store the chosen action for a per-arm model.
CHOSEN_ARM_FEATURES = 'chosen_arm_features'
PolicyInfo = collections.namedtuple( # pylint: disable=invalid-name
'PolicyInfo',
(policy_step.CommonFields.LOG_PROBABILITY,
InfoFields.PREDICTED_REWARDS_MEAN,
InfoFields.PREDICTED_REWARDS_SAMPLED,
InfoFields.BANDIT_POLICY_TYPE))
# Set default empty tuple for all fields.
PolicyInfo.__new__.__defaults__ = ((),) * len(PolicyInfo._fields)
PerArmPolicyInfo = collections.namedtuple( # pylint: disable=invalid-name
'PerArmPolicyInfo',
(policy_step.CommonFields.LOG_PROBABILITY,
InfoFields.PREDICTED_REWARDS_MEAN,
InfoFields.PREDICTED_REWARDS_SAMPLED,
InfoFields.BANDIT_POLICY_TYPE,
InfoFields.CHOSEN_ARM_FEATURES))
# Set default empty tuple for all fields.
PerArmPolicyInfo.__new__.__defaults__ = ((),) * len(PerArmPolicyInfo._fields)
class BanditPolicyType(object):
"""Enumeration of bandit policy types."""
# No bandit policy type specified.
UNKNOWN = 0
# Greedy decision made by bandit agent.
GREEDY = 1
# Random decision for exploration made by epsilon-greedy agent sampled from
# uniform distribution over actions.
UNIFORM = 2
def create_bandit_policy_type_tensor_spec(shape):
"""Create tensor spec for bandit policy type."""
return tensor_spec.BoundedTensorSpec(
shape=shape, dtype=tf.int32,
minimum=BanditPolicyType.UNKNOWN, maximum=BanditPolicyType.UNIFORM)
@common.function
def masked_argmax(input_tensor, mask, output_type=tf.int32):
"""Computes the argmax where the allowed elements are given by a mask.
Args:
input_tensor: Rank-2 Tensor of floats.
mask: 0-1 valued Tensor of the same shape as input.
output_type: Integer type of the output.
Returns:
A Tensor of rank 1 and type `output_type`, with the masked argmax of every
row of `input_tensor`.
"""
input_tensor.shape.assert_is_compatible_with(mask.shape)
neg_inf = tf.constant(-float('Inf'), input_tensor.dtype)
tf.compat.v1.assert_equal(
tf.reduce_max(mask, axis=1), tf.constant(1, dtype=mask.dtype))
modified_input = tf.compat.v2.where(
tf.cast(mask, tf.bool), input_tensor, neg_inf)
return tf.argmax(modified_input, axis=-1, output_type=output_type)
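# Illustrative example (arguments given as a float tensor and a 0-1 tensor):
#   masked_argmax([[0.2, 0.9], [0.5, 0.1]], [[1., 0.], [1., 1.]]) -> [0, 0]
# because the 0.9 in the first row is disallowed by the mask.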
def has_bandit_policy_type(info, check_for_tensor=False):
"""Check if policy info has `bandit_policy_type` field/tensor."""
if info in ((), None):
return False
fields = getattr(info, '_fields', None)
has_field = fields is not None and InfoFields.BANDIT_POLICY_TYPE in fields
if has_field and check_for_tensor:
return isinstance(info.bandit_policy_type, tf.Tensor)
else:
return has_field
def set_bandit_policy_type(info, bandit_policy_type):
"""Sets the InfoFields.BANDIT_POLICY_TYPE on info to bandit_policy_type.
If policy `info` does not support InfoFields.BANDIT_POLICY_TYPE, this method
returns `info` as-is (without any modification).
Args:
info: Policy info on which to set bandit policy type.
bandit_policy_type: Tensor containing BanditPolicyType enums or TensorSpec
from `create_bandit_policy_type_tensor_spec()`.
Returns:
Policy info with modified field (if possible).
"""
if info in ((), None):
return PolicyInfo(bandit_policy_type=bandit_policy_type)
fields = getattr(info, '_fields', None)
if fields is not None and InfoFields.BANDIT_POLICY_TYPE in fields:
return info._replace(bandit_policy_type=bandit_policy_type)
try:
info[InfoFields.BANDIT_POLICY_TYPE] = bandit_policy_type
except TypeError:
pass
return info
@common.function
def bandit_policy_uniform_mask(values, mask):
"""Set bandit policy type tensor to BanditPolicyType.UNIFORM based on mask.
Set bandit policy type `values` to BanditPolicyType.UNIFORM; returns tensor
where output[i] is BanditPolicyType.UNIFORM if mask[i] is True, otherwise it
is left as values[i].
Args:
values: Tensor containing `BanditPolicyType` enumerations.
mask: Tensor of the same shape as `values` with boolean flags indicating
values to set to `BanditPolicyType.UNIFORM`.
Returns:
Tensor containing `BanditPolicyType` enumerations with masked values.
"""
tf.compat.v1.assert_equal(tf.shape(mask), tf.shape(values))
return tf.where(
mask, tf.fill(tf.shape(values), BanditPolicyType.UNIFORM), values)
bastionhost.go
/*
Copyright The Kubeform Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1alpha1
import (
v1alpha1 "kubeform.dev/kubeform/apis/azurerm/v1alpha1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/cache"
)
// BastionHostLister helps list BastionHosts.
type BastionHostLister interface {
// List lists all BastionHosts in the indexer.
List(selector labels.Selector) (ret []*v1alpha1.BastionHost, err error)
// BastionHosts returns an object that can list and get BastionHosts.
	BastionHosts(namespace string) BastionHostNamespaceLister
	BastionHostListerExpansion
}
// bastionHostLister implements the BastionHostLister interface.
type bastionHostLister struct {
indexer cache.Indexer
}
// NewBastionHostLister returns a new BastionHostLister.
func NewBastionHostLister(indexer cache.Indexer) BastionHostLister {
return &bastionHostLister{indexer: indexer}
}
// List lists all BastionHosts in the indexer.
func (s *bastionHostLister) List(selector labels.Selector) (ret []*v1alpha1.BastionHost, err error) {
err = cache.ListAll(s.indexer, selector, func(m interface{}) {
ret = append(ret, m.(*v1alpha1.BastionHost))
})
return ret, err
}
// BastionHosts returns an object that can list and get BastionHosts.
func (s *bastionHostLister) BastionHosts(namespace string) BastionHostNamespaceLister {
return bastionHostNamespaceLister{indexer: s.indexer, namespace: namespace}
}
// BastionHostNamespaceLister helps list and get BastionHosts.
type BastionHostNamespaceLister interface {
// List lists all BastionHosts in the indexer for a given namespace.
List(selector labels.Selector) (ret []*v1alpha1.BastionHost, err error)
// Get retrieves the BastionHost from the indexer for a given namespace and name.
Get(name string) (*v1alpha1.BastionHost, error)
BastionHostNamespaceListerExpansion
}
// bastionHostNamespaceLister implements the BastionHostNamespaceLister
// interface.
type bastionHostNamespaceLister struct {
indexer cache.Indexer
namespace string
}
// List lists all BastionHosts in the indexer for a given namespace.
func (s bastionHostNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.BastionHost, err error) {
err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
ret = append(ret, m.(*v1alpha1.BastionHost))
})
return ret, err
}
// Get retrieves the BastionHost from the indexer for a given namespace and name.
func (s bastionHostNamespaceLister) Get(name string) (*v1alpha1.BastionHost, error) {
obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.NewNotFound(v1alpha1.Resource("bastionhost"), name)
}
return obj.(*v1alpha1.BastionHost), nil
} | |
individual_size_plots.py | import collections
import os
import sys
import matplotlib.pyplot as plt
import matplotlib.ticker as tick
import numpy as np
data = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'data', 'larson_et_al')
sys.path.append(data)
import ce_expansion.npdb.db_inter
DEFAULT_DPI = 600 # Dots per inch
DEFAULT_POINTSIZE = 15
DEFAULT_MARKER = "o"  # circle
class Result(object):
def __init__(self, shape, size, composition, excess_energy, temp):
self.shape = shape
self.size = size
self.composition = composition
self.excess_energy = excess_energy
self.free_energy = self.get_free_energy_mix(temp)
def | (self, T):
"""
Calculates the excess energy plus an entropic contribution, using
self.excess_energy and self.composition from the DB query.
:param T: Temperature in Kelvin
:return: Free energy of mixing = excess energy (related to enthalpy of mixing) - entropy of mixing
"""
if self.composition == 1 or self.composition == 0:
return 0
# k_B * T [eV] (25.7 meV at 298 K)
kt = 25.7E-3 / 298 * T
del_s = self.composition * np.log(self.composition) + (1 - self.composition) * np.log(1 - self.composition)
del_s *= -kt
free_energy = self.excess_energy - del_s
return free_energy
class OrderedSet(collections.UserList):
"""
Wrapper around a list that allows it to operate somewhat like a set.
"""
def add(self, value):
"""
If the value passed in is not in the set, then adds it to the set. Otherwise, does nothing.
:param value: The value to be added.
"""
if value not in self.data:
self.data.append(value)
def get_data(alloy,
size,
temperature):
"""
Gets data for phase diagram
:param alloy: Alloy of interest
:param size: Size to consider
:param temperature: Temperature to use
:return: List of Result objects.
"""
# Book-keeping and initialization
shapes = ["icosahedron", "cuboctahedron", "elongated-pentagonal-bipyramid"]
# DB Query
results = []
for shape in shapes:
query = ce_expansion.npdb.db_inter.get_bimet_result(metals=alloy, shape=shape, num_atoms=size)
for result in query:
# Calculate composition
composition = result.n_metal1 / result.num_atoms
# Calculate EE
excess_energy = result.EE
# Add to the list of results objects
results.append(Result(shape, size, composition, excess_energy, temperature))
return results
def make_plot(results, axis, size):
"""
Plots free energy of mixing vs. composition for each shape.
:param results: A list of Results objects containing the shape, composition, and free energy of mixing
:param axis: Pyplot axis to plot to
:param size: Nanoparticle size (number of atoms), used for the N= label
:return: None. Drops the plot in the working directory.
"""
# Split into 3 lists: icosahedra, cuboctahedra, and EPBs
# Each list holds tuples of (composition, free energy of mixing, color)
icos = []
cubs = []
epbs = []
types = {"icosahedron": icos,
"cuboctahedron": cubs,
"elongated-pentagonal-bipyramid": epbs}
colors = {"icosahedron": "red",
"cuboctahedron": "blue",
"elongated-pentagonal-bipyramid": "green"}
for result in results:
types[result.shape].append((result.composition, result.free_energy, colors[result.shape]))
for shape in [icos, cubs, epbs]:
x = [i[0] * 100 for i in shape]
y = [i[1] for i in shape]
color = shape[0][2]
axis.plot(x, y, color)
# Label size
axis.text(0.9, 0.5, f"N={size}", transform=axis.transAxes, size=20)
alloys = ["AgCu"]  # ["AgAu", "AuCu", "AgCu"]
for alloy in alloys:
tens_sizes = [3871, 2869, 2057, 1415, 561] # sizes where we skipped 10% increments
all_sizes = [309, 147, 55, 13] # sizes where we looked at all possible compositions
for sizes in [tens_sizes, all_sizes]:
fig, axes = plt.subplots(nrows=5, ncols=1, sharex=True, sharey=True)
ymin = 0
ymax = 0
for plot_index, size in enumerate(sizes):
# Query
results = get_data(alloy, size, 250)
results.sort(key=lambda i: i.composition)
# Plot
make_plot(results, axes[abs(plot_index)], size)
# plot labels
fig.text(0.5, 0.04, "Composition (%)", ha="center", size=20)
fig.text(0, 0.5, "Free Energy of Mixing (eV/atom)", va="center", rotation="vertical", size=20)
fig.text(0.5, 0.95, f"{alloy} @ 250K", size=25, ha="center")
# Tickmarks
plt.xlim(0, 100)
ylimits = {"AgAu": [-0.1, 0],
"AgCu": [-0.1+0.025, 0.025],
"AuCu": [-0.3, 0]}
ymin = ylimits[alloy][0]
ymax = ylimits[alloy][1]
plt.ylim(ymin, ymax)
for axis in axes:
# Set up X tickmarks
axis.tick_params(axis="x", labelsize=15)
axis.xaxis.set_major_locator(tick.MultipleLocator(20))
axis.xaxis.set_major_formatter(tick.FormatStrFormatter("%d"))
axis.xaxis.set_minor_locator(tick.MultipleLocator(10))
axis.xaxis.grid(True, which='major')
# Set up Y tickmarks
axis.tick_params(axis="y", labelsize=15)
axis.yaxis.set_major_locator(tick.MultipleLocator((ymax - ymin) / 2))
axis.yaxis.set_major_formatter(tick.FormatStrFormatter("%2.2f"))
axis.yaxis.set_minor_locator(tick.MultipleLocator((ymax - ymin) / 4))
# Save and quit
plt.savefig(f"{alloy},{sizes[-1]}-{sizes[0]}.png")
plt.close()
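# A worked sketch of the free-energy formula used in get_free_energy_mix above,
# for an equimolar composition (x = 0.5) at T = 298 K and an illustrative
# excess energy of -0.05 eV/atom (not a database value):
#
#   import numpy as np
#   x, T, ee = 0.5, 298, -0.05
#   kt = 25.7e-3 / 298 * T                                     # k_B*T in eV
#   del_s = -kt * (x * np.log(x) + (1 - x) * np.log(1 - x))   # ~= +0.0178 eV/atom
#   free_energy = ee - del_s                                   # ~= -0.0678 eV/atom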
| get_free_energy_mix |
test_config.py | # Copyright 2014 Muchos authors (see AUTHORS)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from muchos.config import DeployConfig
def test_defaults():
|
def test_case_sensitive():
c = DeployConfig("muchos", '../conf/muchos.props.example', '../conf/hosts/example/example_cluster',
'../conf/checksums', 'mycluster')
assert c.has_option('ec2', 'default_instance_type')
assert not c.has_option('ec2', 'Default_instance_type')
c.set('nodes', 'CamelCaseWorker', 'worker,fluo')
c.init_nodes()
assert c.get_node('CamelCaseWorker') == ['worker', 'fluo']
| c = DeployConfig("muchos", '../conf/muchos.props.example', '../conf/hosts/example/example_cluster',
'../conf/checksums', 'mycluster')
assert c.checksum_ver('accumulo', '1.9.0') == 'f68a6145029a9ea843b0305c90a7f5f0334d8a8ceeea94734267ec36421fe7fe'
assert c.checksum('accumulo') == 'baa5e0929248ff0d96355bc7fb42a5b75d183a83364519296e07b0adbb089180'
assert c.get('ec2', 'default_instance_type') == 'm5d.large'
assert c.get('ec2', 'worker_instance_type') == 'm5d.large'
assert c.get('ec2', 'aws_ami') == 'ami-9887c6e7'
assert c.max_ephemeral() == 1
assert c.mounts(2) == ['/media/ephemeral0', '/media/ephemeral1']
assert c.node_type_map() == {'default': {'mounts': ['/media/ephemeral0', ], 'devices': ['/dev/nvme1n1', ]},
'worker': {'mounts': ['/media/ephemeral0', ], 'devices': ['/dev/nvme1n1', ]}}
assert c.node_type('worker1') == 'worker'
assert c.node_type('leader1') == 'default'
assert not c.has_option('ec2', 'vpc_id')
assert not c.has_option('ec2', 'subnet_id')
assert c.get('ec2', 'key_name') == 'my_aws_key'
assert c.instance_tags() == {}
assert len(c.nodes()) == 6
assert c.get_node('leader1') == ['namenode', 'resourcemanager', 'accumulomaster', 'zookeeper']
assert c.get_node('worker1') == ['worker']
assert c.get_node('worker2') == ['worker']
assert c.get_node('worker3') == ['worker']
assert c.has_service('accumulomaster')
assert not c.has_service('fluo')
assert c.get_service_hostnames('worker') == ['worker1', 'worker2', 'worker3', 'worker4']
assert c.get_service_hostnames('zookeeper') == ['leader1']
assert c.get_hosts() == {'leader2': ('10.0.0.1', None), 'leader1': ('10.0.0.0', '23.0.0.0'), 'worker1': ('10.0.0.2', None), 'worker3': ('10.0.0.4', None), 'worker2': ('10.0.0.3', None), 'worker4': ('10.0.0.5', None)}
assert c.get_public_ip('leader1') == '23.0.0.0'
assert c.get_private_ip('leader1') == '10.0.0.0'
assert c.cluster_name == 'mycluster'
assert c.version("accumulo").startswith('2.')
assert c.version("fluo").startswith('1.')
assert c.version("hadoop").startswith('3.')
assert c.version("zookeeper").startswith('3.')
assert c.get_service_private_ips("worker") == ['10.0.0.2', '10.0.0.3', '10.0.0.4', '10.0.0.5']
assert c.get('general', 'proxy_hostname') == "leader1"
assert c.proxy_public_ip() == "23.0.0.0"
assert c.proxy_private_ip() == "10.0.0.0"
assert c.get('general', 'cluster_basedir') == "/home/centos"
assert c.get('general', 'cluster_user') == "centos"
assert c.get_non_proxy() == [('10.0.0.1', 'leader2'), ('10.0.0.2', 'worker1'), ('10.0.0.3', 'worker2'),
('10.0.0.4', 'worker3'), ('10.0.0.5', 'worker4')]
assert c.get_host_services() == [('leader1', 'namenode resourcemanager accumulomaster zookeeper'), ('leader2', 'metrics'),
('worker1', 'worker'), ('worker2', 'worker'), ('worker3', 'worker'), ('worker4', 'worker')] |
bases.rs | pub const MMIO_BASE: u32 = 0x3F00_0000;
pub const GPIO_BASE: u32 = MMIO_BASE + 0x200000; | pub const VIDEOCORE_MBOX: u32 = MMIO_BASE + 0xB880; | |
__init__.py | from pyramid.settings import asbool
from magpie.constants import get_constant
from magpie.utils import get_logger
LOGGER = get_logger(__name__)
def | (config):
from magpie.ui.management.views import ManagementViews
LOGGER.info("Adding UI management...")
config.add_route(ManagementViews.view_groups.__name__,
"/ui/groups")
config.add_route(ManagementViews.add_group.__name__,
"/ui/groups/add")
config.add_route(ManagementViews.edit_group.__name__,
"/ui/groups/{group_name}/{cur_svc_type}")
config.add_route(ManagementViews.view_users.__name__,
"/ui/users")
config.add_route(ManagementViews.add_user.__name__,
"/ui/users/add")
config.add_route(ManagementViews.edit_user.__name__,
"/ui/users/{user_name}/{cur_svc_type}")
config.add_route(ManagementViews.view_services.__name__,
"/ui/services/{cur_svc_type}")
config.add_route(ManagementViews.add_service.__name__,
"/ui/services/{cur_svc_type}/add")
config.add_route(ManagementViews.edit_service.__name__,
"/ui/services/{cur_svc_type}/{service_name}")
config.add_route(ManagementViews.add_resource.__name__,
"/ui/services/{cur_svc_type}/{service_name}/add/{resource_id}")
register_user_enabled = asbool(get_constant("MAGPIE_USER_REGISTRATION_ENABLED", settings_container=config,
default_value=False, print_missing=True,
raise_missing=False, raise_not_set=False))
if register_user_enabled:
LOGGER.info("Adding UI pending user registration detail page.")
config.add_route("view_pending_user", "/ui/register/users/{user_name}")
config.add_view(ManagementViews, attr="view_pending_user", route_name="view_pending_user",
renderer="magpie.ui.management:templates/view_pending_user.mako")
config.scan()
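# A minimal sketch of how Pyramid consumes the includeme() above; the main()
# factory and settings names here are illustrative, not from this package:
#
#   from pyramid.config import Configurator
#
#   def main(global_config, **settings):
#       config = Configurator(settings=settings)
#       config.include("magpie.ui.management")   # runs includeme(config)
#       return config.make_wsgi_app()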
| includeme |
TSOverviewGenerator.py | from onelang_core import *
import OneLang.One.Ast.Expressions as exprs
import OneLang.One.Ast.Statements as stats
import OneLang.One.Ast.Types as types
import OneLang.One.Ast.AstTypes as astTypes
import OneLang.One.Ast.References as refs
import OneLang.One.Ast.Interfaces as ints
import onelang_core as one
import json
import re
@one.static_init
class TSOverviewGenerator:
@classmethod
def static_init(cls):
cls.preview = cls(True)
def __init__(self, preview_only = False, show_types = False):
self.preview_only = preview_only
self.show_types = show_types
def leading(self, item):
result = ""
if item.leading_trivia != None and len(item.leading_trivia) > 0:
result += item.leading_trivia
if item.attributes != None:
result += "".join(list(map(lambda x: f'''/// {{ATTR}} name="{x}", value={json.dumps(item.attributes.get(x), separators=(',', ':'))}\n''', item.attributes.keys())))
return result
def pre_arr(self, prefix, value):
return f'''{prefix}{", ".join(value)}''' if len(value) > 0 else ""
def pre_if(self, prefix, condition):
return prefix if condition else ""
def pre(self, prefix, value):
return f'''{prefix}{value}''' if value != None else ""
def type_args(self, args):
return f'''<{", ".join(args)}>''' if args != None and len(args) > 0 else ""
def type(self, t, raw = False):
repr = "???" if t == None else t.repr()
if repr == "U:UNKNOWN":
pass
return ("" if raw else "{T}") + repr
def var(self, v):
result = ""
is_prop = isinstance(v, types.Property)
if isinstance(v, types.Field) or isinstance(v, types.Property):
m = v
result += self.pre_if("", m.is_static)
result += "private " if m.visibility == types.VISIBILITY.PRIVATE else "protected " if m.visibility == types.VISIBILITY.PROTECTED else "public " if m.visibility == types.VISIBILITY.PUBLIC else "VISIBILITY-NOT-SET"
result += f'''{("@prop " if is_prop else "")}'''
if v.mutability != None:
result += f'''{("@unused " if v.mutability.unused else "")}'''
result += f'''{("@mutated " if v.mutability.mutated else "")}'''
result += f'''{("@reass " if v.mutability.reassigned else "")}'''
result += f'''{v.name}{("()" if is_prop else "")}: {self.type(v.type)}'''
if isinstance(v, stats.VariableDeclaration) or isinstance(v, stats.ForVariable) or isinstance(v, types.Field) or isinstance(v, types.MethodParameter):
init = (v).initializer
if init != None:
result += self.pre(" = ", self.expr(init))
return result
def expr(self, expr):
res = "UNKNOWN-EXPR"
if isinstance(expr, exprs.NewExpression):
res = f'''new {self.type(expr.cls_)}({("..." if self.preview_only else ", ".join(list(map(lambda x: self.expr(x), expr.args))))})'''
elif isinstance(expr, exprs.UnresolvedNewExpression):
res = f'''new {self.type(expr.cls_)}({("..." if self.preview_only else ", ".join(list(map(lambda x: self.expr(x), expr.args))))})'''
elif isinstance(expr, exprs.Identifier):
res = f'''{{ID}}{expr.text}'''
elif isinstance(expr, exprs.PropertyAccessExpression):
res = f'''{self.expr(expr.object)}.{{PA}}{expr.property_name}'''
elif isinstance(expr, exprs.UnresolvedCallExpression):
type_args = f'''<{", ".join(list(map(lambda x: self.type(x), expr.type_args)))}>''' if len(expr.type_args) > 0 else ""
res = f'''{self.expr(expr.func)}{type_args}({("..." if self.preview_only else ", ".join(list(map(lambda x: self.expr(x), expr.args))))})'''
elif isinstance(expr, exprs.UnresolvedMethodCallExpression):
type_args = f'''<{", ".join(list(map(lambda x: self.type(x), expr.type_args)))}>''' if len(expr.type_args) > 0 else ""
res = f'''{self.expr(expr.object)}.{{UM}}{expr.method_name}{type_args}({("..." if self.preview_only else ", ".join(list(map(lambda x: self.expr(x), expr.args))))})'''
elif isinstance(expr, exprs.InstanceMethodCallExpression):
type_args = f'''<{", ".join(list(map(lambda x: self.type(x), expr.type_args)))}>''' if len(expr.type_args) > 0 else ""
res = f'''{self.expr(expr.object)}.{{M}}{expr.method.name}{type_args}({("..." if self.preview_only else ", ".join(list(map(lambda x: self.expr(x), expr.args))))})'''
elif isinstance(expr, exprs.StaticMethodCallExpression):
type_args = f'''<{", ".join(list(map(lambda x: self.type(x), expr.type_args)))}>''' if len(expr.type_args) > 0 else ""
res = f'''{expr.method.parent_interface.name}.{{M}}{expr.method.name}{type_args}({("..." if self.preview_only else ", ".join(list(map(lambda x: self.expr(x), expr.args))))})'''
elif isinstance(expr, exprs.GlobalFunctionCallExpression):
res = f'''{expr.func.name}({("..." if self.preview_only else ", ".join(list(map(lambda x: self.expr(x), expr.args))))})'''
elif isinstance(expr, exprs.LambdaCallExpression):
res = f'''{self.expr(expr.method)}({("..." if self.preview_only else ", ".join(list(map(lambda x: self.expr(x), expr.args))))})'''
elif isinstance(expr, exprs.BooleanLiteral):
res = f'''{("true" if expr.bool_value else "false")}'''
elif isinstance(expr, exprs.StringLiteral):
res = f'''{json.dumps(expr.string_value, separators=(',', ':'))}'''
elif isinstance(expr, exprs.NumericLiteral):
res = f'''{expr.value_as_text}'''
elif isinstance(expr, exprs.CharacterLiteral):
res = f'''\'{expr.char_value}\''''
elif isinstance(expr, exprs.ElementAccessExpression):
res = f'''({self.expr(expr.object)})[{self.expr(expr.element_expr)}]'''
elif isinstance(expr, exprs.TemplateString):
res = "`" + "".join(list(map(lambda x: x.literal_text if x.is_literal else "${" + self.expr(x.expression) + "}", expr.parts))) + "`"
elif isinstance(expr, exprs.BinaryExpression):
res = f'''{self.expr(expr.left)} {expr.operator} {self.expr(expr.right)}'''
elif isinstance(expr, exprs.ArrayLiteral):
res = f'''[{", ".join(list(map(lambda x: self.expr(x), expr.items)))}]'''
elif isinstance(expr, exprs.CastExpression):
res = f'''<{self.type(expr.new_type)}>({self.expr(expr.expression)})'''
elif isinstance(expr, exprs.ConditionalExpression):
res = f'''{self.expr(expr.condition)} ? {self.expr(expr.when_true)} : {self.expr(expr.when_false)}'''
elif isinstance(expr, exprs.InstanceOfExpression):
res = f'''{self.expr(expr.expr)} instanceof {self.type(expr.check_type)}'''
elif isinstance(expr, exprs.ParenthesizedExpression):
res = f'''({self.expr(expr.expression)})'''
elif isinstance(expr, exprs.RegexLiteral):
res = f'''/{expr.pattern}/{("g" if expr.global_ else "")}{("i" if expr.case_insensitive else "")}'''
elif isinstance(expr, types.Lambda):
res = f'''({", ".join(list(map(lambda x: x.name + (": " + self.type(x.type) if x.type != None else ""), expr.parameters)))})''' + (f''' @captures({", ".join(list(map(lambda x: x.name, expr.captures)))})''' if expr.captures != None and len(expr.captures) > 0 else "") + f''' => {{ {self.raw_block(expr.body)} }}'''
elif isinstance(expr, exprs.UnaryExpression) and expr.unary_type == exprs.UNARY_TYPE.PREFIX:
res = f'''{expr.operator}{self.expr(expr.operand)}'''
elif isinstance(expr, exprs.UnaryExpression) and expr.unary_type == exprs.UNARY_TYPE.POSTFIX:
res = f'''{self.expr(expr.operand)}{expr.operator}'''
elif isinstance(expr, exprs.MapLiteral):
repr = ",\n".join(list(map(lambda item: f'''{item.key}: {self.expr(item.value)}''', expr.items)))
res = "{L:M}" + ("{}" if repr == "" else f'''{{\n{self.pad(repr)}\n}}''' if "\n" in repr else f'''{{ {repr} }}''')
elif isinstance(expr, exprs.NullLiteral):
res = f'''null'''
elif isinstance(expr, exprs.AwaitExpression):
res = f'''await {self.expr(expr.expr)}'''
elif isinstance(expr, refs.ThisReference):
res = f'''{{R}}this'''
elif isinstance(expr, refs.StaticThisReference):
res = f'''{{R:Static}}this'''
elif isinstance(expr, refs.EnumReference):
res = f'''{{R:Enum}}{expr.decl.name}'''
elif isinstance(expr, refs.ClassReference):
res = f'''{{R:Cls}}{expr.decl.name}'''
elif isinstance(expr, refs.MethodParameterReference):
res = f'''{{R:MetP}}{expr.decl.name}'''
elif isinstance(expr, refs.VariableDeclarationReference):
res = f'''{{V}}{expr.decl.name}'''
elif isinstance(expr, refs.ForVariableReference):
res = f'''{{R:ForV}}{expr.decl.name}'''
elif isinstance(expr, refs.ForeachVariableReference):
res = f'''{{R:ForEV}}{expr.decl.name}'''
elif isinstance(expr, refs.CatchVariableReference):
res = f'''{{R:CatchV}}{expr.decl.name}'''
elif isinstance(expr, refs.GlobalFunctionReference):
res = f'''{{R:GFunc}}{expr.decl.name}'''
elif isinstance(expr, refs.SuperReference):
res = f'''{{R}}super'''
elif isinstance(expr, refs.StaticFieldReference):
res = f'''{{R:StFi}}{expr.decl.parent_interface.name}::{expr.decl.name}'''
elif isinstance(expr, refs.StaticPropertyReference):
res = f'''{{R:StPr}}{expr.decl.parent_class.name}::{expr.decl.name}'''
elif isinstance(expr, refs.InstanceFieldReference):
res = f'''{self.expr(expr.object)}.{{F}}{expr.field.name}'''
elif isinstance(expr, refs.InstancePropertyReference):
res = f'''{self.expr(expr.object)}.{{P}}{expr.property.name}'''
elif isinstance(expr, refs.EnumMemberReference):
res = f'''{{E}}{expr.decl.parent_enum.name}::{expr.decl.name}'''
elif isinstance(expr, exprs.NullCoalesceExpression):
res = f'''{self.expr(expr.default_expr)} ?? {self.expr(expr.expr_if_null)}'''
else:
pass
if self.show_types:
res = f'''<{self.type(expr.get_type(), True)}>({res})'''
return res
def block(self, block, allow_one_liner = True):
if self.preview_only:
return " { ... }"
stmt_len = len(block.statements)
return " { }" if stmt_len == 0 else f'''\n{self.pad(self.raw_block(block))}''' if allow_one_liner and stmt_len == 1 else f''' {{\n{self.pad(self.raw_block(block))}\n}}'''
def stmt(self, stmt):
res = "UNKNOWN-STATEMENT"
if isinstance(stmt, stats.BreakStatement):
res = "break;"
elif isinstance(stmt, stats.ReturnStatement):
res = "return;" if stmt.expression == None else f'''return {self.expr(stmt.expression)};'''
elif isinstance(stmt, stats.UnsetStatement):
res = f'''unset {self.expr(stmt.expression)};'''
elif isinstance(stmt, stats.ThrowStatement):
res = f'''throw {self.expr(stmt.expression)};'''
elif isinstance(stmt, stats.ExpressionStatement):
res = f'''{self.expr(stmt.expression)};'''
elif isinstance(stmt, stats.VariableDeclaration):
res = f'''var {self.var(stmt)};'''
elif isinstance(stmt, stats.ForeachStatement):
res = f'''for (const {stmt.item_var.name} of {self.expr(stmt.items)})''' + self.block(stmt.body)
elif isinstance(stmt, stats.IfStatement):
else_if = stmt.else_ != None and len(stmt.else_.statements) == 1 and isinstance(stmt.else_.statements[0], stats.IfStatement)
res = f'''if ({self.expr(stmt.condition)}){self.block(stmt.then)}'''
if not self.preview_only:
res += (f'''\nelse {self.stmt(stmt.else_.statements[0])}''' if else_if else "") + (f'''\nelse''' + self.block(stmt.else_) if not else_if and stmt.else_ != None else "")
elif isinstance(stmt, stats.WhileStatement):
res = f'''while ({self.expr(stmt.condition)})''' + self.block(stmt.body)
elif isinstance(stmt, stats.ForStatement):
res = f'''for ({(self.var(stmt.item_var) if stmt.item_var != None else "")}; {self.expr(stmt.condition)}; {self.expr(stmt.incrementor)})''' + self.block(stmt.body)
elif isinstance(stmt, stats.DoStatement):
res = f'''do{self.block(stmt.body)} while ({self.expr(stmt.condition)})'''
elif isinstance(stmt, stats.TryStatement):
res = "try" + self.block(stmt.try_body, False) + (f''' catch ({stmt.catch_var.name}){self.block(stmt.catch_body)}''' if stmt.catch_body != None else "") + ("finally" + self.block(stmt.finally_body) if stmt.finally_body != None else "")
elif isinstance(stmt, stats.ContinueStatement):
res = f'''continue;'''
else:
pass
return res if self.preview_only else self.leading(stmt) + res
def raw_block(self, block):
return "\n".join(list(map(lambda stmt: self.stmt(stmt), block.statements)))
def method_base(self, method, returns):
if method == None:
return ""
name = method.name if isinstance(method, types.Method) else "constructor" if isinstance(method, types.Constructor) else method.name if isinstance(method, types.GlobalFunction) else "???"
type_args = method.type_arguments if isinstance(method, types.Method) else None
return self.pre_if("/* throws */ ", method.throws) + f'''{name}{self.type_args(type_args)}({", ".join(list(map(lambda p: self.leading(p) + self.var(p), method.parameters)))})''' + ("" if isinstance(returns, astTypes.VoidType) else f''': {self.type(returns)}''') + (f''' {{\n{self.pad(self.raw_block(method.body))}\n}}''' if method.body != None else ";")
def method(self, method):
return "" if method == None else ("static " if method.is_static else "") + ("@mutates " if method.attributes != None and "mutates" in method.attributes else "") + self.method_base(method, method.returns)
def class_like(self, cls_):
res_list = []
res_list.append("\n".join(list(map(lambda field: self.var(field) + ";", cls_.fields))))
if isinstance(cls_, types.Class):
res_list.append("\n".join(list(map(lambda prop: self.var(prop) + ";", cls_.properties))))
res_list.append(self.method_base(cls_.constructor_, astTypes.VoidType.instance))
res_list.append("\n\n".join(list(map(lambda method: self.method(method), cls_.methods))))
return self.pad("\n\n".join(list(filter(lambda x: x != "", res_list))))
def pad(self, str):
return "\n".join(list(map(lambda x: f''' {x}''', re.split("\\n", str))))
def imp(self, imp):
return "" + ("X" if isinstance(imp, types.UnresolvedImport) else "C" if isinstance(imp, types.Class) else "I" if isinstance(imp, types.Interface) else "E" if isinstance(imp, types.Enum) else "???") + f''':{imp.name}'''
def node_repr(self, node):
if isinstance(node, stats.Statement):
return self.stmt(node)
elif isinstance(node, exprs.Expression):
return self.expr(node)
else:
return "/* TODO: missing */"
def | (self, source_file):
imps = list(map(lambda imp: (f'''import * as {imp.import_as}''' if imp.import_all else f'''import {{ {", ".join(list(map(lambda x: self.imp(x), imp.imports)))} }}''') + f''' from "{imp.export_scope.package_name}{self.pre("/", imp.export_scope.scope_name)}";''', source_file.imports))
enums = list(map(lambda enum_: f'''{self.leading(enum_)}enum {enum_.name} {{ {", ".join(list(map(lambda x: x.name, enum_.values)))} }}''', source_file.enums))
intfs = list(map(lambda intf: f'''{self.leading(intf)}interface {intf.name}{self.type_args(intf.type_arguments)}''' + f'''{self.pre_arr(" extends ", list(map(lambda x: self.type(x), intf.base_interfaces)))} {{\n{self.class_like(intf)}\n}}''', source_file.interfaces))
classes = list(map(lambda cls_: f'''{self.leading(cls_)}class {cls_.name}{self.type_args(cls_.type_arguments)}''' + self.pre(" extends ", self.type(cls_.base_class) if cls_.base_class != None else None) + self.pre_arr(" implements ", list(map(lambda x: self.type(x), cls_.base_interfaces))) + f''' {{\n{self.class_like(cls_)}\n}}''', source_file.classes))
funcs = list(map(lambda func: f'''{self.leading(func)}function {func.name}{self.method_base(func, func.returns)}''', source_file.funcs))
main = self.raw_block(source_file.main_block)
result = f'''// export scope: {source_file.export_scope.package_name}/{source_file.export_scope.scope_name}\n''' + "\n\n".join(list(filter(lambda x: x != "", ["\n".join(imps), "\n".join(enums), "\n\n".join(intfs), "\n\n".join(classes), "\n\n".join(funcs), main])))
return result | generate |
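# A hedged usage sketch: source_file is assumed to be an already-parsed OneLang
# SourceFile (its construction is out of scope here):
#
#   compact = TSOverviewGenerator.preview.generate(source_file)          # bodies elided
#   verbose = TSOverviewGenerator(show_types=True).generate(source_file)
#   print(compact)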
deGatosYRatones.py | # -*- coding: utf-8 -*-
"""
Correa González Alfredo
Of cats and mice
- I have k cats (and I mice) at home.
- I serve my cats their food on m plates.
- The cats and mice have reached an agreement to share the time
and the food, but they have to convince me that they are doing
their job.
- The cats may eat from their m food plates.
- The mice may eat from those plates as long as
they are not seen.
- If a cat sees a mouse eating, it must eat the mouse.
- The plates are placed next to one another.
- Only one animal may eat from a plate at a time.
- If a cat is eating and sees a mouse start eating
from another plate, the cat spots it and eats it.
- By gentlemen's agreement, the cats may not approach
the plates while there are mice eating.
"""
from threading import Semaphore, Thread, Event
import threading
import time
import random
hambreDeGato = 100
hambreDeRaton = 2
numeroDeGatos = 2
numeroDeRatones = 10
platos = []
p = 5
gatosComiendo = 0
ratonesComiendo = 0
mutex_hambreGato = threading.Semaphore(1)
mutex_hambreRaton = threading.Semaphore(1)
entrar_a_comer = Semaphore(1)
def gato(id,m):
global gatosComiendo, ratonesComiendo, platos, numeroDeRatones
while numeroDeRatones != 0:
time.sleep(random.random() / hambreDeGato)
entrar_a_comer.acquire()
entrar_a_comer.release()
mutex_hambreGato.acquire()
if ratonesComiendo > 0:
print("Gato {} no se acerca a los platos por su orgullo de caballero".format(id))
mutex_hambreGato.release()
else:
platos[id%m].acquire()
print("El gato {} comienza a comer del plato {}".format(id, id%m))
gatosComiendo = gatosComiendo + 1
print("El gato {} terminó de comer".format(id))
gatosComiendo = gatosComiendo - 1
platos[id%m].release()
mutex_hambreGato.release()
def raton(i |
global gatosComiendo, ratonesComiendo, platos, numeroDeRatones
while numeroDeRatones != 0:
time.sleep(random.random() / hambreDeRaton)
entrar_a_comer.acquire()
entrar_a_comer.release()
mutex_hambreRaton.acquire()
if gatosComiendo > 0:
print("Se comieron al ratón {}".format(id))
ratonesComiendo = ratonesComiendo - 1
numeroDeRatones = numeroDeRatones - 1
if(numeroDeRatones == 0):
print("¡¡¡¡¡SE MURIERON TODOS LOS RATONES :(!!!!!")
time.sleep(10000)
mutex_hambreRaton.release()
else:
platos[id%m].acquire()
print("El ratón {} comienza a comer en el plato {}".format(id, id%m))
ratonesComiendo = ratonesComiendo + 1
print("El ratón {} terminó de comer".format(id))
ratonesComiendo = ratonesComiendo - 1
platos[id%m].release()
mutex_hambreRaton.release()
for i in range(p):
platos.append(Semaphore(1))
for i in range(numeroDeGatos):
Thread(target = gato, args = [i,p]).start()
for i in range(numeroDeRatones):
Thread(target = raton, args = [i,p]).start()
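# A minimal sketch of the per-plate locking used above: each plate is a
# Semaphore(1), so at most one animal eats from a given plate at a time.
#
#   from threading import Semaphore
#   plates = [Semaphore(1) for _ in range(5)]
#   plate = plates[3 % len(plates)]   # animal with id 3 maps to plate 3
#   plate.acquire()
#   try:
#       pass                          # eat
#   finally:
#       plate.release()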
| d,m): |
list_upgrade_img.go | package cloudwf
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
//
// Code generated by Alibaba Cloud SDK Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"github.com/CRORCR/alibaba-cloud-sdk-go/sdk/requests"
"github.com/CRORCR/alibaba-cloud-sdk-go/sdk/responses"
)
// ListUpgradeImg invokes the cloudwf.ListUpgradeImg API synchronously
// api document: https://help.aliyun.com/api/cloudwf/listupgradeimg.html
func (client *Client) ListUpgradeImg(request *ListUpgradeImgRequest) (response *ListUpgradeImgResponse, err error) {
response = CreateListUpgradeImgResponse()
err = client.DoAction(request, response)
return
}
// ListUpgradeImgWithChan invokes the cloudwf.ListUpgradeImg API asynchronously
// api document: https://help.aliyun.com/api/cloudwf/listupgradeimg.html
// asynchronous document: https://help.aliyun.com/document_detail/66220.html
func (client *Client) ListUpgradeImgWithChan(request *ListUpgradeImgRequest) (<-chan *ListUpgradeImgResponse, <-chan error) {
responseChan := make(chan *ListUpgradeImgResponse, 1)
errChan := make(chan error, 1)
err := client.AddAsyncTask(func() {
defer close(responseChan)
defer close(errChan)
response, err := client.ListUpgradeImg(request)
if err != nil {
errChan <- err
} else {
responseChan <- response
}
})
if err != nil {
errChan <- err
close(responseChan)
close(errChan)
}
return responseChan, errChan
}
// ListUpgradeImgWithCallback invokes the cloudwf.ListUpgradeImg API asynchronously
// api document: https://help.aliyun.com/api/cloudwf/listupgradeimg.html
// asynchronous document: https://help.aliyun.com/document_detail/66220.html
func (client *Client) ListUpgradeImgWithCallback(request *ListUpgradeImgRequest, callback func(response *ListUpgradeImgResponse, err error)) <-chan int {
result := make(chan int, 1)
err := client.AddAsyncTask(func() {
var response *ListUpgradeImgResponse
var err error
defer close(result)
response, err = client.ListUpgradeImg(request)
callback(response, err)
result <- 1
})
if err != nil {
defer close(result)
callback(nil, err)
result <- 0
}
return result
}
// ListUpgradeImgRequest is the request struct for api ListUpgradeImg
type ListUpgradeImgRequest struct {
*requests.RpcRequest
Length requests.Integer `position:"Query" name:"Length"`
PageIndex requests.Integer `position:"Query" name:"PageIndex"`
}
// ListUpgradeImgResponse is the response struct for api ListUpgradeImg
type ListUpgradeImgResponse struct {
*responses.BaseResponse
RequestId string `json:"RequestId" xml:"RequestId"`
Success bool `json:"Success" xml:"Success"`
Message string `json:"Message" xml:"Message"`
Data string `json:"Data" xml:"Data"`
ErrorCode int `json:"ErrorCode" xml:"ErrorCode"`
ErrorMsg string `json:"ErrorMsg" xml:"ErrorMsg"`
}
// CreateListUpgradeImgRequest creates a request to invoke ListUpgradeImg API
func CreateListUpgradeImgRequest() (request *ListUpgradeImgRequest) {
request = &ListUpgradeImgRequest{
RpcRequest: &requests.RpcRequest{},
}
request.InitWithApiInfo("cloudwf", "2017-03-28", "ListUpgradeImg", "cloudwf", "openAPI")
return
}
// CreateListUpgradeImgResponse creates a response to parse from ListUpgradeImg response
func CreateListUpgradeImgResponse() (response *ListUpgradeImgResponse) | {
response = &ListUpgradeImgResponse{
BaseResponse: &responses.BaseResponse{},
}
return
} |
|
_clusters_operations.py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ClustersOperations(object):
"""ClustersOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.hdinsight.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _create_initial(
self,
resource_group_name, # type: str
cluster_name, # type: str
parameters, # type: "_models.ClusterCreateParametersExtended"
**kwargs # type: Any
):
# type: (...) -> "_models.Cluster"
cls = kwargs.pop('cls', None) # type: ClsType["_models.Cluster"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ClusterCreateParametersExtended')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Cluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}'} # type: ignore
def begin_create(
self,
resource_group_name, # type: str
cluster_name, # type: str
parameters, # type: "_models.ClusterCreateParametersExtended" | ):
# type: (...) -> LROPoller["_models.Cluster"]
"""Creates a new HDInsight cluster with the specified parameters.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cluster_name: The name of the cluster.
:type cluster_name: str
:param parameters: The cluster create request.
:type parameters: ~azure.mgmt.hdinsight.models.ClusterCreateParametersExtended
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either Cluster or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.hdinsight.models.Cluster]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Cluster"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_initial(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Cluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}'} # type: ignore
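# A hedged usage sketch for the begin_create LRO above; assumes an
# HDInsightManagementClient wired with azure-identity credentials and a
# pre-built ClusterCreateParametersExtended named create_params:
#
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.hdinsight import HDInsightManagementClient
#
#   client = HDInsightManagementClient(DefaultAzureCredential(), "<subscription-id>")
#   poller = client.clusters.begin_create("my-rg", "my-cluster", create_params)
#   cluster = poller.result()   # blocks until the long-running operation finishes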
def update(
self,
resource_group_name, # type: str
cluster_name, # type: str
parameters, # type: "_models.ClusterPatchParameters"
**kwargs # type: Any
):
# type: (...) -> "_models.Cluster"
"""Patch HDInsight cluster with the specified parameters.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cluster_name: The name of the cluster.
:type cluster_name: str
:param parameters: The cluster patch request.
:type parameters: ~azure.mgmt.hdinsight.models.ClusterPatchParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Cluster, or the result of cls(response)
:rtype: ~azure.mgmt.hdinsight.models.Cluster
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Cluster"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ClusterPatchParameters')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Cluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
cluster_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
cluster_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified HDInsight cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cluster_name: The name of the cluster.
:type cluster_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
cluster_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.Cluster"
"""Gets the specified cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cluster_name: The name of the cluster.
:type cluster_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Cluster, or the result of cls(response)
:rtype: ~azure.mgmt.hdinsight.models.Cluster
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Cluster"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Cluster', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}'} # type: ignore
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ClusterListResult"]
"""Lists the HDInsight clusters in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ClusterListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.hdinsight.models.ClusterListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ClusterListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ClusterListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters'} # type: ignore
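# A hedged sketch for the pager above; client as in the begin_create sketch:
#
#   for cluster in client.clusters.list_by_resource_group("my-rg"):
#       print(cluster.name)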
def _resize_initial(
self,
resource_group_name, # type: str
cluster_name, # type: str
role_name, # type: Union[str, "_models.RoleName"]
parameters, # type: "_models.ClusterResizeParameters"
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._resize_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'roleName': self._serialize.url("role_name", role_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ClusterResizeParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_resize_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/roles/{roleName}/resize'} # type: ignore
def begin_resize(
self,
resource_group_name, # type: str
cluster_name, # type: str
role_name, # type: Union[str, "_models.RoleName"]
parameters, # type: "_models.ClusterResizeParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Resizes the specified HDInsight cluster to the specified size.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cluster_name: The name of the cluster.
:type cluster_name: str
:param role_name: The constant value for the roleName.
:type role_name: str or ~azure.mgmt.hdinsight.models.RoleName
:param parameters: The parameters for the resize operation.
:type parameters: ~azure.mgmt.hdinsight.models.ClusterResizeParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._resize_initial(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
role_name=role_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'roleName': self._serialize.url("role_name", role_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_resize.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/roles/{roleName}/resize'} # type: ignore
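# Usage sketch (hypothetical; assumes an already-constructed
# HDInsightManagementClient named `client` and existing resource names):
#
#     poller = client.clusters.begin_resize(
#         resource_group_name="my-rg",
#         cluster_name="my-cluster",
#         role_name="workernode",
#         parameters=_models.ClusterResizeParameters(target_instance_count=4),
#     )
#     poller.result()  # blocks until the resize LRO completes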
def _update_auto_scale_configuration_initial(
self,
resource_group_name, # type: str
cluster_name, # type: str
role_name, # type: Union[str, "_models.RoleName"]
parameters, # type: "_models.AutoscaleConfigurationUpdateParameter"
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_auto_scale_configuration_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'roleName': self._serialize.url("role_name", role_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'AutoscaleConfigurationUpdateParameter')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_update_auto_scale_configuration_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/roles/{roleName}/autoscale'} # type: ignore
def begin_update_auto_scale_configuration(
self,
resource_group_name, # type: str
cluster_name, # type: str
role_name, # type: Union[str, "_models.RoleName"]
parameters, # type: "_models.AutoscaleConfigurationUpdateParameter"
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Updates the Autoscale Configuration for HDInsight cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cluster_name: The name of the cluster.
:type cluster_name: str
:param role_name: The constant value for the roleName.
:type role_name: str or ~azure.mgmt.hdinsight.models.RoleName
:param parameters: The parameters for the update autoscale configuration operation.
:type parameters: ~azure.mgmt.hdinsight.models.AutoscaleConfigurationUpdateParameter
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_auto_scale_configuration_initial(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
role_name=role_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'roleName': self._serialize.url("role_name", role_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_auto_scale_configuration.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/roles/{roleName}/autoscale'} # type: ignore
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ClusterListResult"]
"""Lists all the HDInsight clusters under the subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ClusterListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.hdinsight.models.ClusterListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ClusterListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ClusterListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.HDInsight/clusters'} # type: ignore
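# Usage sketch (hypothetical client name): ItemPaged follows next_link
# transparently, so callers simply iterate over the pager:
#
#     for cluster in client.clusters.list():
#         print(cluster.name)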
def _rotate_disk_encryption_key_initial(
self,
resource_group_name, # type: str
cluster_name, # type: str
parameters, # type: "_models.ClusterDiskEncryptionParameters"
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._rotate_disk_encryption_key_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ClusterDiskEncryptionParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_rotate_disk_encryption_key_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/rotatediskencryptionkey'} # type: ignore
def begin_rotate_disk_encryption_key(
self,
resource_group_name, # type: str
cluster_name, # type: str
parameters, # type: "_models.ClusterDiskEncryptionParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Rotate disk encryption key of the specified HDInsight cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cluster_name: The name of the cluster.
:type cluster_name: str
:param parameters: The parameters for the disk encryption operation.
:type parameters: ~azure.mgmt.hdinsight.models.ClusterDiskEncryptionParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._rotate_disk_encryption_key_initial(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_rotate_disk_encryption_key.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/rotatediskencryptionkey'} # type: ignore
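# Usage sketch (hypothetical names): the continuation_token keyword consumed
# above lets a saved poller state be resumed later:
#
#     poller = client.clusters.begin_rotate_disk_encryption_key(
#         "my-rg", "my-cluster", parameters)
#     token = poller.continuation_token()
#     resumed = client.clusters.begin_rotate_disk_encryption_key(
#         "my-rg", "my-cluster", parameters, continuation_token=token)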
def get_gateway_settings(
self,
resource_group_name, # type: str
cluster_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.GatewaySettings"
"""Gets the gateway settings for the specified cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cluster_name: The name of the cluster.
:type cluster_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: GatewaySettings, or the result of cls(response)
:rtype: ~azure.mgmt.hdinsight.models.GatewaySettings
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.GatewaySettings"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
accept = "application/json"
# Construct URL
url = self.get_gateway_settings.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('GatewaySettings', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_gateway_settings.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/getGatewaySettings'} # type: ignore
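# Usage sketch (hypothetical client name; assumes the GatewaySettings model
# exposes the gateway credential fields):
#
#     settings = client.clusters.get_gateway_settings("my-rg", "my-cluster")
#     print(settings.user_name)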
def _update_gateway_settings_initial(
self,
resource_group_name, # type: str
cluster_name, # type: str
parameters, # type: "_models.UpdateGatewaySettingsParameters"
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_gateway_settings_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'UpdateGatewaySettingsParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_update_gateway_settings_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/updateGatewaySettings'} # type: ignore
def begin_update_gateway_settings(
self,
resource_group_name, # type: str
cluster_name, # type: str
parameters, # type: "_models.UpdateGatewaySettingsParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Configures the gateway settings on the specified cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cluster_name: The name of the cluster.
:type cluster_name: str
:param parameters: The cluster configurations.
:type parameters: ~azure.mgmt.hdinsight.models.UpdateGatewaySettingsParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_gateway_settings_initial(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_gateway_settings.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/updateGatewaySettings'} # type: ignore
def get_azure_async_operation_status(
self,
resource_group_name, # type: str
cluster_name, # type: str
operation_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.AsyncOperationResult"
"""The the async operation status.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cluster_name: The name of the cluster.
:type cluster_name: str
:param operation_id: The long running operation id.
:type operation_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AsyncOperationResult, or the result of cls(response)
:rtype: ~azure.mgmt.hdinsight.models.AsyncOperationResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AsyncOperationResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
accept = "application/json"
# Construct URL
url = self.get_azure_async_operation_status.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
'operationId': self._serialize.url("operation_id", operation_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('AsyncOperationResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_azure_async_operation_status.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/azureasyncoperations/{operationId}'} # type: ignore
def _update_identity_certificate_initial(
self,
resource_group_name, # type: str
cluster_name, # type: str
parameters, # type: "_models.UpdateClusterIdentityCertificateParameters"
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_identity_certificate_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'UpdateClusterIdentityCertificateParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_update_identity_certificate_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/updateClusterIdentityCertificate'} # type: ignore
def begin_update_identity_certificate(
self,
resource_group_name, # type: str
cluster_name, # type: str
parameters, # type: "_models.UpdateClusterIdentityCertificateParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Updates the cluster identity certificate.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cluster_name: The name of the cluster.
:type cluster_name: str
:param parameters: The cluster configurations.
:type parameters: ~azure.mgmt.hdinsight.models.UpdateClusterIdentityCertificateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_identity_certificate_initial(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_identity_certificate.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/updateClusterIdentityCertificate'} # type: ignore
def _execute_script_actions_initial(
self,
resource_group_name, # type: str
cluster_name, # type: str
parameters, # type: "_models.ExecuteScriptActionParameters"
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._execute_script_actions_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ExecuteScriptActionParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_execute_script_actions_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/executeScriptActions'} # type: ignore
def begin_execute_script_actions(
self,
resource_group_name, # type: str
cluster_name, # type: str
parameters, # type: "_models.ExecuteScriptActionParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Executes script actions on the specified HDInsight cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cluster_name: The name of the cluster.
:type cluster_name: str
:param parameters: The parameters for executing script actions.
:type parameters: ~azure.mgmt.hdinsight.models.ExecuteScriptActionParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._execute_script_actions_initial(
resource_group_name=resource_group_name,
cluster_name=cluster_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'clusterName': self._serialize.url("cluster_name", cluster_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_execute_script_actions.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HDInsight/clusters/{clusterName}/executeScriptActions'} # type: ignore | **kwargs # type: Any |
main.go | package main
import (
"fmt"
"time"
)
func main() | {
ch := make(chan int, 10)
ch2 := make(chan int, 10)
go func() {
var i int
for {
ch <- i
time.Sleep(time.Second)
ch2 <- i * i
time.Sleep(time.Second)
i++
}
}()
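// The select loop below multiplexes ch and ch2. time.After arms a fresh
// one-second timer on every iteration, so the timeout case fires only when
// neither channel delivers a value within that second.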
for {
select {
case v := <-ch:
fmt.Println(v)
case v := <-ch2:
fmt.Println(v)
case <-time.After(time.Second):
fmt.Println("get data timeout")
time.Sleep(time.Second)
}
}
} |
|
lab-05-1-logistic_regression.py | # Lab 5 Logistic Regression Classifier
import tensorflow as tf
tf.set_random_seed(777) # for reproducibility
x_data = [[1, 2], [2, 3], [3, 1], [4, 3], [5, 3], [6, 2]]
y_data = [[0], [0], [0], [1], [1], [1]] | X = tf.placeholder(tf.float32, shape=[None, 2])
Y = tf.placeholder(tf.float32, shape=[None, 1])
W = tf.Variable(tf.random_normal([2, 1]), name='weight')
b = tf.Variable(tf.random_normal([1]), name='bias')
# Hypothesis using sigmoid: tf.div(1., 1. + tf.exp(tf.matmul(X, W)))
hypothesis = tf.sigmoid(tf.matmul(X, W) + b)
# Cost function
cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1 - Y) *
tf.log(1 - hypothesis))
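# Note: this is the binary cross-entropy loss, -mean(y*log(h) + (1-y)*log(1-h)).
# tf.nn.sigmoid_cross_entropy_with_logits(labels=Y, logits=tf.matmul(X, W) + b)
# computes the same per-example loss more stably; wrap it in tf.reduce_mean.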
train = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(cost)
# Accuracy computation
# True if hypothesis>0.5 else False
predicted = tf.cast(hypothesis > 0.5, dtype=tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), dtype=tf.float32))
# Launch graph
with tf.Session() as sess:
# Initialize TensorFlow variables
sess.run(tf.global_variables_initializer())
feed = {X: x_data, Y: y_data}
for step in range(10001):
sess.run(train, feed_dict=feed)
if step % 200 == 0:
print(step, sess.run(cost, feed_dict=feed), sess.run(W))
# Accuracy report
h, c, a = sess.run([hypothesis, predicted, accuracy], feed_dict=feed)
print("\nHypothesis: ", h, "\nCorrect (Y): ", c, "\nAccuracy: ", a)
'''
Hypothesis: [[ 0.03074029]
[ 0.15884677]
[ 0.30486736]
[ 0.78138196]
[ 0.93957496]
[ 0.98016882]]
Correct (Y): [[ 0.]
[ 0.]
[ 0.]
[ 1.]
[ 1.]
[ 1.]]
Accuracy: 1.0
''' |
# placeholders for a tensor that will be always fed. |
resource-monitor-grant.ts | // https://www.terraform.io/docs/providers/snowflake/r/resource_monitor_grant.html
// generated from terraform resource schema
import { Construct } from 'constructs';
import * as cdktf from 'cdktf';
// Configuration
export interface ResourceMonitorGrantConfig extends cdktf.TerraformMetaArguments {
/**
* Identifier for the resource monitor; must be unique for your account.
*
* Docs at Terraform Registry: {@link https://www.terraform.io/docs/providers/snowflake/r/resource_monitor_grant.html#monitor_name ResourceMonitorGrant#monitor_name}
*/
readonly monitorName: string;
/**
* The privilege to grant on the resource monitor.
*
* Docs at Terraform Registry: {@link https://www.terraform.io/docs/providers/snowflake/r/resource_monitor_grant.html#privilege ResourceMonitorGrant#privilege}
*/
readonly privilege?: string;
/**
* Grants privilege to these roles.
*
* Docs at Terraform Registry: {@link https://www.terraform.io/docs/providers/snowflake/r/resource_monitor_grant.html#roles ResourceMonitorGrant#roles}
*/
readonly roles?: string[];
/**
* When this is set to true, allows the recipient role to grant the privileges to other roles.
*
* Docs at Terraform Registry: {@link https://www.terraform.io/docs/providers/snowflake/r/resource_monitor_grant.html#with_grant_option ResourceMonitorGrant#with_grant_option}
*/
readonly withGrantOption?: boolean | cdktf.IResolvable;
}
/**
* Represents a {@link https://www.terraform.io/docs/providers/snowflake/r/resource_monitor_grant.html snowflake_resource_monitor_grant}
*/
export class | extends cdktf.TerraformResource {
// =================
// STATIC PROPERTIES
// =================
public static readonly tfResourceType: string = "snowflake_resource_monitor_grant";
// ===========
// INITIALIZER
// ===========
/**
* Create a new {@link https://www.terraform.io/docs/providers/snowflake/r/resource_monitor_grant.html snowflake_resource_monitor_grant} Resource
*
* @param scope The scope in which to define this construct
* @param id The scoped construct ID. Must be unique amongst siblings in the same scope
* @param options ResourceMonitorGrantConfig
*/
public constructor(scope: Construct, id: string, config: ResourceMonitorGrantConfig) {
super(scope, id, {
terraformResourceType: 'snowflake_resource_monitor_grant',
terraformGeneratorMetadata: {
providerName: 'snowflake'
},
provider: config.provider,
dependsOn: config.dependsOn,
count: config.count,
lifecycle: config.lifecycle
});
this._monitorName = config.monitorName;
this._privilege = config.privilege;
this._roles = config.roles;
this._withGrantOption = config.withGrantOption;
}
// ==========
// ATTRIBUTES
// ==========
// id - computed: true, optional: true, required: false
public get id() {
return this.getStringAttribute('id');
}
// monitor_name - computed: false, optional: false, required: true
private _monitorName?: string;
public get monitorName() {
return this.getStringAttribute('monitor_name');
}
public set monitorName(value: string) {
this._monitorName = value;
}
// Temporarily expose input value. Use with caution.
public get monitorNameInput() {
return this._monitorName
}
// privilege - computed: false, optional: true, required: false
private _privilege?: string | undefined;
public get privilege() {
return this.getStringAttribute('privilege');
}
public set privilege(value: string | undefined) {
this._privilege = value;
}
public resetPrivilege() {
this._privilege = undefined;
}
// Temporarily expose input value. Use with caution.
public get privilegeInput() {
return this._privilege
}
// roles - computed: false, optional: true, required: false
private _roles?: string[] | undefined;
public get roles() {
return this.getListAttribute('roles');
}
public set roles(value: string[] | undefined) {
this._roles = value;
}
public resetRoles() {
this._roles = undefined;
}
// Temporarily expose input value. Use with caution.
public get rolesInput() {
return this._roles
}
// with_grant_option - computed: false, optional: true, required: false
private _withGrantOption?: boolean | cdktf.IResolvable | undefined;
public get withGrantOption() {
return this.getBooleanAttribute('with_grant_option') as any;
}
public set withGrantOption(value: boolean | cdktf.IResolvable | undefined) {
this._withGrantOption = value;
}
public resetWithGrantOption() {
this._withGrantOption = undefined;
}
// Temporarily expose input value. Use with caution.
public get withGrantOptionInput() {
return this._withGrantOption
}
// =========
// SYNTHESIS
// =========
protected synthesizeAttributes(): { [name: string]: any } {
return {
monitor_name: cdktf.stringToTerraform(this._monitorName),
privilege: cdktf.stringToTerraform(this._privilege),
roles: cdktf.listMapper(cdktf.stringToTerraform)(this._roles),
with_grant_option: cdktf.booleanToTerraform(this._withGrantOption),
};
}
}
| ResourceMonitorGrant |
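// Usage sketch (hypothetical stack and names; assumes the snowflake provider
// is configured elsewhere in the cdktf app):
//
//   new ResourceMonitorGrant(this, 'monitor-grant', {
//     monitorName: 'ACCOUNT_MONITOR',
//     privilege: 'MONITOR',
//     roles: ['SYSADMIN'],
//     withGrantOption: false,
//   });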
reshape.py | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 23 16:06:35 2018
@author: libo
"""
from PIL import Image
import os
def | (image_path, new_path): # resize images to a uniform size
print('============>> resizing images')
for img_name in os.listdir(image_path):
img_path = image_path + "/" + img_name # full path of this image
image = Image.open(img_path) # open this particular image
image = image.resize((512, 512)) # target size for the conversion
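# Note: Image.resize returns a new image and forces an exact 512x512, which can
# distort the aspect ratio; image.thumbnail((512, 512)) resizes in place while
# preserving it, if that matters here.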
# process the 1 channel image
image.save(new_path + '/' + img_name)
print("end the processing!")
if __name__ == '__main__':
print("ready for :::::::: ")
ori_path = r"Z:\pycharm_projects\ssd\VOC2007\JPEGImages" # folder path of the input images
new_path = 'Z:/pycharm_projects/ssd/VOC2007/reshape' # folder path for the resized images
image_resize(ori_path, new_path) | image_resize |
flint_sender_sdk.js | /*! flint-web-sdk build:0.1.0, development. Copyright(C) 2013-2014 www.OpenFlint.org */
(function e(t,n,r){function s(o,u){if(!n[o]){if(!t[o]){var a=typeof require=="function"&&require;if(!u&&a)return a(o,!0);if(i)return i(o,!0);var f=new Error("Cannot find module '"+o+"'");throw f.code="MODULE_NOT_FOUND",f}var l=n[o]={exports:{}};t[o][0].call(l.exports,function(e){var n=t[o][1][e];return s(n?n:e)},l,l.exports,e,t,n,r)}return n[o].exports}var i=typeof require=="function"&&require;for(var o=0;o<r.length;o++)s(r[o]);return s})({1:[function(require,module,exports){
var dataBrowser = [
{
string: navigator.userAgent,
subString: "Chrome",
identity: "Chrome"
},
{
string: navigator.userAgent,
subString: "OmniWeb",
versionSearch: "OmniWeb/",
identity: "OmniWeb"
},
{
string: navigator.vendor,
subString: "Apple",
identity: "Safari",
versionSearch: "Version"
},
{
prop: window.opera,
identity: "Opera",
versionSearch: "Version"
},
{
string: navigator.vendor,
subString: "iCab",
identity: "iCab"
},
{
string: navigator.vendor,
subString: "KDE",
identity: "Konqueror"
},
{
string: navigator.userAgent,
subString: "Firefox",
identity: "Firefox"
},
{
string: navigator.vendor,
subString: "Camino",
identity: "Camino"
},
{ // for newer Netscapes (6+)
string: navigator.userAgent,
subString: "Netscape",
identity: "Netscape"
},
{
string: navigator.userAgent,
subString: "MSIE",
identity: "Explorer",
versionSearch: "MSIE"
},
{
string: navigator.userAgent,
subString: "Gecko",
identity: "Mozilla",
versionSearch: "rv"
},
{ // for older Netscapes (4-)
string: navigator.userAgent,
subString: "Mozilla",
identity: "Netscape",
versionSearch: "Mozilla"
}
];
var dataOS = [
{
string: navigator.platform,
subString: "Win",
identity: "Windows"
},
{
string: navigator.platform,
subString: "Mac",
identity: "Mac"
},
{
string: navigator.userAgent,
subString: "iPhone",
identity: "iPhone/iPod"
},
{
string: navigator.platform,
subString: "Linux",
identity: "Linux"
}
];
BrowserDetect = function () {
};
BrowserDetect.prototype.init = function () {
this.browser = this.searchString(dataBrowser) || "An unknown browser";
this.version = this.searchVersion(navigator.userAgent)
|| this.searchVersion(navigator.appVersion)
|| "an unknown version";
this.OS = this.searchString(dataOS) || "an unknown OS";
};
BrowserDetect.prototype.searchString = function (data) {
for (var i = 0; i < data.length; i++) {
var dataString = data[i].string;
var dataProp = data[i].prop;
this.versionSearchString = data[i].versionSearch || data[i].identity;
if (dataString) {
if (dataString.indexOf(data[i].subString) != -1)
return data[i].identity;
}
else if (dataProp)
return data[i].identity;
}
};
BrowserDetect.prototype.searchVersion = function (dataString) {
var sIndex = dataString.indexOf(this.versionSearchString);
if (sIndex == -1) return;
var reg = /(?:;|\s|$)/gi;
reg.lastIndex = sIndex = sIndex + this.versionSearchString.length + 1;
var eIndex = reg.exec(dataString).index;
return dataString.substring(sIndex, eIndex);
//return parseFloat(dataString.substring(index+this.versionSearchString.length+1));
};
module.exports = BrowserDetect;
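// Usage sketch, straight from the API above:
//
//   var detect = new BrowserDetect();
//   detect.init();
//   console.log(detect.browser, detect.version, detect.OS);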
},{}],2:[function(require,module,exports){
var FlintConstants;
FlintConstants = (function() {
function FlintConstants() {}
FlintConstants.DEFAULT_CHANNEL_NAME = 'channelBaseUrl';
FlintConstants.DEFAULT_NAMESPACE = 'urn:flint:org.openflint.default';
FlintConstants.MEDIA_NAMESPACE = 'urn:flint:org.openflint.fling.media';
return FlintConstants;
})();
module.exports = FlintConstants;
},{}],3:[function(require,module,exports){
var EventEmitter, MessageBus,
__hasProp = {}.hasOwnProperty,
__extends = function(child, parent) { for (var key in parent) { if (__hasProp.call(parent, key)) child[key] = parent[key]; } function ctor() { this.constructor = child; } ctor.prototype = parent.prototype; child.prototype = new ctor(); child.__super__ = parent.prototype; return child; };
EventEmitter = require('eventemitter3');
MessageBus = (function(_super) {
__extends(MessageBus, _super);
function MessageBus(channel, namespace) {
this.channel = channel;
this.namespace = namespace;
this._init();
}
MessageBus.prototype._init = function() {
throw 'Not Implement';
};
MessageBus.prototype.send = function() {
throw 'Not Implement';
};
return MessageBus;
})(EventEmitter);
module.exports = MessageBus;
},{"eventemitter3":17}],4:[function(require,module,exports){
var EventEmitter, MessageChannel,
__hasProp = {}.hasOwnProperty,
__extends = function(child, parent) { for (var key in parent) { if (__hasProp.call(parent, key)) child[key] = parent[key]; } function ctor() { this.constructor = child; } ctor.prototype = parent.prototype; child.prototype = new ctor(); child.__super__ = parent.prototype; return child; };
EventEmitter = require('eventemitter3');
MessageChannel = (function(_super) {
var CLOSED, CLOSING, CONNECTING, OPEN;
__extends(MessageChannel, _super);
CONNECTING = 0;
OPEN = 1;
CLOSING = 2;
CLOSED = 3;
function MessageChannel(name, url) {
this.name = name;
this.url = url;
this.wsChannel = null;
this.opened = false;
}
MessageChannel.prototype.isOpened = function() {
return this.opened;
};
MessageChannel.prototype.getName = function() {
return this.name;
};
MessageChannel.prototype.open = function(url) {
if (url) {
this.url = url;
}
this.wsChannel = new WebSocket(this.url);
this.wsChannel.onopen = (function(_this) {
return function(event) {
return _this.emit('open', event);
};
})(this);
this.wsChannel.onclose = (function(_this) {
return function(event) {
return _this.emit('close', event);
};
})(this);
this.wsChannel.onerror = (function(_this) {
return function(event) {
return _this.emit('error', event);
};
})(this);
this._initOnMessage();
return this.opened = true;
};
MessageChannel.prototype._initOnMessage = function() {
return this.wsChannel.onmessage = (function(_this) {
return function(event) {
return _this.emit('message', event.data);
};
})(this);
};
MessageChannel.prototype.close = function() {
var _ref;
this.opened = false;
return (_ref = this.wsChannel) != null ? _ref.close() : void 0;
};
MessageChannel.prototype.send = function(data) {
var _ref, _ref1, _ref2;
if (!this.opened) {
console.warn('MessageChannel is not opened, cannot send: ', data);
return;
}
if (!data) {
return;
}
if (((_ref = this.wsChannel) != null ? _ref.readyState : void 0) === OPEN) {
return (_ref1 = this.wsChannel) != null ? _ref1.send(data) : void 0;
} else if (((_ref2 = this.wsChannel) != null ? _ref2.readyState : void 0) === CONNECTING) {
return setTimeout(((function(_this) {
return function() {
return _this.send(data);
};
})(this)), 50);
} else {
return console.error('MessageChannel send failed, channel readyState is ', this.wsChannel.readyState);
}
};
return MessageChannel;
})(EventEmitter);
module.exports = MessageChannel;
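// Usage sketch (hypothetical endpoint; the class re-emits the WebSocket
// lifecycle as EventEmitter events and retries sends while the socket is
// still CONNECTING):
//
//   var channel = new MessageChannel('default', 'ws://192.168.1.10:9439/channels/app');
//   channel.on('open', function () {
//     channel.send(JSON.stringify({ type: 'ping' }));
//   });
//   channel.on('message', function (data) { console.log('received', data); });
//   channel.open();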
},{"eventemitter3":17}],5:[function(require,module,exports){
var BrowserDetect, Platform;
BrowserDetect = require('./BrowserDetect');
Platform = (function() {
function Platform() {}
Platform.detector = null;
Platform.getPlatform = function() {
var platform;
if (!Platform.detector) {
Platform.detector = new BrowserDetect();
Platform.detector.init();
if (Platform.detector.browser.toLowerCase() === 'firefox') {
if (window.MozActivity !== void 0) {
Platform.detector.browser = 'ffos';
}
}
}
platform = {
browser: Platform.detector.browser.toLowerCase(),
version: Platform.detector.version.toLowerCase(),
os: Platform.detector.OS.toLowerCase()
};
return platform;
};
return Platform;
})();
module.exports = Platform;
},{"./BrowserDetect":1}],6:[function(require,module,exports){
module.exports.RTCSessionDescription = window.RTCSessionDescription ||
window.mozRTCSessionDescription;
module.exports.RTCPeerConnection = window.RTCPeerConnection ||
window.mozRTCPeerConnection || window.webkitRTCPeerConnection;
module.exports.RTCIceCandidate = window.RTCIceCandidate ||
window.mozRTCIceCandidate;
},{}],7:[function(require,module,exports){
var util = require('./util');
var EventEmitter = require('eventemitter3');
var Negotiator = require('./negotiator');
var Reliable = require('reliable');
/**
* Wraps a DataChannel between two Peers.
*/
function DataConnection(peer, provider, options) {
if (!(this instanceof DataConnection)) return new DataConnection(peer, provider, options);
EventEmitter.call(this);
this.options = util.extend({
serialization: 'binary',
reliable: false
}, options);
// Connection is not open yet.
this.open = false;
this.type = 'data';
this.peer = peer;
this.provider = provider;
this.id = this.options.connectionId || DataConnection._idPrefix + util.randomToken();
this.label = this.options.label || this.id;
this.metadata = this.options.metadata;
this.serialization = this.options.serialization;
this.reliable = this.options.reliable;
// Data channel buffering.
this._buffer = [];
this._buffering = false;
this.bufferSize = 0;
// For storing large data.
this._chunkedData = {};
if (this.options._payload) {
this._peerBrowser = this.options._payload.browser;
}
Negotiator.startConnection(
this,
this.options._payload || {
originator: true
}
);
}
util.inherits(DataConnection, EventEmitter);
DataConnection._idPrefix = 'dc_';
/** Called by the Negotiator when the DataChannel is ready. */
DataConnection.prototype.initialize = function(dc) {
this._dc = this.dataChannel = dc;
this._configureDataChannel();
}
DataConnection.prototype._configureDataChannel = function() {
var self = this;
if (util.supports.sctp) {
this._dc.binaryType = 'arraybuffer';
}
this._dc.onopen = function() {
util.log('Data channel connection success');
self.open = true;
self.emit('open');
}
// Use the Reliable shim for non Firefox browsers
if (!util.supports.sctp && this.reliable) {
this._reliable = new Reliable(this._dc, util.debug);
}
if (this._reliable) {
this._reliable.onmessage = function(msg) {
self.emit('data', msg);
};
} else {
this._dc.onmessage = function(e) {
self._handleDataMessage(e);
};
}
this._dc.onclose = function(e) {
util.log('DataChannel closed for:', self.peer);
self.close();
};
}
// Handles a DataChannel message.
DataConnection.prototype._handleDataMessage = function(e) {
var self = this;
var data = e.data;
var datatype = data.constructor;
if (this.serialization === 'binary' || this.serialization === 'binary-utf8') {
if (datatype === Blob) {
// Datatype should never be blob
util.blobToArrayBuffer(data, function(ab) {
data = util.unpack(ab);
self.emit('data', data);
});
return;
} else if (datatype === ArrayBuffer) {
data = util.unpack(data);
} else if (datatype === String) {
// String fallback for binary data for browsers that don't support binary yet
var ab = util.binaryStringToArrayBuffer(data);
data = util.unpack(ab);
}
} else if (this.serialization === 'json') {
data = JSON.parse(data);
}
// Check if we've chunked--if so, piece things back together.
// We're guaranteed that this isn't 0.
if (data.__peerData) {
var id = data.__peerData;
var chunkInfo = this._chunkedData[id] || {data: [], count: 0, total: data.total};
chunkInfo.data[data.n] = data.data;
chunkInfo.count += 1;
if (chunkInfo.total === chunkInfo.count) {
// Clean up before making the recursive call to `_handleDataMessage`.
delete this._chunkedData[id];
// We've received all the chunks--time to construct the complete data.
data = new Blob(chunkInfo.data);
this._handleDataMessage({data: data});
}
this._chunkedData[id] = chunkInfo;
return;
}
this.emit('data', data);
}
/**
* Exposed functionality for users.
*/
/** Allows user to close connection. */
DataConnection.prototype.close = function() {
if (!this.open) {
return;
}
this.open = false;
Negotiator.cleanup(this);
this.emit('close');
}
/** Allows user to send data. */
DataConnection.prototype.send = function(data, chunked) {
if (!this.open) {
this.emit('error', new Error('Connection is not open. You should listen for the `open` event before sending messages.'));
return;
}
if (this._reliable) {
// Note: reliable shim sending will make it so that you cannot customize
// serialization.
this._reliable.send(data);
return;
}
var self = this;
if (this.serialization === 'json') {
this._bufferedSend(JSON.stringify(data));
} else if (this.serialization === 'binary' || this.serialization === 'binary-utf8') {
var blob = util.pack(data);
// For Chrome-Firefox interoperability, we need to make Firefox "chunk"
// the data it sends out.
var needsChunking = util.chunkedBrowsers[this._peerBrowser] || util.chunkedBrowsers[util.browser];
if (needsChunking && !chunked && blob.size > util.chunkedMTU) {
this._sendChunks(blob);
return; |
// DataChannel currently only supports strings.
if (!util.supports.sctp) {
util.blobToBinaryString(blob, function(str) {
self._bufferedSend(str);
});
} else if (!util.supports.binaryBlob) {
// We only do this if we really need to (e.g. blobs are not supported),
// because this conversion is costly.
util.blobToArrayBuffer(blob, function(ab) {
self._bufferedSend(ab);
});
} else {
this._bufferedSend(blob);
}
} else {
this._bufferedSend(data);
}
}
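// Usage sketch (hedged; `conn` is a DataConnection from `peer.connect(...)`
// and the payload is arbitrary application data):
//   conn.on('open', function() {
//     conn.send({hello: 'world'}); // packed via BinaryPack under the default 'binary' serialization
//   });
//   conn.on('data', function(data) {
//     console.log('Received', data);
//   });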
DataConnection.prototype._bufferedSend = function(msg) {
if (this._buffering || !this._trySend(msg)) {
this._buffer.push(msg);
this.bufferSize = this._buffer.length;
}
}
// Returns true if the send succeeds.
DataConnection.prototype._trySend = function(msg) {
try {
this._dc.send(msg);
} catch (e) {
this._buffering = true;
var self = this;
setTimeout(function() {
// Try again.
self._buffering = false;
self._tryBuffer();
}, 100);
return false;
}
return true;
}
// Try to send the first message in the buffer.
DataConnection.prototype._tryBuffer = function() {
if (this._buffer.length === 0) {
return;
}
var msg = this._buffer[0];
if (this._trySend(msg)) {
this._buffer.shift();
this.bufferSize = this._buffer.length;
this._tryBuffer();
}
}
DataConnection.prototype._sendChunks = function(blob) {
var blobs = util.chunk(blob);
for (var i = 0, ii = blobs.length; i < ii; i += 1) {
var blob = blobs[i];
this.send(blob, true);
}
}
DataConnection.prototype.handleMessage = function(message) {
var payload = message.payload;
switch (message.type) {
case 'ANSWER':
this._peerBrowser = payload.browser;
// Forward to negotiator
Negotiator.handleSDP(message.type, this, payload.sdp);
break;
case 'CANDIDATE':
Negotiator.handleCandidate(this, payload.candidate);
break;
default:
util.warn('Unrecognized message type:', message.type, 'from peer:', this.peer);
break;
}
}
module.exports = DataConnection;
},{"./negotiator":9,"./util":12,"eventemitter3":17,"reliable":20}],8:[function(require,module,exports){
var util = require('./util');
var EventEmitter = require('eventemitter3');
var Negotiator = require('./negotiator');
/**
* Wraps the streaming interface between two Peers.
*/
function MediaConnection(peer, provider, options) {
if (!(this instanceof MediaConnection)) return new MediaConnection(peer, provider, options);
EventEmitter.call(this);
this.options = util.extend({}, options);
this.open = false;
this.type = 'media';
this.peer = peer;
this.provider = provider;
this.metadata = this.options.metadata;
this.localStream = this.options._stream;
this.id = this.options.connectionId || MediaConnection._idPrefix + util.randomToken();
if (this.localStream) {
Negotiator.startConnection(
this,
{_stream: this.localStream, originator: true}
);
}
};
util.inherits(MediaConnection, EventEmitter);
MediaConnection._idPrefix = 'mc_';
MediaConnection.prototype.addStream = function(remoteStream) {
util.log('Receiving stream', remoteStream);
this.remoteStream = remoteStream;
this.emit('stream', remoteStream); // Should we call this `open`?
};
MediaConnection.prototype.handleMessage = function(message) {
var payload = message.payload;
switch (message.type) {
case 'ANSWER':
// Forward to negotiator
Negotiator.handleSDP(message.type, this, payload.sdp);
this.open = true;
break;
case 'CANDIDATE':
Negotiator.handleCandidate(this, payload.candidate);
break;
default:
util.warn('Unrecognized message type:', message.type, 'from peer:', this.peer);
break;
}
}
MediaConnection.prototype.answer = function(stream) {
if (this.localStream) {
util.warn('Local stream already exists on this MediaConnection. Are you answering a call twice?');
return;
}
this.options._payload._stream = stream;
this.localStream = stream;
Negotiator.startConnection(
this,
this.options._payload
)
// Retrieve lost messages stored because PeerConnection not set up.
var messages = this.provider._getMessages(this.id);
for (var i = 0, ii = messages.length; i < ii; i += 1) {
this.handleMessage(messages[i]);
}
this.open = true;
};
/**
* Exposed functionality for users.
*/
/** Allows user to close connection. */
MediaConnection.prototype.close = function() {
if (!this.open) {
return;
}
this.open = false;
Negotiator.cleanup(this);
this.emit('close')
};
module.exports = MediaConnection;
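// Usage sketch (hedged; `localStream` is assumed to come from getUserMedia):
//   peer.on('call', function(call) {
//     call.answer(localStream); // send our stream back and open the connection
//     call.on('stream', function(remoteStream) {
//       // Attach remoteStream to a <video> element, etc.
//     });
//   });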
},{"./negotiator":9,"./util":12,"eventemitter3":17}],9:[function(require,module,exports){
var util = require('./util');
var RTCPeerConnection = require('./adapter').RTCPeerConnection;
var RTCSessionDescription = require('./adapter').RTCSessionDescription;
var RTCIceCandidate = require('./adapter').RTCIceCandidate;
// `Reliable` is referenced below in _makeOffer/_makeAnswer for the non-SCTP
// SDP tweak; require it explicitly so it is in scope.
var Reliable = require('reliable');
/**
* Manages all negotiations between Peers.
*/
var Negotiator = {
pcs: {
data: {},
media: {}
}, // type => {peerId: {pc_id: pc}}.
//providers: {}, // provider's id => providers (there may be multiple providers/client.
queue: [] // connections that are delayed due to a PC being in use.
}
Negotiator._idPrefix = 'pc_';
/** Returns a PeerConnection object set up correctly (for data, media). */
Negotiator.startConnection = function(connection, options) {
var pc = Negotiator._getPeerConnection(connection, options);
if (connection.type === 'media' && options._stream) {
// Add the stream.
pc.addStream(options._stream);
}
// Set the connection's PC.
connection.pc = connection.peerConnection = pc;
// What do we need to do now?
if (options.originator) {
if (connection.type === 'data') {
// Create the datachannel.
var config = {};
// Dropping reliable:false support, since it seems to be crashing
// Chrome.
/*if (util.supports.sctp && !options.reliable) {
// If we have canonical reliable support...
config = {maxRetransmits: 0};
}*/
// Fallback to ensure older browsers don't crash.
if (!util.supports.sctp) {
config = {reliable: options.reliable};
}
var dc = pc.createDataChannel(connection.label, config);
connection.initialize(dc);
}
if (!util.supports.onnegotiationneeded) {
Negotiator._makeOffer(connection);
}
} else {
Negotiator.handleSDP('OFFER', connection, options.sdp);
}
}
Negotiator._getPeerConnection = function(connection, options) {
if (!Negotiator.pcs[connection.type]) {
util.error(connection.type + ' is not a valid connection type. Maybe you overrode the `type` property somewhere.');
}
if (!Negotiator.pcs[connection.type][connection.peer]) {
Negotiator.pcs[connection.type][connection.peer] = {};
}
var peerConnections = Negotiator.pcs[connection.type][connection.peer];
var pc;
// Not multiplexing while FF and Chrome have not-great support for it.
/*if (options.multiplex) {
ids = Object.keys(peerConnections);
for (var i = 0, ii = ids.length; i < ii; i += 1) {
pc = peerConnections[ids[i]];
if (pc.signalingState === 'stable') {
break; // We can go ahead and use this PC.
}
}
} else */
if (options.pc) { // Simplest case: PC id already provided for us.
pc = Negotiator.pcs[connection.type][connection.peer][options.pc];
}
if (!pc || pc.signalingState !== 'stable') {
pc = Negotiator._startPeerConnection(connection);
}
return pc;
}
/*
Negotiator._addProvider = function(provider) {
if ((!provider.id && !provider.disconnected) || !provider.socket.open) {
// Wait for provider to obtain an ID.
provider.on('open', function(id) {
Negotiator._addProvider(provider);
});
} else {
Negotiator.providers[provider.id] = provider;
}
}*/
/** Start a PC. */
Negotiator._startPeerConnection = function(connection) {
util.log('Creating RTCPeerConnection.');
var id = Negotiator._idPrefix + util.randomToken();
var optional = {};
if (connection.type === 'data' && !util.supports.sctp) {
optional = {optional: [{RtpDataChannels: true}]};
} else if (connection.type === 'media') {
// Interop req for chrome.
optional = {optional: [{DtlsSrtpKeyAgreement: true}]};
}
var pc = new RTCPeerConnection(connection.provider.options.config, optional);
Negotiator.pcs[connection.type][connection.peer][id] = pc;
Negotiator._setupListeners(connection, pc, id);
return pc;
}
/** Set up various WebRTC listeners. */
Negotiator._setupListeners = function(connection, pc, pc_id) {
var peerId = connection.peer;
var connectionId = connection.id;
var provider = connection.provider;
// ICE CANDIDATES.
util.log('Listening for ICE candidates.');
pc.onicecandidate = function(evt) {
if (evt.candidate) {
util.log('Received ICE candidates for:', connection.peer);
provider.socket.send({
type: 'CANDIDATE',
payload: {
candidate: evt.candidate,
type: connection.type,
connectionId: connection.id
},
dst: peerId
});
}
};
pc.oniceconnectionstatechange = function() {
switch (pc.iceConnectionState) {
case 'disconnected':
case 'failed':
util.log('iceConnectionState is disconnected, closing connections to ' + peerId);
connection.close();
break;
case 'completed':
pc.onicecandidate = util.noop;
break;
}
};
// Fallback for older Chrome impls.
pc.onicechange = pc.oniceconnectionstatechange;
// ONNEGOTIATIONNEEDED (Chrome)
util.log('Listening for `negotiationneeded`');
pc.onnegotiationneeded = function() {
util.log('`negotiationneeded` triggered');
if (pc.signalingState == 'stable') {
Negotiator._makeOffer(connection);
} else {
util.log('onnegotiationneeded triggered when not stable. Is another connection being established?');
}
};
// DATACONNECTION.
util.log('Listening for data channel');
// Fired between offer and answer, so options should already be saved
// in the options hash.
pc.ondatachannel = function(evt) {
util.log('Received data channel');
var dc = evt.channel;
var connection = provider.getConnection(peerId, connectionId);
connection.initialize(dc);
};
// MEDIACONNECTION.
util.log('Listening for remote stream');
pc.onaddstream = function(evt) {
util.log('Received remote stream');
var stream = evt.stream;
var connection = provider.getConnection(peerId, connectionId);
// 10/10/2014: looks like in Chrome 38, onaddstream is triggered after
// setting the remote description. Our connection object in these cases
// is actually a DATA connection, so addStream fails.
// TODO: This is hopefully just a temporary fix. We should try to
// understand why this is happening.
if (connection.type === 'media') {
connection.addStream(stream);
}
};
}
Negotiator.cleanup = function(connection) {
util.log('Cleaning up PeerConnection to ' + connection.peer);
var pc = connection.pc;
if (!!pc && (pc.readyState !== 'closed' || pc.signalingState !== 'closed')) {
pc.close();
connection.pc = null;
}
}
Negotiator._makeOffer = function(connection) {
var pc = connection.pc;
pc.createOffer(function(offer) {
util.log('Created offer.');
if (!util.supports.sctp && connection.type === 'data' && connection.reliable) {
offer.sdp = Reliable.higherBandwidthSDP(offer.sdp);
}
pc.setLocalDescription(offer, function() {
util.log('Set localDescription: offer', 'for:', connection.peer);
connection.provider.socket.send({
type: 'OFFER',
payload: {
sdp: offer,
type: connection.type,
label: connection.label,
connectionId: connection.id,
reliable: connection.reliable,
serialization: connection.serialization,
metadata: connection.metadata,
browser: util.browser
},
dst: connection.peer
});
}, function(err) {
connection.provider.emitError('webrtc', err);
util.log('Failed to setLocalDescription, ', err);
});
}, function(err) {
connection.provider.emitError('webrtc', err);
util.log('Failed to createOffer, ', err);
}, connection.options.constraints);
}
Negotiator._makeAnswer = function(connection) {
var pc = connection.pc;
pc.createAnswer(function(answer) {
util.log('Created answer.');
if (!util.supports.sctp && connection.type === 'data' && connection.reliable) {
answer.sdp = Reliable.higherBandwidthSDP(answer.sdp);
}
pc.setLocalDescription(answer, function() {
util.log('Set localDescription: answer', 'for:', connection.peer);
connection.provider.socket.send({
type: 'ANSWER',
payload: {
sdp: answer,
type: connection.type,
connectionId: connection.id,
browser: util.browser
},
dst: connection.peer
});
}, function(err) {
connection.provider.emitError('webrtc', err);
util.log('Failed to setLocalDescription, ', err);
});
}, function(err) {
connection.provider.emitError('webrtc', err);
util.log('Failed to create answer, ', err);
});
}
/** Handle an SDP. */
Negotiator.handleSDP = function(type, connection, sdp) {
sdp = new RTCSessionDescription(sdp);
var pc = connection.pc;
util.log('Setting remote description', sdp);
pc.setRemoteDescription(sdp, function() {
util.log('Set remoteDescription:', type, 'for:', connection.peer);
if (type === 'OFFER') {
Negotiator._makeAnswer(connection);
}
}, function(err) {
connection.provider.emitError('webrtc', err);
util.log('Failed to setRemoteDescription, ', err);
});
}
/** Handle a candidate. */
Negotiator.handleCandidate = function(connection, ice) {
var candidate = ice.candidate;
var sdpMLineIndex = ice.sdpMLineIndex;
connection.pc.addIceCandidate(new RTCIceCandidate({
sdpMLineIndex: sdpMLineIndex,
candidate: candidate
}));
util.log('Added ICE candidate for:', connection.peer);
}
module.exports = Negotiator;
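// Signaling flow sketch (message shapes taken from the handlers above; peer
// ids are hypothetical):
//   originator                                  callee
//   createDataChannel / addStream
//   _makeOffer
//     socket.send({type:'OFFER', payload:{sdp,...}, dst:'peerB'})
//                                               handleSDP('OFFER') -> _makeAnswer
//                                               socket.send({type:'ANSWER', payload:{sdp,...}, dst:'peerA'})
//   handleSDP('ANSWER')
//   (both) onicecandidate -> socket.send({type:'CANDIDATE', ...}) -> handleCandidate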
},{"./adapter":6,"./util":12}],10:[function(require,module,exports){
var util = require('./util');
var EventEmitter = require('eventemitter3');
var Socket = require('./socket');
var MediaConnection = require('./mediaconnection');
var DataConnection = require('./dataconnection');
/**
* A peer who can initiate connections with other peers.
*/
function Peer(id, options) {
if (!(this instanceof Peer)) return new Peer(id, options);
EventEmitter.call(this);
// Deal with overloading
if (id && id.constructor == Object) {
options = id;
id = undefined;
} else if (id) {
// Ensure id is a string
id = id.toString();
}
//
// Configure options
options = util.extend({
debug: 0, // 1: Errors, 2: Warnings, 3: All logs
host: util.CLOUD_HOST,
port: util.CLOUD_PORT,
key: 'peerjs',
path: '/',
token: util.randomToken(),
config: util.defaultConfig
}, options);
this.options = options;
// Detect relative URL host.
if (options.host === '/') {
options.host = window.location.hostname;
}
// Set path correctly.
if (options.path[0] !== '/') {
options.path = '/' + options.path;
}
if (options.path[options.path.length - 1] !== '/') {
options.path += '/';
}
// Default `secure` to the current page's protocol when using a non-cloud host
if (options.secure === undefined && options.host !== util.CLOUD_HOST) {
options.secure = util.isSecure();
}
// Set a custom log function if present
if (options.logFunction) {
util.setLogFunction(options.logFunction);
}
util.setLogLevel(options.debug);
//
// Sanity checks
// Ensure WebRTC supported
if (!util.supports.audioVideo && !util.supports.data ) {
this._delayedAbort('browser-incompatible', 'The current browser does not support WebRTC');
return;
}
// Ensure alphanumeric id
if (!util.validateId(id)) {
this._delayedAbort('invalid-id', 'ID "' + id + '" is invalid');
return;
}
// Ensure valid key
if (!util.validateKey(options.key)) {
this._delayedAbort('invalid-key', 'API KEY "' + options.key + '" is invalid');
return;
}
// Ensure not using unsecure cloud server on SSL page
if (options.secure && options.host === '0.peerjs.com') {
this._delayedAbort('ssl-unavailable',
'The cloud server currently does not support HTTPS. Please run your own PeerServer to use HTTPS.');
return;
}
//
// States.
this.destroyed = false; // Connections have been killed
this.disconnected = false; // Connection to PeerServer killed but P2P connections still active
this.open = false; // Sockets and such are not yet open.
//
// References
this.connections = {}; // DataConnections for this peer.
this._lostMessages = {}; // src => [list of messages]
//
// Start the server connection
this._initializeServerConnection();
if (id) {
this._initialize(id);
} else {
this._retrieveId();
}
//
}
util.inherits(Peer, EventEmitter);
// Initialize the 'socket' (which is actually a mix of XHR streaming and
// websockets).
Peer.prototype._initializeServerConnection = function() {
var self = this;
this.socket = new Socket(this.options.secure, this.options.host, this.options.port, this.options.path, this.options.key);
this.socket.on('message', function(data) {
self._handleMessage(data);
});
this.socket.on('error', function(error) {
self._abort('socket-error', error);
});
this.socket.on('disconnected', function() {
// If we haven't explicitly disconnected, emit error and disconnect.
if (!self.disconnected) {
self.emitError('network', 'Lost connection to server.');
self.disconnect();
}
});
this.socket.on('close', function() {
// If we haven't explicitly disconnected, emit error.
if (!self.disconnected) {
self._abort('socket-closed', 'Underlying socket is already closed.');
}
});
};
/** Get a unique ID from the server via XHR. */
Peer.prototype._retrieveId = function(cb) {
var self = this;
var http = new XMLHttpRequest();
var protocol = this.options.secure ? 'https://' : 'http://';
var url = protocol + this.options.host + ':' + this.options.port +
this.options.path + this.options.key + '/id';
var queryString = '?ts=' + new Date().getTime() + '' + Math.random();
url += queryString;
// If there's no ID we need to wait for one before trying to init socket.
http.open('get', url, true);
http.onerror = function(e) {
util.error('Error retrieving ID', e);
var pathError = '';
if (self.options.path === '/' && self.options.host !== util.CLOUD_HOST) {
pathError = ' If you passed in a `path` to your self-hosted PeerServer, ' +
'you\'ll also need to pass in that same path when creating a new ' +
'Peer.';
}
self._abort('server-error', 'Could not get an ID from the server.' + pathError);
};
http.onreadystatechange = function() {
if (http.readyState !== 4) {
return;
}
if (http.status !== 200) {
http.onerror();
return;
}
self._initialize(http.responseText);
};
http.send(null);
};
/** Initialize a connection with the server. */
Peer.prototype._initialize = function(id) {
this.id = id;
this.socket.start(this.id, this.options.token);
};
/** Handles messages from the server. */
Peer.prototype._handleMessage = function(message) {
var type = message.type;
var payload = message.payload;
var peer = message.src;
var connection;
switch (type) {
case 'OPEN': // The connection to the server is open.
this.emit('open', this.id);
this.open = true;
break;
case 'ERROR': // Server error.
this._abort('server-error', payload.msg);
break;
case 'ID-TAKEN': // The selected ID is taken.
this._abort('unavailable-id', 'ID `' + this.id + '` is taken');
break;
case 'INVALID-KEY': // The given API key cannot be found.
this._abort('invalid-key', 'API KEY "' + this.options.key + '" is invalid');
break;
//
case 'LEAVE': // Another peer has closed its connection to this peer.
util.log('Received leave message from', peer);
this._cleanupPeer(peer);
break;
case 'EXPIRE': // The offer sent to a peer has expired without response.
this.emitError('peer-unavailable', 'Could not connect to peer ' + peer);
break;
case 'OFFER': // we should consider switching this to CALL/CONNECT, but this is the least breaking option.
var connectionId = payload.connectionId;
connection = this.getConnection(peer, connectionId);
if (connection) {
util.warn('Offer received for existing Connection ID:', connectionId);
//connection.handleMessage(message);
} else {
// Create a new connection.
if (payload.type === 'media') {
connection = new MediaConnection(peer, this, {
connectionId: connectionId,
_payload: payload,
metadata: payload.metadata
});
this._addConnection(peer, connection);
this.emit('call', connection);
} else if (payload.type === 'data') {
connection = new DataConnection(peer, this, {
connectionId: connectionId,
_payload: payload,
metadata: payload.metadata,
label: payload.label,
serialization: payload.serialization,
reliable: payload.reliable
});
this._addConnection(peer, connection);
this.emit('connection', connection);
} else {
util.warn('Received malformed connection type:', payload.type);
return;
}
// Find messages.
var messages = this._getMessages(connectionId);
for (var i = 0, ii = messages.length; i < ii; i += 1) {
connection.handleMessage(messages[i]);
}
}
break;
default:
if (!payload) {
util.warn('You received a malformed message from ' + peer + ' of type ' + type);
return;
}
var id = payload.connectionId;
connection = this.getConnection(peer, id);
if (connection && connection.pc) {
// Pass it on.
connection.handleMessage(message);
} else if (id) {
// Store for possible later use
this._storeMessage(id, message);
} else {
util.warn('You received an unrecognized message:', message);
}
break;
}
};
/** Stores messages without a set up connection, to be claimed later. */
Peer.prototype._storeMessage = function(connectionId, message) {
if (!this._lostMessages[connectionId]) {
this._lostMessages[connectionId] = [];
}
this._lostMessages[connectionId].push(message);
};
/** Retrieve messages from lost message store */
Peer.prototype._getMessages = function(connectionId) {
var messages = this._lostMessages[connectionId];
if (messages) {
delete this._lostMessages[connectionId];
return messages;
} else {
return [];
}
};
/**
* Returns a DataConnection to the specified peer. See documentation for a
* complete list of options.
*/
Peer.prototype.connect = function(peer, options) {
if (this.disconnected) {
util.warn('You cannot connect to a new Peer because you called ' +
'.disconnect() on this Peer and ended your connection with the ' +
'server. You can create a new Peer to reconnect, or call reconnect ' +
'on this peer if you believe its ID to still be available.');
this.emitError('disconnected', 'Cannot connect to new Peer after disconnecting from server.');
return;
}
var connection = new DataConnection(peer, this, options);
this._addConnection(peer, connection);
return connection;
};
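// Usage sketch (hedged; peer ids are hypothetical):
//   var peer = new Peer();
//   peer.on('open', function(id) {
//     var conn = peer.connect('another-peer-id', {reliable: true});
//     conn.on('open', function() { conn.send('hi'); });
//   });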
/**
* Returns a MediaConnection to the specified peer. See documentation for a
* complete list of options.
*/
Peer.prototype.call = function(peer, stream, options) {
if (this.disconnected) {
util.warn('You cannot connect to a new Peer because you called ' +
'.disconnect() on this Peer and ended your connection with the ' +
'server. You can create a new Peer to reconnect.');
this.emitError('disconnected', 'Cannot connect to new Peer after disconnecting from server.');
return;
}
if (!stream) {
util.error('To call a peer, you must provide a stream from your browser\'s `getUserMedia`.');
return;
}
options = options || {};
options._stream = stream;
var call = new MediaConnection(peer, this, options);
this._addConnection(peer, call);
return call;
};
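// Usage sketch (hedged; assumes a getUserMedia stream and a hypothetical id;
// vendor prefixes for getUserMedia vary by browser and are out of scope here):
//   navigator.getUserMedia({audio: true, video: true}, function(stream) {
//     var call = peer.call('another-peer-id', stream);
//     call.on('stream', function(remoteStream) { /* render it */ });
//   }, function(err) { console.error(err); });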
/** Add a data/media connection to this peer. */
Peer.prototype._addConnection = function(peer, connection) {
if (!this.connections[peer]) {
this.connections[peer] = [];
}
this.connections[peer].push(connection);
};
/** Retrieve a data/media connection for this peer. */
Peer.prototype.getConnection = function(peer, id) {
var connections = this.connections[peer];
if (!connections) {
return null;
}
for (var i = 0, ii = connections.length; i < ii; i++) {
if (connections[i].id === id) {
return connections[i];
}
}
return null;
};
Peer.prototype._delayedAbort = function(type, message) {
var self = this;
util.setZeroTimeout(function(){
self._abort(type, message);
});
};
/**
* Destroys the Peer and emits an error message.
* The Peer is not destroyed if it's in a disconnected state, in which case
* it retains its disconnected state and its existing connections.
*/
Peer.prototype._abort = function(type, message) {
util.error('Aborting!');
if (!this._lastServerId) {
this.destroy();
} else {
this.disconnect();
}
this.emitError(type, message);
};
/** Emits a typed error message. */
Peer.prototype.emitError = function(type, err) {
util.error('Error:', err);
if (typeof err === 'string') {
err = new Error(err);
}
err.type = type;
this.emit('error', err);
};
/**
* Destroys the Peer: closes all active connections as well as the connection
* to the server.
* Warning: The peer can no longer create or accept connections after being
* destroyed.
*/
Peer.prototype.destroy = function() {
if (!this.destroyed) {
this._cleanup();
this.disconnect();
this.destroyed = true;
}
};
/** Disconnects every connection on this peer. */
Peer.prototype._cleanup = function() {
if (this.connections) {
var peers = Object.keys(this.connections);
for (var i = 0, ii = peers.length; i < ii; i++) {
this._cleanupPeer(peers[i]);
}
}
this.emit('close');
};
/** Closes all connections to this peer. */
Peer.prototype._cleanupPeer = function(peer) {
var connections = this.connections[peer];
for (var j = 0, jj = connections.length; j < jj; j += 1) {
connections[j].close();
}
};
/**
* Disconnects the Peer's connection to the PeerServer. Does not close any
* active connections.
* Warning: The peer can no longer create or accept connections after being
* disconnected. It also cannot reconnect to the server.
*/
Peer.prototype.disconnect = function() {
var self = this;
util.setZeroTimeout(function(){
if (!self.disconnected) {
self.disconnected = true;
self.open = false;
if (self.socket) {
self.socket.close();
}
self.emit('disconnected', self.id);
self._lastServerId = self.id;
self.id = null;
}
});
};
/** Attempts to reconnect with the same ID. */
Peer.prototype.reconnect = function() {
if (this.disconnected && !this.destroyed) {
util.log('Attempting reconnection to server with ID ' + this._lastServerId);
this.disconnected = false;
this._initializeServerConnection();
this._initialize(this._lastServerId);
} else if (this.destroyed) {
throw new Error('This peer cannot reconnect to the server. It has already been destroyed.');
} else if (!this.disconnected && !this.open) {
// Do nothing. We're still connecting the first time.
util.error('In a hurry? We\'re still trying to make the initial connection!');
} else {
throw new Error('Peer ' + this.id + ' cannot reconnect because it is not disconnected from the server!');
}
};
/**
* Get a list of available peer IDs. If you're running your own server, you'll
* want to set allow_discovery: true in the PeerServer options. If you're using
* the cloud server, email [email protected] to get the functionality enabled for
* your key.
*/
Peer.prototype.listAllPeers = function(cb) {
cb = cb || function() {};
var self = this;
var http = new XMLHttpRequest();
var protocol = this.options.secure ? 'https://' : 'http://';
var url = protocol + this.options.host + ':' + this.options.port +
this.options.path + this.options.key + '/peers';
var queryString = '?ts=' + new Date().getTime() + '' + Math.random();
url += queryString;
// The response body is a JSON array of peer ids.
http.open('get', url, true);
http.onerror = function(e) {
self._abort('server-error', 'Could not get peers from the server.');
cb([]);
};
http.onreadystatechange = function() {
if (http.readyState !== 4) {
return;
}
if (http.status === 401) {
var helpfulError = '';
if (self.options.host === util.CLOUD_HOST) {
helpfulError = 'It looks like you\'re using the cloud server. You can email ' +
'[email protected] to enable peer listing for your API key.';
} else {
helpfulError = 'You need to enable `allow_discovery` on your self-hosted ' +
'PeerServer to use this feature.';
}
cb([]);
throw new Error('It doesn\'t look like you have permission to list peer IDs. ' + helpfulError);
} else if (http.status !== 200) {
cb([]);
} else {
cb(JSON.parse(http.responseText));
}
};
http.send(null);
};
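// Usage sketch (requires `allow_discovery` on a self-hosted PeerServer, per
// the note above):
//   peer.listAllPeers(function(ids) {
//     console.log('Known peers:', ids);
//   });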
module.exports = Peer;
},{"./dataconnection":7,"./mediaconnection":8,"./socket":11,"./util":12,"eventemitter3":17}],11:[function(require,module,exports){
var util = require('./util');
var EventEmitter = require('eventemitter3');
/**
* An abstraction on top of WebSockets and XHR streaming to provide the fastest
* possible connection for peers.
*/
function Socket(secure, host, port, path, key) {
if (!(this instanceof Socket)) return new Socket(secure, host, port, path, key);
EventEmitter.call(this);
// Disconnected manually.
this.disconnected = false;
this._queue = [];
var httpProtocol = secure ? 'https://' : 'http://';
var wsProtocol = secure ? 'wss://' : 'ws://';
this._httpUrl = httpProtocol + host + ':' + port + path + key;
this._wsUrl = wsProtocol + host + ':' + port + path + 'peerjs?key=' + key;
}
util.inherits(Socket, EventEmitter);
/** Check in with ID or get one from server. */
Socket.prototype.start = function (id, token) {
this.id = id;
this._httpUrl += '/' + id + '/' + token;
this._wsUrl += '&id=' + id + '&token=' + token;
this._startXhrStream();
this._startWebSocket();
}
/** Start up websocket communications. */
Socket.prototype._startWebSocket = function (id) {
var self = this;
if (this._socket) {
return;
}
this._socket = new WebSocket(this._wsUrl);
this._socket.onmessage = function (event) {
try {
var data = JSON.parse(event.data);
} catch (e) {
util.log('Invalid server message', event.data);
return;
}
self.emit('message', data);
};
this._socket.onclose = function (event) {
util.log('Socket closed.');
self.disconnected = true;
self.emit('disconnected');
};
// Take care of the queue of connections if necessary and make sure Peer knows
// socket is open.
this._socket.onopen = function () {
if (self._timeout) {
clearTimeout(self._timeout);
setTimeout(function () {
self._http.abort();
self._http = null;
}, 5000);
}
self._sendQueuedMessages();
util.log('Socket open');
};
}
/** Start XHR streaming. */
Socket.prototype._startXhrStream = function (n) {
try {
var self = this;
this._http = new XMLHttpRequest();
this._http._index = 1;
this._http._streamIndex = n || 0;
this._http.open('post', this._httpUrl + '/id?i=' + this._http._streamIndex, true);
this._http.onerror = function () {
// If we get an error, likely something went wrong.
// Stop streaming.
clearTimeout(self._timeout);
self.emit('disconnected');
}
this._http.onreadystatechange = function () {
if (this.readyState == 2 && this.old) {
this.old.abort();
delete this.old;
} else if (this.readyState > 2 && this.status === 200 && this.responseText) {
self._handleStream(this);
}
};
this._http.send(null);
this._setHTTPTimeout();
} catch (e) {
util.log('XMLHttpRequest not available; defaulting to WebSockets');
}
}
/** Handles onreadystatechange response as a stream. */
Socket.prototype._handleStream = function (http) {
// 3 and 4 are loading/done state. All others are not relevant.
var messages = http.responseText.split('\n');
// Check to see if anything needs to be processed on buffer.
if (http._buffer) {
while (http._buffer.length > 0) {
var index = http._buffer.shift();
var bufferedMessage = messages[index];
try {
bufferedMessage = JSON.parse(bufferedMessage);
} catch (e) {
// The message is incomplete; put the index back and retry on the next pass.
http._buffer.unshift(index);
break;
}
}
this.emit('message', bufferedMessage);
}
}
var message = messages[http._index];
if (message) {
http._index += 1;
// Buffering--this message is incomplete and we'll get to it next time.
// This checks if the httpResponse ended in a `\n`, in which case the last
// element of messages should be the empty string.
if (http._index === messages.length) {
if (!http._buffer) {
http._buffer = [];
}
http._buffer.push(http._index - 1);
} else {
try {
message = JSON.parse(message);
} catch (e) {
util.log('Invalid server message', message);
return;
}
this.emit('message', message);
}
}
}
Socket.prototype._setHTTPTimeout = function () {
var self = this;
this._timeout = setTimeout(function () {
var old = self._http;
if (!self._wsOpen()) {
self._startXhrStream(old._streamIndex + 1);
self._http.old = old;
} else {
old.abort();
}
}, 25000);
}
/** Is the websocket currently open? */
Socket.prototype._wsOpen = function () {
return this._socket && this._socket.readyState == 1;
}
/** Send queued messages. */
Socket.prototype._sendQueuedMessages = function () {
for (var i = 0, ii = this._queue.length; i < ii; i += 1) {
this.send(this._queue[i]);
}
}
/** Exposed send for DC & Peer. */
Socket.prototype.send = function (data) {
if (this.disconnected) {
return;
}
// If we didn't get an ID yet, we can't yet send anything so we should queue
// up these messages.
if (!this.id || !this._wsOpen()) { // queue until we have an id and the websocket is open
this._queue.push(data);
return;
}
if (!data.type) {
this.emit('error', 'Invalid message');
return;
}
var message = JSON.stringify(data);
if (this._wsOpen()) {
this._socket.send(message);
} else {
var http = new XMLHttpRequest();
var url = this._httpUrl + '/' + data.type.toLowerCase();
http.open('post', url, true);
http.setRequestHeader('Content-Type', 'application/json');
http.send(message);
}
}
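// Wire-format sketch: every message is a JSON object with at least a `type`,
// usually a `payload`, and a `dst` peer id, e.g. (values hypothetical):
//   {type: 'CANDIDATE', payload: {candidate: {...}, type: 'data', connectionId: 'dc_x'}, dst: 'peer-id'}
// Note: because messages are queued above whenever the websocket is not open,
// the XHR POST fallback below is effectively unreachable in this build.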
Socket.prototype.close = function () {
if (!this.disconnected && this._wsOpen()) {
this._socket.close();
this.disconnected = true;
}
}
module.exports = Socket;
},{"./util":12,"eventemitter3":17}],12:[function(require,module,exports){
var defaultConfig = {'iceServers': [{ 'url': 'stun:stun.l.google.com:19302' }]};
var dataCount = 1;
var BinaryPack = require('js-binarypack');
var RTCPeerConnection = require('./adapter').RTCPeerConnection;
var util = {
noop: function() {},
CLOUD_HOST: '0.peerjs.com',
CLOUD_PORT: 9000,
// Browsers that need chunking:
chunkedBrowsers: {'Chrome': 1},
chunkedMTU: 16300, // The original 60000 bytes setting does not work when sending data from Firefox to Chrome, which is "cut off" after 16384 bytes and delivered individually.
// Logging logic
logLevel: 0,
setLogLevel: function(level) {
var debugLevel = parseInt(level, 10);
if (!isNaN(debugLevel)) {
util.logLevel = debugLevel;
} else {
// If they are using truthy/falsy values for debug
util.logLevel = level ? 3 : 0;
}
util.log = util.warn = util.error = util.noop;
if (util.logLevel > 0) {
util.error = util._printWith('ERROR');
}
if (util.logLevel > 1) {
util.warn = util._printWith('WARNING');
}
if (util.logLevel > 2) {
util.log = util._print;
}
},
setLogFunction: function(fn) {
if (fn.constructor !== Function) {
util.warn('The log function you passed in is not a function. Defaulting to regular logs.');
} else {
util._print = fn;
}
},
_printWith: function(prefix) {
return function() {
var copy = Array.prototype.slice.call(arguments);
copy.unshift(prefix);
util._print.apply(util, copy);
};
},
_print: function () {
var err = false;
var copy = Array.prototype.slice.call(arguments);
copy.unshift('PeerJS: ');
for (var i = 0, l = copy.length; i < l; i++){
if (copy[i] instanceof Error) {
copy[i] = '(' + copy[i].name + ') ' + copy[i].message;
err = true;
}
}
err ? console.error.apply(console, copy) : console.log.apply(console, copy);
},
//
// Returns browser-agnostic default config
defaultConfig: defaultConfig,
//
// Returns the current browser.
browser: (function() {
if (window.mozRTCPeerConnection) {
return 'Firefox';
} else if (window.webkitRTCPeerConnection) {
return 'Chrome';
} else if (window.RTCPeerConnection) {
return 'Supported';
} else {
return 'Unsupported';
}
})(),
//
// Lists which features are supported
supports: (function() {
if (typeof RTCPeerConnection === 'undefined') {
return {};
}
var data = true;
var audioVideo = true;
var binaryBlob = false;
var sctp = false;
var onnegotiationneeded = !!window.webkitRTCPeerConnection;
var pc, dc;
try {
pc = new RTCPeerConnection(defaultConfig, {optional: [{RtpDataChannels: true}]});
} catch (e) {
data = false;
audioVideo = false;
}
if (data) {
try {
dc = pc.createDataChannel('_PEERJSTEST');
} catch (e) {
data = false;
}
}
if (data) {
// Binary test
try {
dc.binaryType = 'blob';
binaryBlob = true;
} catch (e) {
}
// Reliable test.
// Unfortunately Chrome is a bit unreliable about whether or not they
// support reliable.
var reliablePC = new RTCPeerConnection(defaultConfig, {});
try {
var reliableDC = reliablePC.createDataChannel('_PEERJSRELIABLETEST', {});
sctp = reliableDC.reliable;
} catch (e) {
}
reliablePC.close();
}
// FIXME: not really the best check...
if (audioVideo) {
audioVideo = !!pc.addStream;
}
// FIXME: this is not great because in theory it doesn't work for
// av-only browsers (?).
if (!onnegotiationneeded && data) {
// sync default check.
var negotiationPC = new RTCPeerConnection(defaultConfig, {optional: [{RtpDataChannels: true}]});
negotiationPC.onnegotiationneeded = function() {
onnegotiationneeded = true;
// async check.
if (util && util.supports) {
util.supports.onnegotiationneeded = true;
}
};
negotiationPC.createDataChannel('_PEERJSNEGOTIATIONTEST');
setTimeout(function() {
negotiationPC.close();
}, 1000);
}
if (pc) {
pc.close();
}
return {
audioVideo: audioVideo,
data: data,
binaryBlob: binaryBlob,
binary: sctp, // deprecated; sctp implies binary support.
reliable: sctp, // deprecated; sctp implies reliable data.
sctp: sctp,
onnegotiationneeded: onnegotiationneeded
};
}()),
//
// Validate ids: alphanumeric segments, optionally separated by single spaces, underscores, or dashes
validateId: function(id) {
// Allow empty ids
return !id || /^[A-Za-z0-9]+(?:[ _-][A-Za-z0-9]+)*$/.exec(id);
},
validateKey: function(key) {
// Allow empty keys
return !key || /^[A-Za-z0-9]+(?:[ _-][A-Za-z0-9]+)*$/.exec(key);
},
debug: false,
inherits: function(ctor, superCtor) {
ctor.super_ = superCtor;
ctor.prototype = Object.create(superCtor.prototype, {
constructor: {
value: ctor,
enumerable: false,
writable: true,
configurable: true
}
});
},
extend: function(dest, source) {
for(var key in source) {
if(source.hasOwnProperty(key)) {
dest[key] = source[key];
}
}
return dest;
},
pack: BinaryPack.pack,
unpack: BinaryPack.unpack,
log: function () {
if (util.debug) {
var err = false;
var copy = Array.prototype.slice.call(arguments);
copy.unshift('PeerJS: ');
for (var i = 0, l = copy.length; i < l; i++){
if (copy[i] instanceof Error) {
copy[i] = '(' + copy[i].name + ') ' + copy[i].message;
err = true;
}
}
err ? console.error.apply(console, copy) : console.log.apply(console, copy);
}
},
setZeroTimeout: (function(global) {
var timeouts = [];
var messageName = 'zero-timeout-message';
// Like setTimeout, but only takes a function argument. There's
// no time argument (always zero) and no arguments (you have to
// use a closure).
function setZeroTimeoutPostMessage(fn) {
timeouts.push(fn);
global.postMessage(messageName, '*');
}
function handleMessage(event) {
if (event.source == global && event.data == messageName) {
if (event.stopPropagation) {
event.stopPropagation();
}
if (timeouts.length) {
timeouts.shift()();
}
}
}
if (global.addEventListener) {
global.addEventListener('message', handleMessage, true);
} else if (global.attachEvent) {
global.attachEvent('onmessage', handleMessage);
}
return setZeroTimeoutPostMessage;
}(window)),
// Binary stuff
// chunks a blob.
chunk: function(bl) {
var chunks = [];
var size = bl.size;
var start = 0;
var index = 0;
var total = Math.ceil(size / util.chunkedMTU);
while (start < size) {
var end = Math.min(size, start + util.chunkedMTU);
var b = bl.slice(start, end);
var chunk = {
__peerData: dataCount,
n: index,
data: b,
total: total
};
chunks.push(chunk);
start = end;
index += 1;
}
dataCount += 1;
return chunks;
},
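// Worked example: with chunkedMTU = 16300, a 50,000-byte blob yields
// Math.ceil(50000 / 16300) = 4 chunks (n = 0..3); the last carries the
// remaining 1,100 bytes. All four share one __peerData id for reassembly.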
blobToArrayBuffer: function(blob, cb){
var fr = new FileReader();
fr.onload = function(evt) {
cb(evt.target.result);
};
fr.readAsArrayBuffer(blob);
},
blobToBinaryString: function(blob, cb){
var fr = new FileReader();
fr.onload = function(evt) {
cb(evt.target.result);
};
fr.readAsBinaryString(blob);
},
binaryStringToArrayBuffer: function(binary) {
var byteArray = new Uint8Array(binary.length);
for (var i = 0; i < binary.length; i++) {
byteArray[i] = binary.charCodeAt(i) & 0xff;
}
return byteArray.buffer;
},
randomToken: function () {
return Math.random().toString(36).substr(2);
},
//
isSecure: function() {
return location.protocol === 'https:';
}
};
module.exports = util;
},{"./adapter":6,"js-binarypack":18}],13:[function(require,module,exports){
var EventEmitter, FlintConstants, FlintSenderManager, Peer, Platform, SenderMessageBus, SenderMessageChannel,
__hasProp = {}.hasOwnProperty,
__extends = function(child, parent) { for (var key in parent) { if (__hasProp.call(parent, key)) child[key] = parent[key]; } function ctor() { this.constructor = child; } ctor.prototype = parent.prototype; child.prototype = new ctor(); child.__super__ = parent.prototype; return child; };
EventEmitter = require('eventemitter3');
SenderMessageChannel = require('./SenderMessageChannel');
SenderMessageBus = require('./SenderMessageBus');
Peer = require('../peerjs/peer');
FlintConstants = require('../common/FlintConstants');
Platform = require('../common/Platform');
FlintSenderManager = (function(_super) {
__extends(FlintSenderManager, _super);
function FlintSenderManager(options) {
this.options = options;
if (!this.options) {
throw 'FlintSenderManager constructor error';
}
this.appId = this.options.appId;
this.urlBase = this.options.urlBase;
this.serviceUrl = this.urlBase + '/apps/' + this.appId;
this.host = this.options.host;
if (this.options.useHeartbeat === void 0) {
this.useHeartbeat = true;
} else {
this.useHeartbeat = this.options.useHeartbeat;
}
this.appState = {};
this.additionalData = {};
this.token = null;
this.heartbeatInterval = 3 * 1000;
this.heartbeatTimerId = null;
this.messageChannel = null;
this.messageBusList = {};
}
FlintSenderManager.prototype.getAdditionalData = function() {
return this.additionalData['customData'];
};
FlintSenderManager.prototype.getState = function(callback) {
var headers;
headers = {
'Accept': 'application/xml; charset=utf8'
};
return this._getState(this.serviceUrl, headers, (function(_this) {
return function(result, state) {
return typeof callback === "function" ? callback(result, state, _this.additionalData) : void 0;
};
})(this));
};
FlintSenderManager.prototype._getState = function(url, headers, callback) {
return this._request('GET', url, headers, null, (function(_this) {
return function(statusCode, responseText) {
if (statusCode === 200) {
_this._parseState(responseText);
return typeof callback === "function" ? callback(true, _this.appState.state) : void 0;
} else {
return typeof callback === "function" ? callback(false, 'unknown') : void 0;
}
};
})(this));
};
FlintSenderManager.prototype._parseState = function(state) {
var additionalData, doc, lines, link, parser, responseText;
lines = state.split('\n');
lines.splice(0, 1);
responseText = lines.join('');
parser = new DOMParser();
doc = parser.parseFromString(responseText, 'text/xml');
this.appState.name = doc.getElementsByTagName('name')[0].innerHTML;
this.appState.state = doc.getElementsByTagName('state')[0].innerHTML;
link = doc.getElementsByTagName('link');
if (link && link[0]) {
this.appState.href = link[0].getAttribute('href');
}
additionalData = doc.getElementsByTagName('additionalData');
return this._parseAdditionalData(additionalData);
};
FlintSenderManager.prototype._parseAdditionalData = function(additionalData) {
var i, items, key, value, _tmpAdditionalData;
if ((additionalData != null ? additionalData.length : void 0) > 0) {
items = additionalData[0].childNodes;
if (items) {
_tmpAdditionalData = {};
for (i in items) {
if (items[i].tagName && items[i].innerHTML) {
_tmpAdditionalData[items[i].tagName] = items[i].innerHTML;
}
}
for (key in _tmpAdditionalData) {
value = _tmpAdditionalData[key];
if (this.additionalData[key] !== value) {
this.emit(key + 'available', value);
}
}
return this.additionalData = _tmpAdditionalData;
}
}
};
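// Illustrative state document (hedged; reconstructed from the tag names
// parsed above, not from a spec):
//   <service>
//     <name>appName</name>
//     <state>running</state>
//     <link href="run"/>
//     <additionalData>
//       <dataPeerId>abc123</dataPeerId>
//       <mediaPeerId>def456</mediaPeerId>
//     </additionalData>
//   </service>
// Each changed <additionalData> child fires a '<tagName>available' event.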
FlintSenderManager.prototype.launch = function(appInfo, callback) {
return this._launch('launch', appInfo, callback);
};
FlintSenderManager.prototype.relaunch = function(appInfo, callback) {
return this._launch('relaunch', appInfo, callback);
};
FlintSenderManager.prototype.join = function(appInfo, callback) {
return this._launch('join', appInfo, callback);
};
FlintSenderManager.prototype._onLaunchResult = function(type, result, token, callback) {
if (typeof callback === "function") {
callback(type, result, token);
}
if (result) {
console.log(type, ' is ok, getState once');
return setTimeout(((function(_this) {
return function() {
return _this.getState();
};
})(this)), 500);
} else {
console.log(type, ' is failed, stop heartbeat');
return this._stopHeartbeat();
}
};
FlintSenderManager.prototype._launch = function(launchType, appInfo, callback) {
var data, headers;
if ((launchType === 'launch') || (launchType === 'relaunch')) {
if ((!appInfo) || (!appInfo.appUrl)) {
throw 'empty appInfo or appUrl';
}
}
if (appInfo.useIpc === void 0) {
appInfo.useIpc = false;
}
if ((!appInfo.useIpc) && (appInfo.maxInactive === void 0)) {
appInfo.maxInactive = -1;
}
data = {
type: launchType,
app_info: {
url: appInfo.appUrl,
useIpc: appInfo.useIpc,
maxInactive: appInfo.maxInactive
}
};
headers = {
'Content-Type': 'application/json'
};
return this._request('POST', this.serviceUrl, headers, data, (function(_this) {
return function(statusCode, responseText) {
var content, counter, pollingCallback, _headers;
if ((statusCode === 200) || (statusCode === 201)) {
content = JSON.parse(responseText);
if (content && content.token && content.interval) {
_this.token = content.token;
if (content.interval <= 3000) {
content.interval = 3000;
}
_this.heartbeatInterval = content.interval;
if (_this.useHeartbeat) {
_this._startHeartbeat();
}
counter = 1;
_headers = {
'Accept': 'application/xml; charset=utf8',
'Authorization': _this.token
};
pollingCallback = function() {
console.log('wait for launching ', counter, ' times');
if (counter < 10) {
counter += 1;
return _this._getState(_this.serviceUrl, _headers, function(result, state) {
if (result && (state === 'running')) {
return _this._onLaunchResult('app' + launchType, true, _this.token, callback);
} else {
return setTimeout((function() {
return pollingCallback();
}), 1000);
}
});
} else {
return _this._onLaunchResult('app' + launchType, false, null, callback);
}
};
return pollingCallback();
} else {
return _this._onLaunchResult('app' + launchType, false, null, callback);
}
} else {
return _this._onLaunchResult('app' + launchType, false, null, callback);
}
};
})(this));
};
FlintSenderManager.prototype._startHeartbeat = function() {
this._stopHeartbeat();
return this.heartbeatTimerId = setInterval(((function(_this) {
return function() {
var headers;
headers = {
'Accept': 'application/xml; charset=utf8',
'Authorization': _this.token
};
return _this._getState(_this.serviceUrl, headers, function(result, state) {
return _this.emit('appstate', result, state, _this.additionalData);
});
};
})(this)), this.heartbeatInterval);
};
FlintSenderManager.prototype._stopHeartbeat = function() {
if (this.heartbeatTimerId) {
return clearInterval(this.heartbeatTimerId);
}
};
FlintSenderManager.prototype.stop = function(appInfo, callback) {
var headers;
this._stopHeartbeat();
headers = {
'Accept': 'application/xml; charset=utf8'
};
if (this.token) {
headers['Authorization'] = this.token;
} else {
headers['Authorization'] = 'bad-token';
}
return this._getState(this.serviceUrl, headers, (function(_this) {
return function(result, state) {
var url;
if (result) {
if (state === 'stopped') {
return _this._onStop('appstop', true, callback);
} else {
url = _this.serviceUrl + '/' + _this.appState.href;
return _this._stop('stop', url, callback);
}
} else {
console.warn('stop failed, try join!');
return _this.join(appInfo, function(_type, _result, _token) {
_this._stopHeartbeat();
if (_result) {
console.log('join ok, use token = ', _token, ' to stop!');
_this.token = _token;
url = _this.serviceUrl + '/' + _this.appState.href;
return _this._stop('stop', url, callback);
} else {
return _this._onStop('appstop', false, callback);
}
});
}
};
})(this));
};
FlintSenderManager.prototype.disconnect = function(callback) {
this._stopHeartbeat();
return this._stop('disconnect', this.serviceUrl, callback);
};
FlintSenderManager.prototype._onStop = function(type, result, callback) {
return typeof callback === "function" ? callback(type, result) : void 0;
};
FlintSenderManager.prototype._stop = function(stopType, url, callback) {
var headers;
if (!this.token) {
throw 'empty token, cannot stop';
}
headers = {
'Authorization': this.token
};
return this._request('DELETE', url, headers, null, (function(_this) {
return function(statusCode, responseText) {
_this._clean();
if (statusCode === 200) {
return _this._onStop('app' + stopType, true, callback);
} else {
return _this._onStop('app' + stopType, false, callback);
}
};
})(this));
};
FlintSenderManager.prototype._request = function(method, url, headers, data, callback) {
var key, value, xhr;
console.log('request: method -> ', method, ', url -> ', url, ', headers -> ', headers);
xhr = this._createXhr();
if (!xhr) {
throw 'request: failed';
}
xhr.open(method, url);
if (headers) {
for (key in headers) {
value = headers[key];
xhr.setRequestHeader(key, value);
}
}
xhr.onreadystatechange = (function(_this) {
return function() {
if (xhr.readyState === 4) {
console.log('FlintSenderManager received:\n', xhr.responseText);
return typeof callback === "function" ? callback(xhr.status, xhr.responseText) : void 0;
}
};
})(this);
if (data) {
return xhr.send(JSON.stringify(data));
} else {
return xhr.send('');
}
};
FlintSenderManager.prototype._createXhr = function() {
var platform;
platform = Platform.getPlatform().browser;
if (platform === 'ffos') {
return new XMLHttpRequest({
mozSystem: true
});
} else {
return new XMLHttpRequest();
}
};
FlintSenderManager.prototype._createMessageChannel = function() {
if (!this.messageChannel) {
this.messageChannel = new SenderMessageChannel(FlintConstants.DEFAULT_CHANNEL_NAME);
this.messageChannel.on('open', (function(_this) {
return function() {
return console.log('sender message channel open!!!');
};
})(this));
this.messageChannel.on('close', (function(_this) {
return function() {
return console.log('sender message channel close!!!');
};
})(this));
this.messageChannel.on('error', (function(_this) {
return function() {
return console.log('sender message channel error!!!');
};
})(this));
this._openMessageChannel(this.messageChannel);
}
return this.messageChannel;
};
FlintSenderManager.prototype._openMessageChannel = function(channel) {
return this.once(channel.getName() + 'available', (function(_this) {
return function(channelUrl) {
var url;
console.log('available: ', channel.getName() + 'available');
url = channelUrl + '/senders/' + _this.token;
console.log(channel.getName(), ' open url: ', url);
return channel.open(url);
};
})(this));
};
FlintSenderManager.prototype.createMessageBus = function(namespace) {
var messageBus;
if (!namespace) {
namespace = FlintConstants.DEFAULT_NAMESPACE;
}
if (!this.messageChannel) {
this.messageChannel = this._createMessageChannel();
}
messageBus = this._createMessageBus(namespace);
return messageBus;
};
FlintSenderManager.prototype._createMessageBus = function(namespace) {
var messageBus;
messageBus = null;
if (this.messageBusList[namespace]) {
messageBus = this.messageBusList[namespace];
} else {
messageBus = new SenderMessageBus(this.messageChannel, namespace);
this.messageBusList[namespace] = messageBus;
}
return messageBus;
};
FlintSenderManager.prototype._createPeer = function() {
var peer;
peer = new Peer({
host: this.host,
port: '9433',
secure: false
});
return peer;
};
FlintSenderManager.prototype.connectReceiverDataPeer = function(options) {
var peer;
peer = new Peer({
host: this.host,
port: '9433',
secure: false
});
peer.on('open', (function(_this) {
return function(peerId) {
console.log('peer [', peerId, '] opened!!!');
if (_this.additionalData['dataPeerId']) {
return peer.connect(_this.additionalData['dataPeerId'], options);
} else {
return _this.once('dataPeerId' + 'available', function(peerId) {
return peer.connect(peerId, options);
});
}
};
})(this));
return peer;
};
FlintSenderManager.prototype.callReceiverMediaPeer = function(stream, options) {
var peer;
peer = new Peer({
host: this.host,
port: '9433',
secure: false
});
peer.on('open', (function(_this) {
return function(peerId) {
console.log('peer [', peerId, '] opened!!!');
if (_this.additionalData['mediaPeerId']) {
return peer.call(_this.additionalData['mediaPeerId'], stream, options);
} else {
return _this.once('mediaPeerId' + 'available', function(peerId) {
return peer.call(peerId, stream, options);
});
}
};
})(this));
return peer;
};
FlintSenderManager.prototype._clean = function() {
var _ref;
if ((_ref = this.messageChannel) != null) {
_ref.close();
}
this.messageChannel = null;
return this.messageBusList = null;
};
return FlintSenderManager;
})(EventEmitter);
module.exports = FlintSenderManager;
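// Usage sketch (hedged; urls, ports, and app ids are hypothetical):
//   var mgr = new FlintSenderManager({appId: '~myapp', urlBase: 'http://device-ip:9431', host: 'device-ip'});
//   mgr.launch({appUrl: 'http://example.com/receiver.html'}, function(type, result, token) {
//     if (result) {
//       var bus = mgr.createMessageBus(); // default namespace
//       bus.on('message', function(msg) { console.log('From receiver:', msg); });
//       bus.send('hello receiver');
//     }
//   });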
},{"../common/FlintConstants":2,"../common/Platform":5,"../peerjs/peer":10,"./SenderMessageBus":14,"./SenderMessageChannel":15,"eventemitter3":17}],14:[function(require,module,exports){
var MessageBus, SenderMessageBus,
__hasProp = {}.hasOwnProperty,
__extends = function(child, parent) { for (var key in parent) { if (__hasProp.call(parent, key)) child[key] = parent[key]; } function ctor() { this.constructor = child; } ctor.prototype = parent.prototype; child.prototype = new ctor(); child.__super__ = parent.prototype; return child; };
MessageBus = require('../common/MessageBus');
SenderMessageBus = (function(_super) {
__extends(SenderMessageBus, _super);
function SenderMessageBus(channel, namespace) {
SenderMessageBus.__super__.constructor.call(this, channel, namespace);
}
SenderMessageBus.prototype._init = function() {
return this.channel.on('message', (function(_this) {
return function(message) {
var data, e;
try {
data = JSON.parse(message);
if ((data.namespace === _this.namespace) && data.payload) {
return _this.emit('message', data.payload);
}
} catch (_error) {
e = _error;
}
};
})(this));
};
SenderMessageBus.prototype.send = function(data) {
var message;
message = {
namespace: this.namespace,
payload: data
};
return this.channel.send(JSON.stringify(message));
};
return SenderMessageBus;
})(MessageBus);
module.exports = SenderMessageBus;
},{"../common/MessageBus":3}],15:[function(require,module,exports){
var MessageChannel, SenderMessageChannel,
__hasProp = {}.hasOwnProperty,
__extends = function(child, parent) { for (var key in parent) { if (__hasProp.call(parent, key)) child[key] = parent[key]; } function ctor() { this.constructor = child; } ctor.prototype = parent.prototype; child.prototype = new ctor(); child.__super__ = parent.prototype; return child; };
MessageChannel = require('../common/MessageChannel');
SenderMessageChannel = (function(_super) {
__extends(SenderMessageChannel, _super);
function SenderMessageChannel(name, url) {
SenderMessageChannel.__super__.constructor.call(this, name, url);
}
return SenderMessageChannel;
})(MessageChannel);
module.exports = SenderMessageChannel;
},{"../common/MessageChannel":4}],16:[function(require,module,exports){
window.FlintSenderManager = require('./FlintSenderManager');
},{"./FlintSenderManager":13}],17:[function(require,module,exports){
'use strict';
/**
* Representation of a single EventEmitter function.
*
* @param {Function} fn Event handler to be called.
* @param {Mixed} context Context for function execution.
* @param {Boolean} once Only emit once
* @api private
*/
function EE(fn, context, once) {
this.fn = fn;
this.context = context;
this.once = once || false;
}
/**
* Minimal EventEmitter interface that is molded against the Node.js
* EventEmitter interface.
*
* @constructor
* @api public
*/
function EventEmitter() { /* Nothing to set */ }
/**
* Holds the assigned EventEmitters by name.
*
* @type {Object}
* @private
*/
EventEmitter.prototype._events = undefined;
/**
* Return a list of assigned event listeners.
*
* @param {String} event The event whose listeners should be listed.
* @returns {Array}
* @api public
*/
EventEmitter.prototype.listeners = function listeners(event) {
if (!this._events || !this._events[event]) return [];
if (this._events[event].fn) return [this._events[event].fn];
for (var i = 0, l = this._events[event].length, ee = new Array(l); i < l; i++) {
ee[i] = this._events[event][i].fn;
}
return ee;
};
/**
* Emit an event to all registered event listeners.
*
* @param {String} event The name of the event.
* @returns {Boolean} Indication if we've emitted an event.
* @api public
*/
EventEmitter.prototype.emit = function emit(event, a1, a2, a3, a4, a5) {
if (!this._events || !this._events[event]) return false;
var listeners = this._events[event]
, len = arguments.length
, args
, i;
if ('function' === typeof listeners.fn) {
if (listeners.once) this.removeListener(event, listeners.fn, true);
switch (len) {
case 1: return listeners.fn.call(listeners.context), true;
case 2: return listeners.fn.call(listeners.context, a1), true;
case 3: return listeners.fn.call(listeners.context, a1, a2), true;
case 4: return listeners.fn.call(listeners.context, a1, a2, a3), true;
case 5: return listeners.fn.call(listeners.context, a1, a2, a3, a4), true;
case 6: return listeners.fn.call(listeners.context, a1, a2, a3, a4, a5), true;
}
for (i = 1, args = new Array(len -1); i < len; i++) {
args[i - 1] = arguments[i];
}
listeners.fn.apply(listeners.context, args);
} else {
var length = listeners.length
, j;
for (i = 0; i < length; i++) {
if (listeners[i].once) this.removeListener(event, listeners[i].fn, true);
switch (len) {
case 1: listeners[i].fn.call(listeners[i].context); break;
case 2: listeners[i].fn.call(listeners[i].context, a1); break;
case 3: listeners[i].fn.call(listeners[i].context, a1, a2); break;
default:
if (!args) for (j = 1, args = new Array(len -1); j < len; j++) {
args[j - 1] = arguments[j];
}
listeners[i].fn.apply(listeners[i].context, args);
}
}
}
return true;
};
/**
* Register a new EventListener for the given event.
*
* @param {String} event Name of the event.
* @param {Function} fn Callback function.
* @param {Mixed} context The context of the function.
* @api public
*/
EventEmitter.prototype.on = function on(event, fn, context) {
var listener = new EE(fn, context || this);
if (!this._events) this._events = {};
if (!this._events[event]) this._events[event] = listener;
else {
if (!this._events[event].fn) this._events[event].push(listener);
else this._events[event] = [
this._events[event], listener
];
}
return this;
};
/**
* Add an EventListener that's only called once.
*
* @param {String} event Name of the event.
* @param {Function} fn Callback function.
* @param {Mixed} context The context of the function.
* @api public
*/
EventEmitter.prototype.once = function once(event, fn, context) {
var listener = new EE(fn, context || this, true);
if (!this._events) this._events = {};
if (!this._events[event]) this._events[event] = listener;
else {
if (!this._events[event].fn) this._events[event].push(listener);
else this._events[event] = [
this._events[event], listener
];
}
return this;
};
/**
* Remove event listeners.
*
* @param {String} event The event we want to remove.
* @param {Function} fn The listener that we need to find.
* @param {Boolean} once Only remove once listeners.
* @api public
*/
EventEmitter.prototype.removeListener = function removeListener(event, fn, once) {
if (!this._events || !this._events[event]) return this;
var listeners = this._events[event]
, events = [];
if (fn) {
if (listeners.fn && (listeners.fn !== fn || (once && !listeners.once))) {
events.push(listeners);
}
if (!listeners.fn) for (var i = 0, length = listeners.length; i < length; i++) {
if (listeners[i].fn !== fn || (once && !listeners[i].once)) {
events.push(listeners[i]);
}
}
}
//
// Reset the array, or remove it completely if we have no more listeners.
//
if (events.length) {
this._events[event] = events.length === 1 ? events[0] : events;
} else {
delete this._events[event];
}
return this;
};
/**
* Remove all listeners or only the listeners for the specified event.
*
 * @param {String} event The event we want to remove all listeners for.
* @api public
*/
EventEmitter.prototype.removeAllListeners = function removeAllListeners(event) {
if (!this._events) return this;
if (event) delete this._events[event];
else this._events = {};
return this;
};
//
// Alias method names because people roll like that.
//
EventEmitter.prototype.off = EventEmitter.prototype.removeListener;
EventEmitter.prototype.addListener = EventEmitter.prototype.on;
//
// This function doesn't apply anymore.
//
EventEmitter.prototype.setMaxListeners = function setMaxListeners() {
return this;
};
//
// Expose the module.
//
EventEmitter.EventEmitter = EventEmitter;
EventEmitter.EventEmitter2 = EventEmitter;
EventEmitter.EventEmitter3 = EventEmitter;
//
// Expose the module.
//
module.exports = EventEmitter;
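// Usage sketch (illustrative only; 'ping' and onPing are hypothetical names):
//   var ee = new EventEmitter();
//   function onPing(payload) { console.log('ping', payload); }
//   ee.on('ping', onPing);     // persistent listener
//   ee.once('ping', onPing);   // removed after its first call
//   ee.emit('ping', {n: 1});   // both listeners fire
//   ee.emit('ping', {n: 2});   // only the persistent listener remains
//   ee.off('ping', onPing);    // off is an alias for removeListener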
},{}],18:[function(require,module,exports){
var BufferBuilder = require('./bufferbuilder').BufferBuilder;
var binaryFeatures = require('./bufferbuilder').binaryFeatures;
var BinaryPack = {
unpack: function(data){
var unpacker = new Unpacker(data);
return unpacker.unpack();
},
pack: function(data){
var packer = new Packer();
packer.pack(data);
var buffer = packer.getBuffer();
return buffer;
}
};
module.exports = BinaryPack;
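// Round-trip sketch (browser context assumed): pack() returns a Blob built by
// BufferBuilder, while unpack() expects an ArrayBuffer, so decoding a packed
// Blob first needs a FileReader (or a helper like util.blobToArrayBuffer):
//   var blob = BinaryPack.pack({id: 7, tags: ['a', 'b'], ok: true});
//   var fr = new FileReader();
//   fr.onload = function(e) { console.log(BinaryPack.unpack(e.target.result)); };
//   fr.readAsArrayBuffer(blob);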
function Unpacker (data){
// Data is ArrayBuffer
this.index = 0;
this.dataBuffer = data;
this.dataView = new Uint8Array(this.dataBuffer);
this.length = this.dataBuffer.byteLength;
}
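// Tag dispatch used by unpack() below. This follows the MessagePack layout,
// plus BinaryPack's nonstandard additions (0xb0-0xbf/0xd8/0xd9 for strings,
// 0xc1 for undefined):
//   0x00-0x7f positive fixnum        0xe0-0xff negative fixnum
//   0x80-0x8f fixmap                 0x90-0x9f fixarray
//   0xa0-0xaf fixraw                 0xb0-0xbf fixstring
//   0xc0 null   0xc1 undefined   0xc2 false   0xc3 true
//   0xca float32   0xcb float64
//   0xcc-0xcf uint8/16/32/64     0xd0-0xd3 int8/16/32/64
//   0xd8/0xd9 string16/32        0xda/0xdb raw16/32
//   0xdc/0xdd array16/32         0xde/0xdf map16/32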
Unpacker.prototype.unpack = function(){
var type = this.unpack_uint8();
if (type < 0x80){
var positive_fixnum = type;
return positive_fixnum;
} else if ((type ^ 0xe0) < 0x20){
var negative_fixnum = (type ^ 0xe0) - 0x20;
return negative_fixnum;
}
var size;
if ((size = type ^ 0xa0) <= 0x0f){
return this.unpack_raw(size);
} else if ((size = type ^ 0xb0) <= 0x0f){
return this.unpack_string(size);
} else if ((size = type ^ 0x90) <= 0x0f){
return this.unpack_array(size);
} else if ((size = type ^ 0x80) <= 0x0f){
return this.unpack_map(size);
}
switch(type){
case 0xc0:
return null;
case 0xc1:
return undefined;
case 0xc2:
return false;
case 0xc3:
return true;
case 0xca:
return this.unpack_float();
case 0xcb:
return this.unpack_double();
case 0xcc:
return this.unpack_uint8();
case 0xcd:
return this.unpack_uint16();
case 0xce:
return this.unpack_uint32();
case 0xcf:
return this.unpack_uint64();
case 0xd0:
return this.unpack_int8();
case 0xd1:
return this.unpack_int16();
case 0xd2:
return this.unpack_int32();
case 0xd3:
return this.unpack_int64();
case 0xd4:
return undefined;
case 0xd5:
return undefined;
case 0xd6:
return undefined;
case 0xd7:
return undefined;
case 0xd8:
size = this.unpack_uint16();
return this.unpack_string(size);
case 0xd9:
size = this.unpack_uint32();
return this.unpack_string(size);
case 0xda:
size = this.unpack_uint16();
return this.unpack_raw(size);
case 0xdb:
size = this.unpack_uint32();
return this.unpack_raw(size);
case 0xdc:
size = this.unpack_uint16();
return this.unpack_array(size);
case 0xdd:
size = this.unpack_uint32();
return this.unpack_array(size);
case 0xde:
size = this.unpack_uint16();
return this.unpack_map(size);
case 0xdf:
size = this.unpack_uint32();
return this.unpack_map(size);
}
}
Unpacker.prototype.unpack_uint8 = function(){
var byte = this.dataView[this.index] & 0xff;
this.index++;
return byte;
};
Unpacker.prototype.unpack_uint16 = function(){
var bytes = this.read(2);
var uint16 =
((bytes[0] & 0xff) * 256) + (bytes[1] & 0xff);
this.index += 2;
return uint16;
}
Unpacker.prototype.unpack_uint32 = function(){
var bytes = this.read(4);
var uint32 =
((bytes[0] * 256 +
bytes[1]) * 256 +
bytes[2]) * 256 +
bytes[3];
this.index += 4;
return uint32;
}
Unpacker.prototype.unpack_uint64 = function(){
var bytes = this.read(8);
var uint64 =
((((((bytes[0] * 256 +
bytes[1]) * 256 +
bytes[2]) * 256 +
bytes[3]) * 256 +
bytes[4]) * 256 +
bytes[5]) * 256 +
bytes[6]) * 256 +
bytes[7];
this.index += 8;
return uint64;
}
Unpacker.prototype.unpack_int8 = function(){
var uint8 = this.unpack_uint8();
return (uint8 < 0x80 ) ? uint8 : uint8 - (1 << 8);
};
Unpacker.prototype.unpack_int16 = function(){
var uint16 = this.unpack_uint16();
return (uint16 < 0x8000 ) ? uint16 : uint16 - (1 << 16);
}
Unpacker.prototype.unpack_int32 = function(){
var uint32 = this.unpack_uint32();
return (uint32 < Math.pow(2, 31) ) ? uint32 :
uint32 - Math.pow(2, 32);
}
Unpacker.prototype.unpack_int64 = function(){
var uint64 = this.unpack_uint64();
return (uint64 < Math.pow(2, 63) ) ? uint64 :
uint64 - Math.pow(2, 64);
}
Unpacker.prototype.unpack_raw = function(size){
if ( this.length < this.index + size){
throw new Error('BinaryPackFailure: index is out of range'
+ ' ' + this.index + ' ' + size + ' ' + this.length);
}
var buf = this.dataBuffer.slice(this.index, this.index + size);
this.index += size;
//buf = util.bufferToString(buf);
return buf;
}
Unpacker.prototype.unpack_string = function(size){
var bytes = this.read(size);
var i = 0, str = '', c, code;
while(i < size){
c = bytes[i];
if ( c < 128){
str += String.fromCharCode(c);
i++;
} else if ((c ^ 0xc0) < 32){
code = ((c ^ 0xc0) << 6) | (bytes[i+1] & 63);
str += String.fromCharCode(code);
i += 2;
} else {
code = ((c & 15) << 12) | ((bytes[i+1] & 63) << 6) |
(bytes[i+2] & 63);
str += String.fromCharCode(code);
i += 3;
}
}
this.index += size;
return str;
}
Unpacker.prototype.unpack_array = function(size){
var objects = new Array(size);
for(var i = 0; i < size ; i++){
objects[i] = this.unpack();
}
return objects;
}
Unpacker.prototype.unpack_map = function(size){
var map = {};
for(var i = 0; i < size ; i++){
var key = this.unpack();
var value = this.unpack();
map[key] = value;
}
return map;
}
Unpacker.prototype.unpack_float = function(){
var uint32 = this.unpack_uint32();
var sign = uint32 >> 31;
var exp = ((uint32 >> 23) & 0xff) - 127;
var fraction = ( uint32 & 0x7fffff ) | 0x800000;
return (sign == 0 ? 1 : -1) *
fraction * Math.pow(2, exp - 23);
}
Unpacker.prototype.unpack_double = function(){
var h32 = this.unpack_uint32();
var l32 = this.unpack_uint32();
var sign = h32 >> 31;
var exp = ((h32 >> 20) & 0x7ff) - 1023;
var hfrac = ( h32 & 0xfffff ) | 0x100000;
var frac = hfrac * Math.pow(2, exp - 20) +
l32 * Math.pow(2, exp - 52);
return (sign == 0 ? 1 : -1) * frac;
}
Unpacker.prototype.read = function(length){
var j = this.index;
if (j + length <= this.length) {
return this.dataView.subarray(j, j + length);
} else {
throw new Error('BinaryPackFailure: read index out of range');
}
}
function Packer(){
this.bufferBuilder = new BufferBuilder();
}
Packer.prototype.getBuffer = function(){
return this.bufferBuilder.getBuffer();
}
Packer.prototype.pack = function(value){
var type = typeof(value);
if (type == 'string'){
this.pack_string(value);
} else if (type == 'number'){
if (Math.floor(value) === value){
this.pack_integer(value);
} else{
this.pack_double(value);
}
} else if (type == 'boolean'){
if (value === true){
this.bufferBuilder.append(0xc3);
} else if (value === false){
this.bufferBuilder.append(0xc2);
}
} else if (type == 'undefined'){
this.bufferBuilder.append(0xc0);
} else if (type == 'object'){
if (value === null){
this.bufferBuilder.append(0xc0);
} else {
var constructor = value.constructor;
if (constructor == Array){
this.pack_array(value);
} else if (constructor == Blob || constructor == File) {
this.pack_bin(value);
} else if (constructor == ArrayBuffer) {
if(binaryFeatures.useArrayBufferView) {
this.pack_bin(new Uint8Array(value));
} else {
this.pack_bin(value);
}
} else if ('BYTES_PER_ELEMENT' in value){
if(binaryFeatures.useArrayBufferView) {
this.pack_bin(new Uint8Array(value.buffer));
} else {
this.pack_bin(value.buffer);
}
} else if (constructor == Object){
this.pack_object(value);
} else if (constructor == Date){
this.pack_string(value.toString());
} else if (typeof value.toBinaryPack == 'function'){
this.bufferBuilder.append(value.toBinaryPack());
} else {
throw new Error('Type "' + constructor.toString() + '" not yet supported');
}
}
} else {
throw new Error('Type "' + type + '" not yet supported');
}
this.bufferBuilder.flush();
}
Packer.prototype.pack_bin = function(blob){
var length = blob.length || blob.byteLength || blob.size;
if (length <= 0x0f){
this.pack_uint8(0xa0 + length);
} else if (length <= 0xffff){
this.bufferBuilder.append(0xda) ;
this.pack_uint16(length);
} else if (length <= 0xffffffff){
this.bufferBuilder.append(0xdb);
this.pack_uint32(length);
} else{
throw new Error('Invalid length');
}
this.bufferBuilder.append(blob);
}
Packer.prototype.pack_string = function(str){
var length = utf8Length(str);
if (length <= 0x0f){
this.pack_uint8(0xb0 + length);
} else if (length <= 0xffff){
this.bufferBuilder.append(0xd8) ;
this.pack_uint16(length);
} else if (length <= 0xffffffff){
this.bufferBuilder.append(0xd9);
this.pack_uint32(length);
} else{
throw new Error('Invalid length');
}
this.bufferBuilder.append(str);
}
Packer.prototype.pack_array = function(ary){
var length = ary.length;
if (length <= 0x0f){
this.pack_uint8(0x90 + length);
} else if (length <= 0xffff){
this.bufferBuilder.append(0xdc)
this.pack_uint16(length);
} else if (length <= 0xffffffff){
this.bufferBuilder.append(0xdd);
this.pack_uint32(length);
} else{
throw new Error('Invalid length');
}
for(var i = 0; i < length ; i++){
this.pack(ary[i]);
}
}
Packer.prototype.pack_integer = function(num){
if ( -0x20 <= num && num <= 0x7f){
this.bufferBuilder.append(num & 0xff);
} else if (0x00 <= num && num <= 0xff){
this.bufferBuilder.append(0xcc);
this.pack_uint8(num);
} else if (-0x80 <= num && num <= 0x7f){
this.bufferBuilder.append(0xd0);
this.pack_int8(num);
} else if ( 0x0000 <= num && num <= 0xffff){
this.bufferBuilder.append(0xcd);
this.pack_uint16(num);
} else if (-0x8000 <= num && num <= 0x7fff){
this.bufferBuilder.append(0xd1);
this.pack_int16(num);
} else if ( 0x00000000 <= num && num <= 0xffffffff){
this.bufferBuilder.append(0xce);
this.pack_uint32(num);
} else if (-0x80000000 <= num && num <= 0x7fffffff){
this.bufferBuilder.append(0xd2);
this.pack_int32(num);
} else if (-0x8000000000000000 <= num && num <= 0x7FFFFFFFFFFFFFFF){
this.bufferBuilder.append(0xd3);
this.pack_int64(num);
} else if (0x0000000000000000 <= num && num <= 0xFFFFFFFFFFFFFFFF){
this.bufferBuilder.append(0xcf);
this.pack_uint64(num);
} else{
throw new Error('Invalid integer');
}
}
Packer.prototype.pack_double = function(num){
var sign = 0;
if (num < 0){
sign = 1;
num = -num;
}
var exp = Math.floor(Math.log(num) / Math.LN2);
var frac0 = num / Math.pow(2, exp) - 1;
var frac1 = Math.floor(frac0 * Math.pow(2, 52));
var b32 = Math.pow(2, 32);
var h32 = (sign << 31) | ((exp+1023) << 20) |
(frac1 / b32) & 0x0fffff;
var l32 = frac1 % b32;
this.bufferBuilder.append(0xcb);
this.pack_int32(h32);
this.pack_int32(l32);
}
Packer.prototype.pack_object = function(obj){
var keys = Object.keys(obj);
var length = keys.length;
if (length <= 0x0f){
this.pack_uint8(0x80 + length);
} else if (length <= 0xffff){
this.bufferBuilder.append(0xde);
this.pack_uint16(length);
} else if (length <= 0xffffffff){
this.bufferBuilder.append(0xdf);
this.pack_uint32(length);
} else{
throw new Error('Invalid length');
}
for(var prop in obj){
if (obj.hasOwnProperty(prop)){
this.pack(prop);
this.pack(obj[prop]);
}
}
}
Packer.prototype.pack_uint8 = function(num){
this.bufferBuilder.append(num);
}
Packer.prototype.pack_uint16 = function(num){
this.bufferBuilder.append(num >> 8);
this.bufferBuilder.append(num & 0xff);
}
Packer.prototype.pack_uint32 = function(num){
var n = num & 0xffffffff;
this.bufferBuilder.append((n & 0xff000000) >>> 24);
this.bufferBuilder.append((n & 0x00ff0000) >>> 16);
this.bufferBuilder.append((n & 0x0000ff00) >>> 8);
this.bufferBuilder.append((n & 0x000000ff));
}
Packer.prototype.pack_uint64 = function(num){
var high = num / Math.pow(2, 32);
var low = num % Math.pow(2, 32);
this.bufferBuilder.append((high & 0xff000000) >>> 24);
this.bufferBuilder.append((high & 0x00ff0000) >>> 16);
this.bufferBuilder.append((high & 0x0000ff00) >>> 8);
this.bufferBuilder.append((high & 0x000000ff));
this.bufferBuilder.append((low & 0xff000000) >>> 24);
this.bufferBuilder.append((low & 0x00ff0000) >>> 16);
this.bufferBuilder.append((low & 0x0000ff00) >>> 8);
this.bufferBuilder.append((low & 0x000000ff));
}
Packer.prototype.pack_int8 = function(num){
this.bufferBuilder.append(num & 0xff);
}
Packer.prototype.pack_int16 = function(num){
this.bufferBuilder.append((num & 0xff00) >> 8);
this.bufferBuilder.append(num & 0xff);
}
Packer.prototype.pack_int32 = function(num){
this.bufferBuilder.append((num >>> 24) & 0xff);
this.bufferBuilder.append((num & 0x00ff0000) >>> 16);
this.bufferBuilder.append((num & 0x0000ff00) >>> 8);
this.bufferBuilder.append((num & 0x000000ff));
}
Packer.prototype.pack_int64 = function(num){
var high = Math.floor(num / Math.pow(2, 32));
var low = num % Math.pow(2, 32);
this.bufferBuilder.append((high & 0xff000000) >>> 24);
this.bufferBuilder.append((high & 0x00ff0000) >>> 16);
this.bufferBuilder.append((high & 0x0000ff00) >>> 8);
this.bufferBuilder.append((high & 0x000000ff));
this.bufferBuilder.append((low & 0xff000000) >>> 24);
this.bufferBuilder.append((low & 0x00ff0000) >>> 16);
this.bufferBuilder.append((low & 0x0000ff00) >>> 8);
this.bufferBuilder.append((low & 0x000000ff));
}
function _utf8Replace(m){
var code = m.charCodeAt(0);
if(code <= 0x7ff) return '00';
if(code <= 0xffff) return '000';
if(code <= 0x1fffff) return '0000';
if(code <= 0x3ffffff) return '00000';
return '000000';
}
function utf8Length(str){
if (str.length > 600) {
// Blob method faster for large strings
return (new Blob([str])).size;
} else {
return str.replace(/[^\u0000-\u007F]/g, _utf8Replace).length;
}
}
},{"./bufferbuilder":19}],19:[function(require,module,exports){
var binaryFeatures = {};
binaryFeatures.useBlobBuilder = (function(){
try {
new Blob([]);
return false;
} catch (e) {
return true;
}
})();
binaryFeatures.useArrayBufferView = !binaryFeatures.useBlobBuilder && (function(){
try {
return (new Blob([new Uint8Array([])])).size === 0;
} catch (e) {
return true;
}
})();
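// Feature summary: useBlobBuilder is true only where the Blob constructor
// throws (legacy engines), selecting the deprecated BlobBuilder path below.
// useArrayBufferView is true when Blob handles a typed-array part correctly
// (an empty Uint8Array yields a zero-size blob), in which case the packer
// hands Uint8Array views to Blob instead of raw ArrayBuffers.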
module.exports.binaryFeatures = binaryFeatures;
var BlobBuilder = module.exports.BlobBuilder;
if (typeof window != 'undefined') {
BlobBuilder = module.exports.BlobBuilder = window.WebKitBlobBuilder ||
window.MozBlobBuilder || window.MSBlobBuilder || window.BlobBuilder;
}
function BufferBuilder(){
this._pieces = [];
this._parts = [];
}
BufferBuilder.prototype.append = function(data) {
if(typeof data === 'number') {
this._pieces.push(data);
} else {
this.flush();
this._parts.push(data);
}
};
BufferBuilder.prototype.flush = function() {
if (this._pieces.length > 0) {
var buf = new Uint8Array(this._pieces);
if(!binaryFeatures.useArrayBufferView) {
buf = buf.buffer;
}
this._parts.push(buf);
this._pieces = [];
}
};
BufferBuilder.prototype.getBuffer = function() {
this.flush();
if(binaryFeatures.useBlobBuilder) {
var builder = new BlobBuilder();
for(var i = 0, ii = this._parts.length; i < ii; i++) {
builder.append(this._parts[i]);
}
return builder.getBlob();
} else {
return new Blob(this._parts);
}
};
module.exports.BufferBuilder = BufferBuilder;
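// Usage sketch: numeric appends are buffered as single bytes in _pieces;
// any other value flushes that byte run and is appended as its own part.
//   var bb = new BufferBuilder();
//   bb.append(0xcc);                        // queued as a byte
//   bb.append(new Uint8Array([1, 2, 3]));   // flushes [0xcc], adds the view
//   var blob = bb.getBuffer();              // Blob over bytes [0xcc, 1, 2, 3]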
},{}],20:[function(require,module,exports){
var util = require('./util');
/**
* Reliable transfer for Chrome Canary DataChannel impl.
* Author: @michellebu
*/
function Reliable(dc, debug) {
  if (!(this instanceof Reliable)) return new Reliable(dc, debug);
this._dc = dc;
util.debug = debug;
// Messages sent/received so far.
// id: { ack: n, chunks: [...] }
this._outgoing = {};
// id: { ack: ['ack', id, n], chunks: [...] }
this._incoming = {};
this._received = {};
// Window size.
this._window = 1000;
// MTU.
this._mtu = 500;
// Interval for setInterval. In ms.
this._interval = 0;
// Messages sent.
this._count = 0;
// Outgoing message queue.
this._queue = [];
this._setupDC();
};
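// Wire formats handled by _handleMessage (each packed with BinaryPack):
//   ['no', packed]            message small enough to skip chunking;
//                             msg[1] is the packed payload itself
//   ['chunk', id, n, bytes]   nth MTU-sized slice of message id
//   ['end', id, total]        sender finished; total is the chunk count
//   ['ack', id, n]            receiver expects chunk n next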
// Send a message reliably.
Reliable.prototype.send = function(msg) {
// Determine if chunking is necessary.
var bl = util.pack(msg);
if (bl.size < this._mtu) {
this._handleSend(['no', bl]);
return;
}
this._outgoing[this._count] = {
ack: 0,
chunks: this._chunk(bl)
};
if (util.debug) {
this._outgoing[this._count].timer = new Date();
}
// Send prelim window.
this._sendWindowedChunks(this._count);
this._count += 1;
};
// Set up interval for processing queue.
Reliable.prototype._setupInterval = function() {
// TODO: fail gracefully.
var self = this;
this._timeout = setInterval(function() {
// FIXME: String stuff makes things terribly async.
var msg = self._queue.shift();
if (msg._multiple) {
for (var i = 0, ii = msg.length; i < ii; i += 1) {
self._intervalSend(msg[i]);
}
} else {
self._intervalSend(msg);
}
}, this._interval);
};
Reliable.prototype._intervalSend = function(msg) {
var self = this;
msg = util.pack(msg);
util.blobToBinaryString(msg, function(str) {
self._dc.send(str);
});
if (self._queue.length === 0) {
clearTimeout(self._timeout);
self._timeout = null;
//self._processAcks();
}
};
// Go through ACKs to send missing pieces.
Reliable.prototype._processAcks = function() {
for (var id in this._outgoing) {
if (this._outgoing.hasOwnProperty(id)) {
this._sendWindowedChunks(id);
}
}
};
// Handle sending a message.
// FIXME: Don't wait for interval time for all messages...
Reliable.prototype._handleSend = function(msg) {
var push = true;
for (var i = 0, ii = this._queue.length; i < ii; i += 1) {
var item = this._queue[i];
if (item === msg) {
push = false;
} else if (item._multiple && item.indexOf(msg) !== -1) {
push = false;
}
}
if (push) {
this._queue.push(msg);
if (!this._timeout) {
this._setupInterval();
}
}
};
// Set up DataChannel handlers.
Reliable.prototype._setupDC = function() {
// Handle various message types.
var self = this;
this._dc.onmessage = function(e) {
var msg = e.data;
var datatype = msg.constructor;
// FIXME: msg is String until binary is supported.
// Once that happens, this will have to be smarter.
if (datatype === String) {
var ab = util.binaryStringToArrayBuffer(msg);
msg = util.unpack(ab);
self._handleMessage(msg);
}
};
};
// Handles an incoming message.
Reliable.prototype._handleMessage = function(msg) {
var id = msg[1];
var idata = this._incoming[id];
var odata = this._outgoing[id];
var data;
switch (msg[0]) {
// No chunking was done.
case 'no':
var message = id;
if (!!message) {
this.onmessage(util.unpack(message));
}
break;
// Reached the end of the message.
case 'end':
data = idata;
// In case end comes first.
this._received[id] = msg[2];
if (!data) {
break;
}
this._ack(id);
break;
case 'ack':
data = odata;
if (!!data) {
var ack = msg[2];
// Take the larger ACK, for out of order messages.
data.ack = Math.max(ack, data.ack);
// Clean up when all chunks are ACKed.
if (data.ack >= data.chunks.length) {
util.log('Time: ', new Date() - data.timer);
delete this._outgoing[id];
} else {
this._processAcks();
}
}
// If !data, just ignore.
break;
// Received a chunk of data.
case 'chunk':
// Create a new entry if none exists.
data = idata;
if (!data) {
var end = this._received[id];
if (end === true) {
break;
}
data = {
ack: ['ack', id, 0],
chunks: []
};
this._incoming[id] = data;
}
var n = msg[2];
var chunk = msg[3];
data.chunks[n] = new Uint8Array(chunk);
// If we get the chunk we're looking for, ACK for next missing.
// Otherwise, ACK the same N again.
if (n === data.ack[2]) {
this._calculateNextAck(id);
}
this._ack(id);
break;
default:
// Shouldn't happen, but would make sense for message to just go
// through as is.
this._handleSend(msg);
break;
}
};
// Chunks BL into smaller messages.
Reliable.prototype._chunk = function(bl) {
var chunks = [];
var size = bl.size;
var start = 0;
while (start < size) {
var end = Math.min(size, start + this._mtu);
var b = bl.slice(start, end);
var chunk = {
payload: b
}
chunks.push(chunk);
start = end;
}
util.log('Created', chunks.length, 'chunks.');
return chunks;
};
// Sends ACK N, expecting Nth blob chunk for message ID.
Reliable.prototype._ack = function(id) {
var ack = this._incoming[id].ack;
// if ack is the end value, then call _complete.
if (this._received[id] === ack[2]) {
this._complete(id);
this._received[id] = true;
}
this._handleSend(ack);
};
// Calculates the next ACK number, given chunks.
Reliable.prototype._calculateNextAck = function(id) {
var data = this._incoming[id];
var chunks = data.chunks;
for (var i = 0, ii = chunks.length; i < ii; i += 1) {
// This chunk is missing!!! Better ACK for it.
if (chunks[i] === undefined) {
data.ack[2] = i;
return;
}
}
data.ack[2] = chunks.length;
};
// Sends the next window of chunks.
Reliable.prototype._sendWindowedChunks = function(id) {
util.log('sendWindowedChunks for: ', id);
var data = this._outgoing[id];
var ch = data.chunks;
var chunks = [];
var limit = Math.min(data.ack + this._window, ch.length);
for (var i = data.ack; i < limit; i += 1) {
if (!ch[i].sent || i === data.ack) {
ch[i].sent = true;
chunks.push(['chunk', id, i, ch[i].payload]);
}
}
if (data.ack + this._window >= ch.length) {
chunks.push(['end', id, ch.length])
}
chunks._multiple = true;
this._handleSend(chunks);
};
// Puts together a message from chunks.
Reliable.prototype._complete = function(id) {
util.log('Completed called for', id);
var self = this;
var chunks = this._incoming[id].chunks;
var bl = new Blob(chunks);
util.blobToArrayBuffer(bl, function(ab) {
self.onmessage(util.unpack(ab));
});
delete this._incoming[id];
};
// Ups bandwidth limit on SDP. Meant to be called during offer/answer.
Reliable.higherBandwidthSDP = function(sdp) {
// AS stands for Application-Specific Maximum.
// Bandwidth number is in kilobits / sec.
// See RFC for more info: http://www.ietf.org/rfc/rfc2327.txt
// Chrome 31+ doesn't want us munging the SDP, so we'll let them have their
// way.
var version = navigator.appVersion.match(/Chrome\/(.*?) /);
if (version) {
version = parseInt(version[1].split('.').shift());
if (version < 31) {
var parts = sdp.split('b=AS:30');
var replace = 'b=AS:102400'; // 100 Mbps
if (parts.length > 1) {
return parts[0] + replace + parts[1];
}
}
}
return sdp;
};
// Overwritten, typically.
Reliable.prototype.onmessage = function(msg) {};
module.exports.Reliable = Reliable;
},{"./util":21}],21:[function(require,module,exports){
var BinaryPack = require('js-binarypack');
var util = {
debug: false,
inherits: function(ctor, superCtor) {
ctor.super_ = superCtor;
ctor.prototype = Object.create(superCtor.prototype, {
constructor: {
value: ctor,
enumerable: false,
writable: true,
configurable: true
}
});
},
extend: function(dest, source) {
for(var key in source) {
if(source.hasOwnProperty(key)) {
dest[key] = source[key];
}
}
return dest;
},
pack: BinaryPack.pack,
unpack: BinaryPack.unpack,
log: function () {
if (util.debug) {
var copy = [];
for (var i = 0; i < arguments.length; i++) {
copy[i] = arguments[i];
}
copy.unshift('Reliable: ');
console.log.apply(console, copy);
}
},
setZeroTimeout: (function(global) {
var timeouts = [];
var messageName = 'zero-timeout-message';
// Like setTimeout, but only takes a function argument. There's
// no time argument (always zero) and no arguments (you have to
// use a closure).
function setZeroTimeoutPostMessage(fn) {
timeouts.push(fn);
global.postMessage(messageName, '*');
}
function handleMessage(event) {
if (event.source == global && event.data == messageName) {
if (event.stopPropagation) {
event.stopPropagation();
}
if (timeouts.length) {
timeouts.shift()();
}
}
}
if (global.addEventListener) {
global.addEventListener('message', handleMessage, true);
} else if (global.attachEvent) {
global.attachEvent('onmessage', handleMessage);
}
return setZeroTimeoutPostMessage;
}(this)),
blobToArrayBuffer: function(blob, cb){
var fr = new FileReader();
fr.onload = function(evt) {
cb(evt.target.result);
};
fr.readAsArrayBuffer(blob);
},
blobToBinaryString: function(blob, cb){
var fr = new FileReader();
fr.onload = function(evt) {
cb(evt.target.result);
};
fr.readAsBinaryString(blob);
},
binaryStringToArrayBuffer: function(binary) {
var byteArray = new Uint8Array(binary.length);
for (var i = 0; i < binary.length; i++) {
byteArray[i] = binary.charCodeAt(i) & 0xff;
}
return byteArray.buffer;
},
randomToken: function () {
return Math.random().toString(36).substr(2);
}
};
module.exports = util;
},{"js-binarypack":18}]},{},[16]); | } |
value.go | // Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package flag
import (
"fmt"
"strings"
"github.com/elastic/go-ucfg"
"github.com/elastic/go-ucfg/internal/parse"
)
// NewFlagKeyValue implements the flag.Value interface for
// capturing ucfg.Config settings from command line arguments.
// Configuration options follow the argument name and must be in the form of
// "key=value". Using 'D' as command line flag for example, options on command line
// must be given as:
//
// -D key1=value -D key=value
//
// Note: the space between command line option and key is required by the flag
// package to parse command line flags correctly.
//
// Note: it's valid to use a key multiple times. If a key is given multiple
// times, earlier values are overwritten; the last value seen for a key is the
// one stored in the generated configuration.
//
// The type of value must be one of bool, uint, int, float, or string. Array
// and object syntax are not supported.
//
// If autoBool is enabled (the default if Config or ConfigVar is used), keys
// without a value are treated as bool settings with the value true.
func NewFlagKeyValue(cfg *ucfg.Config, autoBool bool, opts ...ucfg.Option) *FlagValue | {
return newFlagValue(cfg, opts, func(arg string) (*ucfg.Config, error, error) {
var key string
var val interface{}
var err error
args := strings.SplitN(arg, "=", 2)
if len(args) < 2 {
if !autoBool || len(args) == 0 {
				err := fmt.Errorf("argument '%v' is empty", arg)
return nil, err, err
}
key = arg
val = true
} else {
key = args[0]
if args[1] == "" {
return nil, nil, nil
}
val, err = parse.Value(args[1])
if err != nil {
return nil, err, err
}
}
tmp := map[string]interface{}{key: val}
cfg, err := ucfg.NewFrom(tmp, opts...)
return cfg, err, err
})
} |
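// Usage sketch (hypothetical wiring; relies on FlagValue satisfying the
// standard library flag.Value interface, as documented above):
//
//	cfg := ucfg.New()
//	flag.Var(NewFlagKeyValue(cfg, true), "D", "config setting key=value")
//	flag.Parse() // "-D host=web1 -D verbose" -> host: "web1", verbose: true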
|
main.go | // Copyright 2020 The Operator-SDK Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
log "github.com/sirupsen/logrus"
"github.com/spf13/pflag"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"github.com/operator-framework/operator-sdk/pkg/helm"
hoflags "github.com/operator-framework/operator-sdk/pkg/helm/flags"
"github.com/operator-framework/operator-sdk/pkg/log/zap"
)
func main() | {
flags := hoflags.AddTo(pflag.CommandLine)
pflag.Parse()
logf.SetLogger(zap.Logger())
if err := helm.Run(flags); err != nil {
log.Fatal(err)
}
} |
|
params.rs | use std::str::FromStr;
use std::fmt;
use std::num::ParseIntError;
#[derive(Debug, PartialEq)]
pub struct Params {
query: Vec<(String, String)>,
extra_fields: Vec<String>,
lang: Option<String>,
limit: Option<u64>,
offset: Option<u64>,
}
impl Params {
pub fn new() -> Params {
Params{
query: Default::default(),
lang: None,
limit: None,
offset: None,
extra_fields: Default::default(),
}
}
pub fn lang(self, l: impl Into<String>) -> Params {
let mut params = self;
params.lang = Some(l.into());
params
}
pub fn limit(self, l: u64) -> Params {
let mut params = self;
params.limit = Some(l);
params
}
pub fn offset(self, o: u64) -> Params {
let mut params = self;
params.offset = Some(o);
params
}
pub fn add_query<K, V>(self, key: K, value: V) -> Params
where K: Into<String>, V: Into<String> {
let mut params = self;
params.query.push((key.into(), value.into()));
params
}
pub fn with_extra_fields(self, fields: Vec<String>) -> Params {
let mut params = self;
params.extra_fields = fields;
params
}
} | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut res = Vec::new();
if !self.query.is_empty() {
let q: Vec<String> = self.query.iter().map(|(key, value)| {
let mut tuple = key.clone();
if !value.is_empty() {
tuple.push_str(":");
tuple.push_str(&value);
}; tuple
}).collect();
res.push(format!("q={}", q.join("+")));
}
if !self.extra_fields.is_empty() {
res.push(format!("extra_fields={}", &self.extra_fields.join(",")));
}
self.lang.as_ref().map(|l|{res.push(format!("lang={}", l));});
self.limit.map(|l|{res.push(format!("limit={}", l));});
self.offset.map(|o|{res.push(format!("offset={}", o));});
write!(f, "?{}", res.join("&"))
}
}
impl FromStr for Params {
type Err = ParseIntError;
fn from_str(input: &str) -> Result<Params, Self::Err> {
let mut params = Params::new();
let p: Vec<&str> = input.trim_start_matches('?').split('&').collect();
for v in &p {
let tuple: Vec<&str> = v.split('=').collect();
if tuple.len() > 1 {
match tuple[0] {
"limit" => params.limit = Some(tuple[1].parse::<u64>()?),
"offset" => params.offset = Some(tuple[1].parse::<u64>()?),
"lang" => params.lang = Some(tuple[1].to_string()),
"extra_fields" => params.extra_fields = tuple[1]
.split(',').map(|i| i.to_string()).collect(),
"q" => {
let queries: Vec<&str> = tuple[1].split(|c| c == '+' || c == ' ').collect();
for query in queries {
let tuple:Vec<&str> = query.split(":").collect();
match tuple.len() {
1 => params.query.push((tuple[0].to_string(), "".to_string())),
2 => params.query.push((tuple[0].to_string(), tuple[1].to_string())),
_ => (),
}
}
},
_ => (),
}
}
}
Ok(params)
}
}
impl From<Params> for std::string::String {
fn from(p: Params) -> Self {
p.to_string()
}
}
#[cfg(test)]
mod tests {
use super::Params;
use std::str::FromStr;
#[test]
fn from_string() {
assert_eq!(
Params::from_str("?q=tag_id:1337+beer&extra_fields=user,user.avatar&limit=30&offset=10&lang=fr").unwrap(),
Params::new()
.with_extra_fields(vec!["user".to_string(), "user.avatar".to_string()])
.add_query("tag_id", "1337")
.add_query("beer", "")
.limit(30)
.offset(10)
.lang("fr"));
}
#[test]
fn to_string() {
let p = Params::new()
.with_extra_fields(vec!["user".to_string(), "user.avatar".to_string()])
.add_query("tag_id", "1337")
.add_query("beer", "")
.limit(30)
.offset(10)
.lang("fr");
assert_eq!(p.to_string(), "?q=tag_id:1337+beer&extra_fields=user,user.avatar&lang=fr&limit=30&offset=10");
}
} |
impl fmt::Display for Params { |
pretrain_Unet.py | import numpy as np
from tqdm import tqdm, trange
from script.data_handler.Base.BaseDataset import BaseDataset
from script.model.sklearn_like_model.BaseModel import BaseModel
from script.model.sklearn_like_model.Mixin import UnsupervisedMetricCallback
from script.model.sklearn_like_model.NetModule.BaseNetModule import BaseNetModule
from script.model.sklearn_like_model.NetModule.FusionNetStructure import FusionNetModule
from script.model.sklearn_like_model.NetModule.PlaceHolderModule import PlaceHolderModule
from script.model.sklearn_like_model.NetModule.TFDynamicLearningRate import TFDynamicLearningRate
from script.util.Stacker import Stacker
from script.util.tensor_ops import *
class pre_train_Unet(BaseModel):
def __init__(
self,
verbose=10,
learning_rate=0.01,
beta1=0.9,
batch_size=100,
stage=4,
n_classes=2,
capacity=64,
depth=1,
dropout_rate=0.5,
**kwargs
):
BaseModel.__init__(self, verbose, **kwargs)
self.batch_size = batch_size
self.learning_rate = learning_rate
self.beta1 = beta1
self.dropout_rate = dropout_rate
self.capacity = capacity
self.stage = stage
self.n_classes = n_classes
self.depth = depth
def _build_input_shapes(self, shapes):
self.x_ph_module = PlaceHolderModule(shapes['x'], tf.float32, name='x')
ret = {}
ret.update(self.x_ph_module.shape_dict)
return ret
def _build_main_graph(self):
self.Xs = self.x_ph_module.build().placeholder
self.net_module = FusionNetModule(
self.Xs, capacity=self.capacity, depth=self.depth, level=self.stage,
n_classes=self.n_classes, dropout_rate=self.dropout_rate
).build()
self.decode = self.net_module.decode
self.recon_module = reconModule(
self.decode, self.capacity
)
self.recon_module.build()
        # the decoder output is used directly as the reconstruction
        self._recon = self.decode
self.vars = self.net_module.vars
self.vars += self.recon_module.vars
def _build_loss_ops(self):
self.loss = tf.squared_difference(self.Xs, self._recon, name='loss')
self.loss_mean = tf.reduce_mean(self.loss, name='loss_mean')
def _build_train_ops(self):
self.drl = TFDynamicLearningRate(self.learning_rate)
self.drl.build()
self.train_op = tf.train.AdamOptimizer(
self.drl.learning_rate, self.beta1
).minimize(
loss=self.loss_mean, var_list=self.vars
)
def _train_iter(self, dataset, batch_size):
# self.net_module.set_train(self.sess)
x = dataset.next_batch(self.batch_size)
_ = self.sess.run(self.train_op, {self.Xs: x})
# self.net_module.set_predict(self.sess)
def train_AE(
self, x, epoch=1, batch_size=None, dataset_callback=None,
epoch_pbar=True, iter_pbar=True, epoch_callbacks=None,
):
if not self.is_built:
raise RuntimeError(f'{self} not built')
batch_size = getattr(self, 'batch_size') if batch_size is None else batch_size
dataset = dataset_callback if dataset_callback else BaseDataset(x=x)
metric = None
epoch_pbar = tqdm([i for i in range(1, epoch + 1)]) if epoch_pbar else None
for _ in range(1, epoch + 1):
dataset.shuffle()
iter_pbar = trange if iter_pbar else range
for _ in iter_pbar(int(dataset.size / batch_size)):
self._train_iter(dataset, batch_size)
self.sess.run(self.op_inc_global_epoch)
global_epoch = self.sess.run(self.global_epoch)
if epoch_pbar: epoch_pbar.update(1)
            metric = self.metric(x)
            if not np.isfinite(metric):
tqdm.write(f'train fail, e = {global_epoch}, metric = {metric}')
break
results = []
if epoch_callbacks:
for callback in epoch_callbacks:
result = callback(self, dataset, metric, global_epoch)
results += [result]
break_epoch = False
for result in results:
if result and getattr(result, 'break_epoch', False):
break_epoch = True
if break_epoch: break
if epoch_pbar: epoch_pbar.close()
if dataset_callback: del dataset
return metric
def metric(self, x):
if not getattr(self, '_metric_callback', None):
self._metric_callback = UnsupervisedMetricCallback(
self, self.loss_mean, self.Xs,
)
return self._metric_callback(x)
def update_learning_rate(self, lr):
self.learning_rate = lr
if self.sess is not None:
self.drl.update(self.sess, self.learning_rate)
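# Usage sketch (hypothetical; assumes the BaseModel build step has run so
# is_built is set, and that x is a float image batch matching shapes['x']):
#   model = pre_train_Unet(batch_size=32, learning_rate=1e-3)
#   model.train_AE(x, epoch=10)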
class reconModule(BaseNetModule):
| def __init__(self, x, capacity=None, reuse=False, name=None, verbose=0):
super().__init__(capacity, reuse, name, verbose)
self.x = x
def build(self):
with tf.variable_scope(self.name):
stacker = Stacker(self.x)
stacker.conv2d(1, CONV_FILTER_3311)
self.recon = stacker.sigmoid()
return self |
|
Title.js | import React from "react"
import styled from "styled-components"
export const Title = ({ title, subtitle }) => {
return (
<TitleWrapper>
<h4>
<span className="title">{title}</span>
<span>{subtitle}</span>
</h4>
</TitleWrapper>
)
}
const TitleWrapper = styled.div`
text-transform: uppercase;
font-size: 2.3rem;
margin-bottom: 2rem;
h4 {
text-align: center;
letter-spacing: 7px;
color: var(--primaryColor);
}
.title {
color: var(--mainBlack);
}
span { | }
@media (min-width: 576px) {
span {
display: inline-block;
margin: 0 0.35rem;
}
}
`
export default Title | display: block; |
collector.rs | use super::data_provider::*;
use crate::kv::{mdbx::*, tables::ErasedTable, traits::*};
use derive_more::*;
use std::{
cmp::Reverse,
collections::BinaryHeap,
ops::{Generator, GeneratorState},
pin::Pin,
};
use tempfile::TempDir;
pub struct Collector<'tmp, Key, Value>
where
Key: TableEncode,
Value: TableEncode,
<Key as TableEncode>::Encoded: Ord,
<Value as TableEncode>::Encoded: Ord,
Vec<u8>: From<<Key as TableEncode>::Encoded>,
Vec<u8>: From<<Value as TableEncode>::Encoded>,
{
tempdir: &'tmp TempDir,
buffer_size: usize,
data_providers: Vec<DataProvider>,
buffer_capacity: usize,
buffer: Vec<Entry<<Key as TableEncode>::Encoded, <Value as TableEncode>::Encoded>>,
}
pub const OPTIMAL_BUFFER_CAPACITY: usize = 512000000; // 512 Megabytes
impl<'tmp, Key, Value> Collector<'tmp, Key, Value>
where
Key: TableEncode,
Value: TableEncode,
<Key as TableEncode>::Encoded: Ord,
<Value as TableEncode>::Encoded: Ord,
Vec<u8>: From<<Key as TableEncode>::Encoded>,
Vec<u8>: From<<Value as TableEncode>::Encoded>,
{
pub fn new(tempdir: &'tmp TempDir, buffer_capacity: usize) -> Self {
Self {
tempdir,
buffer_size: 0,
buffer_capacity,
data_providers: Vec::new(),
buffer: Vec::new(),
}
}
fn flush(&mut self) {
self.buffer_size = 0;
self.buffer.sort_unstable();
let mut buf = Vec::with_capacity(self.buffer.len());
std::mem::swap(&mut buf, &mut self.buffer);
self.data_providers
.push(DataProvider::new(self.tempdir.path(), buf).unwrap());
}
pub fn push(&mut self, key: Key, value: Value) {
let key = key.encode();
let value = value.encode();
self.buffer_size += key.as_ref().len() + value.as_ref().len();
self.buffer.push(Entry { key, value });
if self.buffer_size > self.buffer_capacity {
self.flush();
}
}
pub fn iter(&mut self) -> CollectorIter<'_> {
CollectorIter {
done: false,
inner: Box::pin(|| {
                // If no data provider was spilled to disk, everything is still
                // in memory, so we can write directly without reading any files
if self.data_providers.is_empty() {
self.buffer.sort_unstable();
for entry in self.buffer.drain(..) {
yield Ok((entry.key.into(), entry.value.into()));
}
return Ok(());
}
// Flush buffer one more time
if self.buffer_size != 0 {
self.flush();
}
let mut heap = BinaryHeap::new();
// Anchor each data provider in the heap
for (current_id, data_provider) in self.data_providers.iter_mut().enumerate() {
if let Some((current_key, current_value)) = data_provider.to_next()? {
heap.push(Reverse((
Entry::new(current_key, current_value),
current_id,
)));
}
}
// Take the lowest entry from all data providers in the heap.
while let Some(Reverse((Entry { key, value }, id))) = heap.pop() {
yield Ok((key, value));
if let Some((next_key, next_value)) = self.data_providers[id].to_next()? {
// Insert another from the same data provider unless it's exhausted.
heap.push(Reverse((Entry::new(next_key, next_value), id)));
}
}
Ok(())
}),
}
}
}
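// Usage sketch (mirroring the tests below): push entries, then drain them
// back in ascending key order; temp files are only read if the buffer ever
// overflowed its capacity.
//
//   let dir = tempfile::tempdir()?;
//   let mut collector = Collector::new(&dir, OPTIMAL_BUFFER_CAPACITY);
//   collector.push(key, value);
//   for res in collector.iter() {
//       let (k, v) = res?; // Vec<u8> key/value, sorted by key
//   }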
pub struct CollectorIter<'a> {
done: bool,
inner: Pin<
Box<
dyn Generator<Yield = anyhow::Result<(Vec<u8>, Vec<u8>)>, Return = anyhow::Result<()>>
+ Send
+ 'a,
>,
>,
}
impl<'a> Iterator for CollectorIter<'a> {
type Item = anyhow::Result<(Vec<u8>, Vec<u8>)>;
fn next(&mut self) -> Option<Self::Item> {
if self.done {
return None;
}
match Pin::new(&mut self.inner).resume(()) {
GeneratorState::Yielded(res) => Some(res),
GeneratorState::Complete(res) => |
}
}
}
#[derive(Deref, DerefMut, From)]
pub struct TableCollector<'tmp, T>(Collector<'tmp, T::Key, T::Value>)
where
T: Table,
T::Key: TableEncode,
T::Value: TableEncode,
<<T as Table>::Key as TableEncode>::Encoded: Ord,
<<T as Table>::Value as TableEncode>::Encoded: Ord,
Vec<u8>: From<<<T as Table>::Key as TableEncode>::Encoded>,
Vec<u8>: From<<<T as Table>::Value as TableEncode>::Encoded>;
impl<'tmp, T> TableCollector<'tmp, T>
where
T: Table,
T::Key: TableEncode,
T::Value: TableEncode,
<<T as Table>::Key as TableEncode>::Encoded: Ord,
<<T as Table>::Value as TableEncode>::Encoded: Ord,
Vec<u8>: From<<<T as Table>::Key as TableEncode>::Encoded>,
Vec<u8>: From<<<T as Table>::Value as TableEncode>::Encoded>,
{
pub fn new(tempdir: &'tmp TempDir, buffer_capacity: usize) -> Self {
Self(Collector::new(tempdir, buffer_capacity))
}
pub fn into_inner(self) -> Collector<'tmp, T::Key, T::Value> {
self.0
}
#[allow(clippy::type_complexity)]
pub fn load<'tx>(
&mut self,
cursor: &mut MdbxCursor<'tx, RW, ErasedTable<T>>,
) -> anyhow::Result<()> {
for res in self.iter() {
let (k, v) = res?;
cursor.put(k, v)?;
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{
kv::{new_mem_database, tables},
models::BlockNumber,
};
#[test]
fn collect_all_at_once() {
// generate random entries
let mut entries: Vec<(_, _)> = (0..10000)
.map(|_| (rand::random(), BlockNumber(rand::random())))
.collect();
let db = new_mem_database().unwrap();
let tx = db.begin_mutable().unwrap();
let temp_dir = tempfile::tempdir().unwrap();
let mut collector = TableCollector::new(&temp_dir, OPTIMAL_BUFFER_CAPACITY);
for (key, value) in entries.clone() {
collector.push(key, value);
}
// Any cursor is fine
let mut cursor = tx.cursor(tables::HeaderNumber.erased()).unwrap();
collector.load(&mut cursor).unwrap();
// We sort the entries and compare them to what is in db
entries.sort_unstable();
for (key, value) in entries {
if let Some(expected_value) = tx.get(tables::HeaderNumber, key).unwrap() {
assert_eq!(value, expected_value);
}
}
}
#[test]
fn collect_chunks() {
fdlimit::raise_fd_limit();
// generate random entries
let mut entries: Vec<(_, _)> = (0..10000)
.map(|_| (rand::random(), BlockNumber(rand::random())))
.collect();
let db = new_mem_database().unwrap();
let tx = db.begin_mutable().unwrap();
let temp_dir = tempfile::tempdir().unwrap();
let mut collector = TableCollector::new(&temp_dir, 1000);
for (key, value) in entries.clone() {
collector.push(key, value);
}
// Any cursor is fine
let mut cursor = tx.cursor(tables::HeaderNumber.erased()).unwrap();
collector.load(&mut cursor).unwrap();
// We sort the entries and compare them to what is in db
entries.sort_unstable();
for (key, value) in entries {
if let Some(expected_value) = tx.get(tables::HeaderNumber, key).unwrap() {
assert_eq!(value, expected_value);
}
}
}
}
| {
self.done = true;
match res {
Ok(()) => None,
Err(e) => Some(Err(e)),
}
} |
dns_outline.js | export default "M19,15V19H5V15H19M20,13H4A1,1 0 0,0 3,14V20A1,1 0 0,0 4,21H20A1,1 0 0,0 21,20V14A1,1 0 0,0 20,13M7,18.5A1.5,1.5 0 0,1 5.5,17A1.5,1.5 0 0,1 7,15.5A1.5,1.5 0 0,1 8.5,17A1.5,1.5 0 0,1 7,18.5M19,5V9H5V5H19M20,3H4A1,1 0 0,0 3,4V10A1,1 0 0,0 4,11H20A1,1 0 0,0 21,10V4A1,1 0 0,0 20,3M7,8.5A1.5,1.5 0 0,1 5.5,7A1.5,1.5 0 0,1 7,5.5A1.5,1.5 0 0,1 8.5,7A1.5,1.5 0 0,1 7,8.5Z" | ||
hyperlinkedserializers.py | from django.conf.urls.defaults import patterns, url
from django.test import TestCase
from django.test.client import RequestFactory
from rest_framework import generics, status, serializers
from rest_framework.tests.models import Anchor, BasicModel, ManyToManyModel, BlogPost, BlogPostComment, Album, Photo
factory = RequestFactory()
class BlogPostCommentSerializer(serializers.ModelSerializer):
text = serializers.CharField()
blog_post_url = serializers.HyperlinkedRelatedField(source='blog_post', view_name='blogpost-detail')
class Meta:
model = BlogPostComment
fields = ('text', 'blog_post_url')
class PhotoSerializer(serializers.Serializer):
description = serializers.CharField()
album_url = serializers.HyperlinkedRelatedField(source='album', view_name='album-detail', queryset=Album.objects.all(), slug_field='title', slug_url_kwarg='title')
def restore_object(self, attrs, instance=None):
return Photo(**attrs)
class BasicList(generics.ListCreateAPIView):
model = BasicModel
model_serializer_class = serializers.HyperlinkedModelSerializer
class BasicDetail(generics.RetrieveUpdateDestroyAPIView):
model = BasicModel
model_serializer_class = serializers.HyperlinkedModelSerializer
class AnchorDetail(generics.RetrieveAPIView):
model = Anchor
model_serializer_class = serializers.HyperlinkedModelSerializer
class ManyToManyList(generics.ListAPIView):
model = ManyToManyModel
model_serializer_class = serializers.HyperlinkedModelSerializer
class ManyToManyDetail(generics.RetrieveAPIView):
|
class BlogPostCommentListCreate(generics.ListCreateAPIView):
model = BlogPostComment
serializer_class = BlogPostCommentSerializer
class BlogPostDetail(generics.RetrieveAPIView):
model = BlogPost
class PhotoListCreate(generics.ListCreateAPIView):
model = Photo
model_serializer_class = PhotoSerializer
class AlbumDetail(generics.RetrieveAPIView):
model = Album
urlpatterns = patterns('',
url(r'^basic/$', BasicList.as_view(), name='basicmodel-list'),
url(r'^basic/(?P<pk>\d+)/$', BasicDetail.as_view(), name='basicmodel-detail'),
url(r'^anchor/(?P<pk>\d+)/$', AnchorDetail.as_view(), name='anchor-detail'),
url(r'^manytomany/$', ManyToManyList.as_view(), name='manytomanymodel-list'),
url(r'^manytomany/(?P<pk>\d+)/$', ManyToManyDetail.as_view(), name='manytomanymodel-detail'),
url(r'^posts/(?P<pk>\d+)/$', BlogPostDetail.as_view(), name='blogpost-detail'),
url(r'^comments/$', BlogPostCommentListCreate.as_view(), name='blogpostcomment-list'),
url(r'^albums/(?P<title>\w[\w-]*)/$', AlbumDetail.as_view(), name='album-detail'),
url(r'^photos/$', PhotoListCreate.as_view(), name='photo-list')
)
class TestBasicHyperlinkedView(TestCase):
urls = 'rest_framework.tests.hyperlinkedserializers'
def setUp(self):
"""
        Create 3 BasicModel instances.
"""
items = ['foo', 'bar', 'baz']
for item in items:
BasicModel(text=item).save()
self.objects = BasicModel.objects
self.data = [
{'url': 'http://testserver/basic/%d/' % obj.id, 'text': obj.text}
for obj in self.objects.all()
]
self.list_view = BasicList.as_view()
self.detail_view = BasicDetail.as_view()
def test_get_list_view(self):
"""
GET requests to ListCreateAPIView should return list of objects.
"""
request = factory.get('/basic/')
response = self.list_view(request).render()
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(response.data, self.data)
def test_get_detail_view(self):
"""
        GET requests to RetrieveUpdateDestroyAPIView should return a single object.
"""
request = factory.get('/basic/1')
response = self.detail_view(request, pk=1).render()
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(response.data, self.data[0])
class TestManyToManyHyperlinkedView(TestCase):
urls = 'rest_framework.tests.hyperlinkedserializers'
def setUp(self):
"""
        Create a ManyToManyModel instance with 3 Anchor relations.
"""
items = ['foo', 'bar', 'baz']
anchors = []
for item in items:
anchor = Anchor(text=item)
anchor.save()
anchors.append(anchor)
manytomany = ManyToManyModel()
manytomany.save()
manytomany.rel.add(*anchors)
self.data = [{
'url': 'http://testserver/manytomany/1/',
'rel': [
'http://testserver/anchor/1/',
'http://testserver/anchor/2/',
'http://testserver/anchor/3/',
]
}]
self.list_view = ManyToManyList.as_view()
self.detail_view = ManyToManyDetail.as_view()
def test_get_list_view(self):
"""
        GET requests to ListAPIView should return list of objects.
"""
request = factory.get('/manytomany/')
response = self.list_view(request).render()
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(response.data, self.data)
def test_get_detail_view(self):
"""
        GET requests to RetrieveAPIView should return a single object.
"""
request = factory.get('/manytomany/1/')
response = self.detail_view(request, pk=1).render()
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(response.data, self.data[0])
class TestCreateWithForeignKeys(TestCase):
urls = 'rest_framework.tests.hyperlinkedserializers'
def setUp(self):
"""
Create a blog post
"""
self.post = BlogPost.objects.create(title="Test post")
self.create_view = BlogPostCommentListCreate.as_view()
def test_create_comment(self):
data = {
'text': 'A test comment',
'blog_post_url': 'http://testserver/posts/1/'
}
request = factory.post('/comments/', data=data)
response = self.create_view(request).render()
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(self.post.blogpostcomment_set.count(), 1)
self.assertEqual(self.post.blogpostcomment_set.all()[0].text, 'A test comment')
class TestCreateWithForeignKeysAndCustomSlug(TestCase):
urls = 'rest_framework.tests.hyperlinkedserializers'
def setUp(self):
"""
Create an Album
"""
self.post = Album.objects.create(title='test-album')
self.list_create_view = PhotoListCreate.as_view()
def test_create_photo(self):
data = {
'description': 'A test photo',
'album_url': 'http://testserver/albums/test-album/'
}
request = factory.post('/photos/', data=data)
response = self.list_create_view(request).render()
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(self.post.photo_set.count(), 1)
self.assertEqual(self.post.photo_set.all()[0].description, 'A test photo')
| model = ManyToManyModel
model_serializer_class = serializers.HyperlinkedModelSerializer |
types.ts | export interface Ticket {
userId: string;
app: string;
} | ||
hyperparams.py | """ Hyperparameters for MJC 2D navigation with discontinous target region. """
from __future__ import division
from datetime import datetime
import os.path
import numpy as np
from gps import __file__ as gps_filepath
from gps.agent.mjc.agent_mjc import AgentMuJoCo
from gps.algorithm.algorithm_traj_opt_pilqr import AlgorithmTrajOptPILQR
from gps.algorithm.cost.cost_state import CostState
from gps.algorithm.cost.cost_binary_region import CostBinaryRegion
from gps.algorithm.cost.cost_sum import CostSum
from gps.algorithm.dynamics.dynamics_lr_prior import DynamicsLRPrior
from gps.algorithm.dynamics.dynamics_prior_gmm import DynamicsPriorGMM
from gps.algorithm.traj_opt.traj_opt_pilqr import TrajOptPILQR
from gps.algorithm.policy.lin_gauss_init import init_lqr
from gps.proto.gps_pb2 import JOINT_ANGLES, JOINT_VELOCITIES, \
END_EFFECTOR_POINTS, END_EFFECTOR_POINT_VELOCITIES, ACTION
from gps.gui.config import generate_experiment_info
SENSOR_DIMS = {
JOINT_ANGLES: 2,
JOINT_VELOCITIES: 2,
END_EFFECTOR_POINTS: 3,
END_EFFECTOR_POINT_VELOCITIES: 3,
ACTION: 2,
}
BASE_DIR = '/'.join(str.split(gps_filepath, '/')[:-2])
EXP_DIR = BASE_DIR + '/../experiments/mjc_disc_cost_pilqr_example/'
common = {
'experiment_name': 'my_experiment' + '_' + \
datetime.strftime(datetime.now(), '%m-%d-%y_%H-%M'),
'experiment_dir': EXP_DIR,
'data_files_dir': EXP_DIR + 'data_files/',
'target_filename': EXP_DIR + 'target.npz',
'log_filename': EXP_DIR + 'log.txt',
'conditions': 1,
}
if not os.path.exists(common['data_files_dir']):
os.makedirs(common['data_files_dir']) | 'type': AgentMuJoCo,
'filename': './mjc_models/pointmass_disc_cost.xml',
'x0': [np.array([1., 0., 0., 0.])],
'dt': 0.05,
'substeps': 5,
'conditions': common['conditions'],
'T': 100,
'sensor_dims': SENSOR_DIMS,
'state_include': [JOINT_ANGLES, JOINT_VELOCITIES,
END_EFFECTOR_POINTS, END_EFFECTOR_POINT_VELOCITIES],
'obs_include': [JOINT_ANGLES, JOINT_VELOCITIES,
END_EFFECTOR_POINTS, END_EFFECTOR_POINT_VELOCITIES],
'camera_pos': np.array([5., 6., 6.5, 0., 0., 0.]),
'smooth_noise_var': 3.,
}
algorithm = {
'type': AlgorithmTrajOptPILQR,
'conditions': common['conditions'],
'iterations': 20,
'step_rule': 'res_percent',
'step_rule_res_ratio_inc': 0.05,
'step_rule_res_ratio_dec': 0.2,
}
algorithm['init_traj_distr'] = {
'type': init_lqr,
'init_var': 20.,
'dt': agent['dt'],
'T': agent['T'],
}
state_cost = {
'type': CostState,
'data_types' : {
END_EFFECTOR_POINTS: {
'wp': np.ones(SENSOR_DIMS[END_EFFECTOR_POINTS]),
'target_state': np.array([3., 0, 0]),
},
},
}
binary_region_cost = {
'type': CostBinaryRegion,
'data_types' : {
END_EFFECTOR_POINTS: {
'wp': np.ones(SENSOR_DIMS[END_EFFECTOR_POINTS]),
'target_state': np.array([2.5, 0.5, 0]),
'max_distance': 0.3,
'outside_cost': 0.,
'inside_cost': -3.,
},
},
}
algorithm['cost'] = {
'type': CostSum,
'costs': [state_cost, binary_region_cost],
'weights': [1., 10.],
}
algorithm['dynamics'] = {
'type': DynamicsLRPrior,
'regularization': 1e-6,
'prior': {
'type': DynamicsPriorGMM,
'max_clusters': 2,
'min_samples_per_cluster': 40,
'max_samples': 20,
}
}
algorithm['traj_opt'] = {
'type': TrajOptPILQR,
'kl_threshold': 1.0,
}
algorithm['policy_opt'] = {}
config = {
'iterations': algorithm['iterations'],
'num_samples': 20,
'verbose_trials': 1,
'common': common,
'agent': agent,
'gui_on': True,
'algorithm': algorithm,
}
common['info'] = generate_experiment_info(config) |
agent = { |
mail.go | package cmd
| rootCmd.AddCommand(mailCmd)
}
var mailCmd = &cobra.Command{
Use: "mail",
Short: "Mail commands",
} | import "github.com/spf13/cobra"
func init() { |
getMarks.ts | import { Editor } from 'slate';
import { EMarks } from '../text/TText';
import { TEditor, Value } from './TEditor';
| */
export const getMarks = <V extends Value>(editor: TEditor<V>) =>
Editor.marks(editor as any) as Partial<EMarks<V>> | null; | /**
* Get the marks that would be added to text at the current selection. |
gui.py | import tkinter as tk
import threading
from tkinter import scrolledtext
from tkinter import messagebox
ENCODING = 'utf-8'
class GUI(threading.Thread):
def __init__(self, client):
super().__init__(daemon=False, target=self.run)
self.font = ('Helvetica', 13)
self.client = client
self.login_window = None
self.main_window = None
def run(self):
self.login_window = LoginWindow(self, self.font)
self.main_window = ChatWindow(self, self.font)
self.notify_server(self.login_window.login, 'login')
self.main_window.run()
@staticmethod
def display_alert(message):
"""Display alert box"""
messagebox.showinfo('Error', message)
def update_login_list(self, active_users):
"""Update login list in main window with list of users"""
self.main_window.update_login_list(active_users)
def display_message(self, message):
"""Display message in ChatWindow"""
self.main_window.display_message(message)
def send_message(self, message):
"""Enqueue message in client's queue"""
        # debug trace of the outgoing message
        print('GUI sent: ' + message)
if self.client.target == 'ALL':
act = '2'
else:
act = '1 ' + self.client.target
self.client.queue.put(self.client.encapsulate(message, action=act))
def set_target(self, target):
"""Set target for messages"""
self.client.target = target
def notify_server(self, message, action):
"""Notify server after action was performed"""
#data = action + ";" + message
data = message
# data = data.encode(ENCODING) do not encode before sending!
self.client.notify_server(data, action)
def login(self, login):
self.client.notify_server(login, 'login')
def logout(self, logout):
self.client.notify_server(logout, 'logout')
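    # Protocol sketch, as used by send_message/notify_server above:
    #   action '2'          broadcast to every connected user
    #   action '1 <login>'  direct message to the selected user
    #   'login'/'logout'    presence notifications sent to the server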
class Window(object):
def __init__(self, title, font):
self.root = tk.Tk()
self.title = title
self.root.title(title)
self.font = font
class LoginWindow(Window):
|
class ChatWindow(Window):
def __init__(self, gui, font):
super().__init__("Secret Chat", font)
self.gui = gui
self.messages_list = None
self.logins_list = None
self.entry = None
self.send_button = None
self.exit_button = None
self.lock = threading.RLock()
self.target = ''
self.login = self.gui.login_window.login
self.build_window()
def build_window(self):
"""Build chat window, set widgets positioning and event bindings"""
# Size config
self.root.geometry('750x500')
self.root.minsize(600, 400)
# Frames config
main_frame = tk.Frame(self.root)
main_frame.grid(row=0, column=0, sticky=tk.N + tk.S + tk.W + tk.E)
self.root.rowconfigure(0, weight=1)
self.root.columnconfigure(0, weight=1)
# swap frame00 and frame01
# List of messages
frame00 = tk.Frame(main_frame)
frame00.grid(column=1, row=0, rowspan=2, sticky=tk.N + tk.S + tk.W + tk.E)
# List of logins
frame01 = tk.Frame(main_frame)
frame01.grid(column=0, row=0, rowspan=2, sticky=tk.N + tk.S + tk.W + tk.E)
# Message entry
frame02 = tk.Frame(main_frame)
frame02.grid(column=0, row=2, columnspan=2, sticky=tk.N + tk.S + tk.W + tk.E)
# Buttons
frame03 = tk.Frame(main_frame)
frame03.grid(column=0, row=3, columnspan=2, sticky=tk.N + tk.S + tk.W + tk.E)
main_frame.rowconfigure(0, weight=1)
main_frame.rowconfigure(1, weight=1)
main_frame.rowconfigure(2, weight=8)
main_frame.columnconfigure(0, weight=1)
main_frame.columnconfigure(1, weight=1)
# ScrolledText widget for displaying messages
self.messages_list = scrolledtext.ScrolledText(frame00, wrap='word', font=self.font)
self.messages_list.insert(tk.END, 'Start Your Secret Chat\n\n')
self.messages_list.configure(state='disabled')
# Listbox widget for displaying active users and selecting them
self.logins_list = tk.Listbox(frame01, selectmode=tk.SINGLE, font=self.font,
exportselection=False)
self.logins_list.bind('<<ListboxSelect>>', self.selected_login_event)
# Entry widget for typing messages in
self.entry = tk.Text(frame02, font=self.font)
self.entry.focus_set()
self.entry.bind('<Return>', self.send_entry_event)
# Button widget for sending messages
self.send_button = tk.Button(frame03, text='Send Message', font=self.font)
self.send_button.bind('<Button-1>', self.send_entry_event)
# Button for exiting
self.exit_button = tk.Button(frame03, text='Exit', font=self.font)
self.exit_button.bind('<Button-1>', self.exit_event)
# Positioning widgets in frame
self.logins_list.pack(fill=tk.BOTH, expand=tk.YES, side=tk.LEFT)
self.messages_list.pack(fill=tk.BOTH, expand=tk.YES, side=tk.LEFT)
self.entry.pack(side=tk.LEFT, fill=tk.BOTH, expand=tk.YES)
self.send_button.pack(side=tk.LEFT, fill=tk.BOTH, expand=tk.YES)
self.exit_button.pack(side=tk.LEFT, fill=tk.BOTH, expand=tk.YES)
# Protocol for closing window using 'x' button
self.root.protocol("WM_DELETE_WINDOW", self.on_closing_event)
def run(self):
"""Handle chat window actions"""
self.root.mainloop()
self.root.destroy()
def selected_login_event(self, event):
"""Set as target currently selected login on login list"""
target = self.logins_list.get(self.logins_list.curselection())
self.target = target
self.gui.set_target(target)
def send_entry_event(self, event):
"""Send message from entry field to target"""
text = self.entry.get(1.0, tk.END)
if text != '\n':
#message = 'msg;' + self.login + ';' + self.target + ';' + text[:-1]
message = text[:-1]
print(message)
self.gui.send_message(message)
self.entry.mark_set(tk.INSERT, 1.0)
self.entry.delete(1.0, tk.END)
self.entry.focus_set()
else:
messagebox.showinfo('Warning', 'You must enter a non-empty message')
with self.lock:
self.messages_list.configure(state='normal')
if text != '\n':
self.messages_list.insert(tk.END, text)
self.messages_list.configure(state='disabled')
self.messages_list.see(tk.END)
return 'break'
def exit_event(self, event):
"""Send logout message and quit app when "Exit" pressed"""
self.gui.notify_server(self.login, 'logout')
self.root.quit()
def on_closing_event(self):
"""Exit window when 'x' button is pressed"""
self.exit_event(None)
def display_message(self, message):
"""Display message in ScrolledText widget"""
with self.lock:
self.messages_list.configure(state='normal')
self.messages_list.insert(tk.END, message)
self.messages_list.configure(state='disabled')
self.messages_list.see(tk.END)
def update_login_list(self, active_users):
"""Update listbox with list of active users"""
self.logins_list.delete(0, tk.END)
for user in active_users:
self.logins_list.insert(tk.END, user)
self.logins_list.select_set(0)
self.target = self.logins_list.get(self.logins_list.curselection()) | def __init__(self, gui, font):
super().__init__("Login", font)
self.gui = gui
self.label = None
self.entry = None
self.button = None
self.login = None
self.build_window()
self.run()
def build_window(self):
"""Build login window, , set widgets positioning and event bindings"""
welcome_text = "Welcome to SECRET CHAT.\nEnter your name."
self.label = tk.Label(self.root, text=welcome_text, width=30, height=5, font=self.font)
self.label.pack(side=tk.TOP, expand=tk.YES)
self.entry = tk.Entry(self.root, width=15, font=self.font)
self.entry.focus_set()
self.entry.pack(side=tk.LEFT, expand=tk.YES)
self.entry.bind('<Return>', self.get_login_event)
self.button = tk.Button(self.root, text='Login', font=self.font)
self.button.pack(side=tk.LEFT, expand=tk.YES)
self.button.bind('<Button-1>', self.get_login_event)
def run(self):
"""Handle login window actions"""
self.root.mainloop()
self.root.destroy()
def get_login_event(self, event):
"""Get login from login box and close login window"""
self.login = self.entry.get()
self.root.quit() |
binop.rs | use codespan_reporting::diagnostic::Diagnostic;
use diagnostics::{span::Span, RtError};
use eyre::Result;
use parser::BinOp;
use value::{BigValue, HeapKey, Value};
use crate::RT;
macro_rules! binop_match {
(
$bindings:expr,
// Integer math ops
{ $($math_op_name:path => $math_op:tt),*$(,)? },
// Comparison
{ $($comparison_name:path => $comparison_op:tt),*$(,)? },
// Misc user stuff
{ $($user_lhs:pat => $user_rhs:expr),*$(,)? },
) => {
// TODO: decide if this is a good idea
#[allow(clippy::float_cmp)]
match $bindings {
$(
// TODO: Should we coerce int to float in 1 + 2.0
($math_op_name, Int(l), Int(r)) => Int(l $math_op r),
($math_op_name, Float(l), Float(r)) => Float(l $math_op r),
)*
$(
($comparison_name, Int(l), Int(r)) => Bool(l $comparison_op r),
($comparison_name, Float(l), Float(r)) => Bool(l $comparison_op r),
)*
$( $user_lhs => $user_rhs, )*
}
};
}
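// Illustrative expansion (an assumption for clarity, not compiler output): a
// math entry such as `BinOp::Plus => +` generates arms of the form
// `(BinOp::Plus, Int(l), Int(r)) => Int(l + r)` and
// `(BinOp::Plus, Float(l), Float(r)) => Float(l + r)`.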
pub fn binop<T: RT>(
this: &mut T,
l: Value,
o: BinOp,
r: Value,
l_span: Span,
o_span: Span,
r_span: Span,
) -> Result<Value, RtError> {
use Value::*;
Ok(binop_match!(
(o, l, r),
{
BinOp::Plus => +,
BinOp::Minus => -,
BinOp::Times => *,
BinOp::Devide => /,
},
{
BinOp::GreaterThan => >,
BinOp::GreaterThanEquals => >=,
BinOp::LessThan => <,
BinOp::LessThanEquals => <=,
},
{
(BinOp::LogicalOr, Bool(l), Bool(r)) => Bool (l || r),
(BinOp::LogicalAnd, Bool(l), Bool(r)) => Bool (l && r),
(BinOp::Equals, l, r) => Value::Bool(binop_eq(this,l, o, r, l_span, o_span, r_span)?),
(BinOp::NotEquals, l, r) => Value::Bool(!binop_eq(this,l, o, r, l_span, o_span, r_span)?),
(op, Complex(lid), Complex(rid)) => complex_binop(this,
lid,
op,
rid,
l_span,
o_span,
r_span,
)?,
(o, l, r) => return Err(binop_err(this,l, o, r, l_span, o_span, r_span))
}, | this: &T,
l: Value,
o: BinOp,
r: Value,
l_span: Span,
o_span: Span,
r_span: Span,
) -> Result<bool, RtError> {
Ok(match (l, r) {
// Simple cases
(Value::Int(l), Value::Int(r)) => l == r,
(Value::Float(l), Value::Float(r)) => l == r,
(Value::Bool(l), Value::Bool(r)) => l == r,
(Value::Null, Value::Null) => true,
(Value::Complex(lid), Value::Complex(rid)) => {
if lid == rid {
return Ok(true);
}
match (&this.heap()[lid], &this.heap()[rid]) {
(BigValue::String(l), BigValue::String(r)) => l == r,
(BigValue::Array(ls), BigValue::Array(rs)) => {
if ls.len() != rs.len() {
false
} else {
// Iterate over all the items and compare each pair, returning true if
// they're all equal, or the first error.
ls.iter().zip(rs).try_fold(true, |acc, (l, r)| {
Ok(
// TODO: l_span and `o_span` are wrong.
acc && binop_eq(this, *l, o, *r, l_span, o_span, r_span)?,
)
})?
}
}
(BigValue::Map(lm), BigValue::Map(rm)) => {
if lm.len() != rm.len() {
false
} else {
for ((lk, lv), (rk, rv)) in lm.iter().zip(rm) {
if lk != rk || !binop_eq(this, *lv, o, *rv, l_span, o_span, r_span)? {
return Ok(false);
}
}
true
}
}
_ => {
return Err(binop_err(
this,
Value::Complex(lid),
o,
Value::Complex(rid),
l_span,
o_span,
r_span,
))
}
}
}
(l, r) => return Err(binop_err(this, l, o, r, l_span, o_span, r_span)),
})
}
fn complex_binop<T: RT>(
this: &mut T,
lid: HeapKey,
o: BinOp,
rid: HeapKey,
l_span: Span,
o_span: Span,
r_span: Span,
) -> Result<Value, RtError> {
Ok(match (o, &this.heap()[lid], &this.heap()[rid]) {
// (BinOp::Equals, _, _) => Value::Bool(binop_eq(this,lid, rid, l_span, o_span, r_span)?),
// (BinOp::NotEquals, _, _) => {
// Value::Bool(!binop_eq(this,lid, rid, l_span, o_span, r_span)?)
// }
(BinOp::Plus, BigValue::String(l), BigValue::String(r)) => {
let res = l.to_owned() + r;
let key = this.heap_mut().insert(BigValue::String(res));
Value::Complex(key)
}
_ => {
return Err(binop_err(
this,
Value::Complex(lid),
o,
Value::Complex(rid),
l_span,
o_span,
r_span,
))
}
})
}
fn binop_err<T: RT>(
this: &T,
l: Value,
o: BinOp,
r: Value,
l_span: Span,
o_span: Span,
r_span: Span,
) -> RtError {
RtError(
Diagnostic::error()
.with_message(format!(
"Unknown binop `{}`, for `{}` and `{}`",
o,
this.type_name(&l),
this.type_name(&r),
))
.with_labels(vec![
o_span.primary_label().with_message("In this operator"),
this.evaled_to(l, l_span),
this.evaled_to(r, r_span),
]),
)
} | ))
}
fn binop_eq<T: RT>( |
__init__.py | """
Component to interface with an alarm control panel.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/alarm_control_panel/
"""
import logging
import os
from homeassistant.components import verisure
from homeassistant.const import (
ATTR_CODE, ATTR_CODE_FORMAT, ATTR_ENTITY_ID, SERVICE_ALARM_TRIGGER,
SERVICE_ALARM_DISARM, SERVICE_ALARM_ARM_HOME, SERVICE_ALARM_ARM_AWAY)
from homeassistant.config import load_yaml_config_file
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
DOMAIN = 'alarm_control_panel'
SCAN_INTERVAL = 30
ENTITY_ID_FORMAT = DOMAIN + '.{}'
# Maps discovered services to their platforms
DISCOVERY_PLATFORMS = {
verisure.DISCOVER_ALARMS: 'verisure'
}
SERVICE_TO_METHOD = {
SERVICE_ALARM_DISARM: 'alarm_disarm',
SERVICE_ALARM_ARM_HOME: 'alarm_arm_home',
SERVICE_ALARM_ARM_AWAY: 'alarm_arm_away',
SERVICE_ALARM_TRIGGER: 'alarm_trigger'
}
ATTR_TO_PROPERTY = [
ATTR_CODE,
ATTR_CODE_FORMAT
]
def setup(hass, config):
"""Track states and offer events for sensors."""
component = EntityComponent(
logging.getLogger(__name__), DOMAIN, hass, SCAN_INTERVAL,
DISCOVERY_PLATFORMS)
component.setup(config)
def alarm_service_handler(service):
|
descriptions = load_yaml_config_file(
os.path.join(os.path.dirname(__file__), 'services.yaml'))
for service in SERVICE_TO_METHOD:
hass.services.register(DOMAIN, service, alarm_service_handler,
descriptions.get(service))
return True
def alarm_disarm(hass, code=None, entity_id=None):
"""Send the alarm the command for disarm."""
data = {}
if code:
data[ATTR_CODE] = code
if entity_id:
data[ATTR_ENTITY_ID] = entity_id
hass.services.call(DOMAIN, SERVICE_ALARM_DISARM, data)
def alarm_arm_home(hass, code=None, entity_id=None):
"""Send the alarm the command for arm home."""
data = {}
if code:
data[ATTR_CODE] = code
if entity_id:
data[ATTR_ENTITY_ID] = entity_id
hass.services.call(DOMAIN, SERVICE_ALARM_ARM_HOME, data)
def alarm_arm_away(hass, code=None, entity_id=None):
"""Send the alarm the command for arm away."""
data = {}
if code:
data[ATTR_CODE] = code
if entity_id:
data[ATTR_ENTITY_ID] = entity_id
hass.services.call(DOMAIN, SERVICE_ALARM_ARM_AWAY, data)
def alarm_trigger(hass, code=None, entity_id=None):
"""Send the alarm the command for trigger."""
data = {}
if code:
data[ATTR_CODE] = code
if entity_id:
data[ATTR_ENTITY_ID] = entity_id
hass.services.call(DOMAIN, SERVICE_ALARM_TRIGGER, data)
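# Minimal usage sketch (the code and entity id below are hypothetical):
# alarm_disarm(hass, code='1234', entity_id='alarm_control_panel.home')
# alarm_arm_away(hass, code='1234')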
# pylint: disable=no-self-use
class AlarmControlPanel(Entity):
"""An abstract class for alarm control devices."""
@property
def code_format(self):
"""Regex for code format or None if no code is required."""
return None
def alarm_disarm(self, code=None):
"""Send disarm command."""
raise NotImplementedError()
def alarm_arm_home(self, code=None):
"""Send arm home command."""
raise NotImplementedError()
def alarm_arm_away(self, code=None):
"""Send arm away command."""
raise NotImplementedError()
def alarm_trigger(self, code=None):
"""Send alarm trigger command."""
raise NotImplementedError()
@property
def state_attributes(self):
"""Return the state attributes."""
state_attr = {
ATTR_CODE_FORMAT: self.code_format,
}
return state_attr
| """Map services to methods on Alarm."""
target_alarms = component.extract_from_service(service)
if ATTR_CODE not in service.data:
code = None
else:
code = service.data[ATTR_CODE]
method = SERVICE_TO_METHOD[service.service]
for alarm in target_alarms:
getattr(alarm, method)(code)
if alarm.should_poll:
alarm.update_ha_state(True) |
histogram.py | from typing import Optional, Tuple
import torch
def marginal_pdf(
values: torch.Tensor, bins: torch.Tensor, sigma: torch.Tensor, epsilon: float = 1e-10
) -> Tuple[torch.Tensor, torch.Tensor]:
|
def joint_pdf(kernel_values1: torch.Tensor, kernel_values2: torch.Tensor, epsilon: float = 1e-10) -> torch.Tensor:
"""Calculate the joint probability distribution function of the input tensors based on the number of histogram
bins.
Args:
kernel_values1: shape [BxNxNUM_BINS].
kernel_values2: shape [BxNxNUM_BINS].
epsilon: scalar, for numerical stability.
Returns:
shape [BxNUM_BINSxNUM_BINS].
"""
if not isinstance(kernel_values1, torch.Tensor):
raise TypeError(f"Input kernel_values1 type is not a torch.Tensor. Got {type(kernel_values1)}")
if not isinstance(kernel_values2, torch.Tensor):
raise TypeError(f"Input kernel_values2 type is not a torch.Tensor. Got {type(kernel_values2)}")
if not kernel_values1.dim() == 3:
raise ValueError("Input kernel_values1 must be a of the shape BxN." " Got {}".format(kernel_values1.shape))
if not kernel_values2.dim() == 3:
raise ValueError("Input kernel_values2 must be a of the shape BxN." " Got {}".format(kernel_values2.shape))
if kernel_values1.shape != kernel_values2.shape:
raise ValueError(
"Inputs kernel_values1 and kernel_values2 must have the same shape."
" Got {} and {}".format(kernel_values1.shape, kernel_values2.shape)
)
joint_kernel_values = torch.matmul(kernel_values1.transpose(1, 2), kernel_values2)
normalization = torch.sum(joint_kernel_values, dim=(1, 2)).view(-1, 1, 1) + epsilon
pdf = joint_kernel_values / normalization
return pdf
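# Sketch (assumed shapes): two kernel_values tensors of shape [B, N, NUM_BINS]
# from marginal_pdf yield a joint pdf of shape [B, NUM_BINS, NUM_BINS] whose
# entries sum to ~1 over the last two dimensions for each batch element.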
def histogram(x: torch.Tensor, bins: torch.Tensor, bandwidth: torch.Tensor, epsilon: float = 1e-10) -> torch.Tensor:
"""Estimate the histogram of the input tensor.
The calculation uses kernel density estimation which requires a bandwidth (smoothing) parameter.
Args:
x: Input tensor to compute the histogram with shape :math:`(B, D)`.
bins: The number of bins to use the histogram :math:`(N_{bins})`.
bandwidth: Gaussian smoothing factor with shape [1].
epsilon: A scalar, for numerical stability.
Returns:
Computed histogram of shape :math:`(B, N_{bins})`.
Examples:
>>> x = torch.rand(1, 10)
>>> bins = torch.torch.linspace(0, 255, 128)
>>> hist = histogram(x, bins, bandwidth=torch.tensor(0.9))
>>> hist.shape
torch.Size([1, 128])
"""
pdf, _ = marginal_pdf(x.unsqueeze(2), bins, bandwidth, epsilon)
return pdf
def histogram2d(
x1: torch.Tensor, x2: torch.Tensor, bins: torch.Tensor, bandwidth: torch.Tensor, epsilon: float = 1e-10
) -> torch.Tensor:
"""Estimate the 2d histogram of the input tensor.
The calculation uses kernel density estimation which requires a bandwidth (smoothing) parameter.
Args:
x1: Input tensor to compute the histogram with shape :math:`(B, D1)`.
x2: Input tensor to compute the histogram with shape :math:`(B, D2)`.
bins: The number of bins to use the histogram :math:`(N_{bins})`.
bandwidth: Gaussian smoothing factor with shape [1].
epsilon: A scalar, for numerical stability. Default: 1e-10.
Returns:
Computed histogram of shape :math:`(B, N_{bins}, N_{bins})`.
Examples:
>>> x1 = torch.rand(2, 32)
>>> x2 = torch.rand(2, 32)
>>> bins = torch.torch.linspace(0, 255, 128)
>>> hist = histogram2d(x1, x2, bins, bandwidth=torch.tensor(0.9))
>>> hist.shape
torch.Size([2, 128, 128])
"""
_, kernel_values1 = marginal_pdf(x1.unsqueeze(2), bins, bandwidth, epsilon)
_, kernel_values2 = marginal_pdf(x2.unsqueeze(2), bins, bandwidth, epsilon)
pdf = joint_pdf(kernel_values1, kernel_values2)
return pdf
def image_histogram2d(
image: torch.Tensor,
min: float = 0.0,
max: float = 255.0,
n_bins: int = 256,
bandwidth: Optional[float] = None,
centers: Optional[torch.Tensor] = None,
return_pdf: bool = False,
kernel: str = "triangular",
eps: float = 1e-10,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Estimate the histogram of the input image(s).
The calculation uses triangular kernel density estimation.
Args:
image: Input tensor to compute the histogram with shape
:math:`(H, W)`, :math:`(C, H, W)` or :math:`(B, C, H, W)`.
min: Lower end of the interval (inclusive).
max: Upper end of the interval (inclusive). Ignored when
:attr:`centers` is specified.
n_bins: The number of histogram bins. Ignored when
:attr:`centers` is specified.
bandwidth: Smoothing factor. If not specified or equal to -1,
:math:`(bandwidth = (max - min) / n_bins)`.
centers: Centers of the bins with shape :math:`(n_bins,)`.
If not specified or empty, it is calculated as centers of
equal width bins of [min, max] range.
return_pdf: If True, also return probability densities for
each bin.
kernel: kernel to perform kernel density estimation
``(`triangular`, `gaussian`, `uniform`, `epanechnikov`)``.
Returns:
Computed histogram of shape :math:`(bins)`, :math:`(C, bins)`,
:math:`(B, C, bins)`.
Computed probability densities of shape :math:`(bins)`, :math:`(C, bins)`,
:math:`(B, C, bins)`, if return_pdf is ``True``. Tensor of zeros with shape
of the histogram otherwise.
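Examples (shape-only sketch; inputs are random, so only shapes are checked):
>>> x = torch.rand(1, 1, 10, 10)
>>> hist, _ = image_histogram2d(x, 0.0, 255.0, 256)
>>> hist.shape
torch.Size([1, 1, 256])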
"""
if image is not None and not isinstance(image, torch.Tensor):
raise TypeError(f"Input image type is not a torch.Tensor. Got {type(image)}.")
if centers is not None and not isinstance(centers, torch.Tensor):
raise TypeError(f"Bins' centers type is not a torch.Tensor. Got {type(centers)}.")
if centers is not None and len(centers.shape) > 0 and centers.dim() != 1:
raise ValueError(f"Bins' centers must be a torch.Tensor of the shape (n_bins,). Got {centers.shape}.")
if not isinstance(min, float):
raise TypeError(f"Type of lower end of the range is not a float. Got {type(min)}.")
if not isinstance(max, float):
raise TypeError(f"Type of upper end of the range is not a float. Got {type(min)}.")
if not isinstance(n_bins, int):
raise TypeError(f"Type of number of bins is not an int. Got {type(n_bins)}.")
if bandwidth is not None and not isinstance(bandwidth, float):
raise TypeError(f"Bandwidth type is not a float. Got {type(bandwidth)}.")
if not isinstance(return_pdf, bool):
raise TypeError(f"Return_pdf type is not a bool. Got {type(return_pdf)}.")
if bandwidth is None:
bandwidth = (max - min) / n_bins
if centers is None:
centers = min + bandwidth * (torch.arange(n_bins, device=image.device, dtype=image.dtype) + 0.5)
centers = centers.reshape(-1, 1, 1, 1, 1)
u = torch.abs(image.unsqueeze(0) - centers) / bandwidth
if kernel == "gaussian":
kernel_values = torch.exp(-0.5 * u ** 2)
elif kernel in ("triangular", "uniform", "epanechnikov",):
# compute the mask and cast to floating point
mask = (u <= 1).to(u.dtype)
if kernel == "triangular":
kernel_values = (1. - u) * mask
elif kernel == "uniform":
kernel_values = torch.ones_like(u) * mask
else: # kernel == "epanechnikov"
kernel_values = (1. - u ** 2) * mask
else:
raise ValueError(f"Kernel must be 'triangular', 'gaussian', " f"'uniform' or 'epanechnikov'. Got {kernel}.")
hist = torch.sum(kernel_values, dim=(-2, -1)).permute(1, 2, 0)
if return_pdf:
normalization = torch.sum(hist, dim=-1, keepdim=True) + eps
pdf = hist / normalization
if image.dim() == 2:
hist = hist.squeeze()
pdf = pdf.squeeze()
elif image.dim() == 3:
hist = hist.squeeze(0)
pdf = pdf.squeeze(0)
return hist, pdf
if image.dim() == 2:
hist = hist.squeeze()
elif image.dim() == 3:
hist = hist.squeeze(0)
return hist, torch.zeros_like(hist)
| """Calculate the marginal probability distribution function of the input tensor based on the number of
histogram bins.
Args:
values: shape [BxNx1].
bins: shape [NUM_BINS].
sigma: shape [1], gaussian smoothing factor.
epsilon: scalar, for numerical stability.
Returns:
Tuple[torch.Tensor, torch.Tensor]:
- torch.Tensor: shape [BxNUM_BINS].
- torch.Tensor: shape [BxNxNUM_BINS].
"""
if not isinstance(values, torch.Tensor):
raise TypeError(f"Input values type is not a torch.Tensor. Got {type(values)}")
if not isinstance(bins, torch.Tensor):
raise TypeError(f"Input bins type is not a torch.Tensor. Got {type(bins)}")
if not isinstance(sigma, torch.Tensor):
raise TypeError(f"Input sigma type is not a torch.Tensor. Got {type(sigma)}")
if not values.dim() == 3:
raise ValueError("Input values must be a of the shape BxNx1." " Got {}".format(values.shape))
if not bins.dim() == 1:
raise ValueError("Input bins must be a of the shape NUM_BINS" " Got {}".format(bins.shape))
if not sigma.dim() == 0:
raise ValueError("Input sigma must be a of the shape 1" " Got {}".format(sigma.shape))
residuals = values - bins.unsqueeze(0).unsqueeze(0)
kernel_values = torch.exp(-0.5 * (residuals / sigma).pow(2))
pdf = torch.mean(kernel_values, dim=1)
normalization = torch.sum(pdf, dim=1).unsqueeze(1) + epsilon
pdf = pdf / normalization
return pdf, kernel_values |
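# Minimal usage sketch (shapes only; the inputs here are illustrative):
# pdf, kv = marginal_pdf(torch.rand(2, 100, 1), torch.linspace(0.0, 1.0, 32), torch.tensor(0.1))
# gives pdf of shape [2, 32] and kv of shape [2, 100, 32].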
difficulty.go | // Copyright (c) 2013-2016 The btcsuite developers
// Copyright (c) 2015-2018 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package wallet
// This code was copied from dcrd/blockchain/difficulty.go and modified for
// dcrwallet's header storage.
import (
"math/big"
"time"
"github.com/decred/dcrd/blockchain"
"github.com/decred/dcrd/chaincfg"
"github.com/decred/dcrd/chaincfg/chainhash"
"github.com/decred/dcrd/dcrutil"
"github.com/decred/dcrd/wire"
"github.com/decred/dcrwallet/deployments"
"github.com/decred/dcrwallet/errors"
"github.com/decred/dcrwallet/wallet/v2/walletdb"
)
// maxShift is the maximum shift for a difficulty that resets (e.g.
// testnet difficulty).
const maxShift uint = 256
var (
// bigZero is 0 represented as a big.Int. It is defined here to avoid
// the overhead of creating it multiple times.
bigZero = big.NewInt(0)
// bigOne is 1 represented as a big.Int. It is defined here to avoid
// the overhead of creating it multiple times.
bigOne = big.NewInt(1)
// oneLsh256 is 1 shifted left 256 bits. It is defined here to avoid
// the overhead of creating it multiple times.
oneLsh256 = new(big.Int).Lsh(bigOne, 256)
)
// findPrevTestNetDifficulty returns the difficulty of the previous block which
// did not have the special testnet minimum difficulty rule applied.
func (w *Wallet) findPrevTestNetDifficulty(dbtx walletdb.ReadTx, h *wire.BlockHeader, chain []*BlockNode) (uint32, error) {
// Search backwards through the chain for the last block without
// the special rule applied.
blocksPerRetarget := w.chainParams.WorkDiffWindowSize * w.chainParams.WorkDiffWindows
for int64(h.Height)%blocksPerRetarget != 0 && h.Bits == w.chainParams.PowLimitBits {
if h.PrevBlock == (chainhash.Hash{}) {
h = nil
break
}
if len(chain) > 0 && int32(h.Height)-int32(chain[0].Header.Height) > 0 {
h = chain[h.Height-chain[0].Header.Height-1].Header
} else {
var err error
h, err = w.TxStore.GetBlockHeader(dbtx, &h.PrevBlock)
if err != nil {
return 0, err
}
}
}
// Return the found difficulty or the minimum difficulty if no
// appropriate block was found.
lastBits := w.chainParams.PowLimitBits
if h != nil {
lastBits = h.Bits
}
return lastBits, nil
}
// nextRequiredPoWDifficulty calculates the required proof-of-work difficulty
// for the block that references header as a parent.
func (w *Wallet) nextRequiredPoWDifficulty(dbtx walletdb.ReadTx, header *wire.BlockHeader, chain []*BlockNode, newBlockTime time.Time) (uint32, error) {
// Get the old difficulty; if we aren't at a block height where it changes,
// just return this.
oldDiff := header.Bits
oldDiffBig := blockchain.CompactToBig(header.Bits)
// We're not at a retarget point, return the oldDiff.
if (int64(header.Height)+1)%w.chainParams.WorkDiffWindowSize != 0 {
// For networks that support it, allow special reduction of the
// required difficulty once too much time has elapsed without
// mining a block.
if w.chainParams.ReduceMinDifficulty {
// Return minimum difficulty when more than the desired
// amount of time has elapsed without mining a block.
reductionTime := int64(w.chainParams.MinDiffReductionTime /
time.Second)
allowMinTime := header.Timestamp.Unix() + reductionTime
if newBlockTime.Unix() > allowMinTime {
return w.chainParams.PowLimitBits, nil
}
// The block was mined within the desired timeframe, so
// return the difficulty for the last block which did
// not have the special minimum difficulty rule applied.
return w.findPrevTestNetDifficulty(dbtx, header, chain)
}
return oldDiff, nil
}
// Declare some useful variables.
RAFBig := big.NewInt(w.chainParams.RetargetAdjustmentFactor)
nextDiffBigMin := blockchain.CompactToBig(header.Bits)
nextDiffBigMin.Div(nextDiffBigMin, RAFBig)
nextDiffBigMax := blockchain.CompactToBig(header.Bits)
nextDiffBigMax.Mul(nextDiffBigMax, RAFBig)
alpha := w.chainParams.WorkDiffAlpha
// Number of nodes to traverse while calculating difficulty.
nodesToTraverse := (w.chainParams.WorkDiffWindowSize *
w.chainParams.WorkDiffWindows)
// Initialize bigInt slice for the percentage changes for each window period
// above or below the target.
windowChanges := make([]*big.Int, w.chainParams.WorkDiffWindows)
// Regress through all of the previous blocks and store the percent changes
// per window period; use bigInts to emulate 64.32 bit fixed point.
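// Illustration of the 64.32 fixed-point trick (numbers are hypothetical): a
// 150s window against a 300s target becomes (150 << 32) / 300, i.e. 0.5
// scaled by 2^32; the later Rsh(nextDiffBig, 32) strips that padding again.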
var olderTime, windowPeriod int64
var weights uint64
oldHeader := header
recentTime := header.Timestamp.Unix()
for i := int64(0); ; i++ {
// Store and reset after reaching the end of every window period.
if i%w.chainParams.WorkDiffWindowSize == 0 && i != 0 {
olderTime = oldHeader.Timestamp.Unix()
timeDifference := recentTime - olderTime
// Just assume we're at the target (no change) if we've
// gone all the way back to the genesis block.
if oldHeader.Height == 0 {
timeDifference = int64(w.chainParams.TargetTimespan /
time.Second)
}
timeDifBig := big.NewInt(timeDifference)
timeDifBig.Lsh(timeDifBig, 32) // Add padding
targetTemp := big.NewInt(int64(w.chainParams.TargetTimespan /
time.Second))
windowAdjusted := targetTemp.Div(timeDifBig, targetTemp)
// Weight it exponentially. Be aware that this could at some point
// overflow if alpha or the number of blocks used is really large.
windowAdjusted = windowAdjusted.Lsh(windowAdjusted,
uint((w.chainParams.WorkDiffWindows-windowPeriod)*alpha))
// Sum up all the different weights incrementally.
weights += 1 << uint64((w.chainParams.WorkDiffWindows-windowPeriod)*
alpha)
// Store it in the slice.
windowChanges[windowPeriod] = windowAdjusted
windowPeriod++
recentTime = olderTime
}
if i == nodesToTraverse {
break // Exit for loop when we hit the end.
}
// Get the previous node while staying at the genesis block as needed.
// Query the header from the provided chain instead of database if
// present. The parent of chain[0] is guaranteed to be in stored in the
// database.
if oldHeader.Height != 0 {
if len(chain) > 0 && int32(oldHeader.Height)-int32(chain[0].Header.Height) > 0 {
oldHeader = chain[oldHeader.Height-chain[0].Header.Height-1].Header
} else {
var err error
oldHeader, err = w.TxStore.GetBlockHeader(dbtx, &oldHeader.PrevBlock)
if err != nil {
return 0, err
}
}
}
}
// Sum up the weighted window periods.
weightedSum := big.NewInt(0)
for i := int64(0); i < w.chainParams.WorkDiffWindows; i++ {
weightedSum.Add(weightedSum, windowChanges[i])
}
// Divide by the sum of all weights.
weightsBig := big.NewInt(int64(weights))
weightedSumDiv := weightedSum.Div(weightedSum, weightsBig)
// Multiply by the old diff.
nextDiffBig := weightedSumDiv.Mul(weightedSumDiv, oldDiffBig)
// Right shift to restore the original padding (restore non-fixed point).
nextDiffBig = nextDiffBig.Rsh(nextDiffBig, 32)
// Check to see if we're over the limits for the maximum allowable retarget;
// if we are, return the maximum or minimum except in the case that oldDiff
// is zero.
if oldDiffBig.Cmp(bigZero) == 0 { // This should never really happen,
nextDiffBig.Set(nextDiffBig) // but in case it does...
} else if nextDiffBig.Cmp(bigZero) == 0 {
nextDiffBig.Set(w.chainParams.PowLimit)
} else if nextDiffBig.Cmp(nextDiffBigMax) == 1 {
nextDiffBig.Set(nextDiffBigMax)
} else if nextDiffBig.Cmp(nextDiffBigMin) == -1 {
nextDiffBig.Set(nextDiffBigMin)
}
// Limit new value to the proof of work limit.
if nextDiffBig.Cmp(w.chainParams.PowLimit) > 0 {
nextDiffBig.Set(w.chainParams.PowLimit)
}
// Log new target difficulty and return it. The new target logging is
// intentionally converting the bits back to a number instead of using
// newTarget since conversion to the compact representation loses
// precision.
nextDiffBits := blockchain.BigToCompact(nextDiffBig)
log.Debugf("Difficulty retarget at block height %d", header.Height+1)
log.Debugf("Old target %08x (%064x)", header.Bits, oldDiffBig)
log.Debugf("New target %08x (%064x)", nextDiffBits, blockchain.CompactToBig(nextDiffBits))
return nextDiffBits, nil
}
// estimateSupply returns an estimate of the coin supply for the provided block
// height. This is primarily used in the stake difficulty algorithm and relies
// on an estimate to simplify the necessary calculations. The actual total
// coin supply as of a given block height depends on many factors such as the
// number of votes included in every prior block (not including all votes
// reduces the subsidy) and whether or not any of the prior blocks have been
// invalidated by stakeholders thereby removing the PoW subsidy for them.
func estimateSupply(params *chaincfg.Params, height int64) int64 {
if height <= 0 {
return 0
}
// Estimate the supply by calculating the full block subsidy for each
// reduction interval and multiplying it by the number of blocks in the
// interval, then adding the subsidy produced by the number of blocks in the
// current interval.
supply := params.BlockOneSubsidy()
reductions := height / params.SubsidyReductionInterval
subsidy := params.BaseSubsidy
for i := int64(0); i < reductions; i++ {
supply += params.SubsidyReductionInterval * subsidy
subsidy *= params.MulSubsidy
subsidy /= params.DivSubsidy
}
supply += (1 + height%params.SubsidyReductionInterval) * subsidy
// Blocks 0 and 1 have special subsidy amounts that have already been
// added above, so remove what their subsidies would have normally been
// which were also added above.
supply -= params.BaseSubsidy * 2
return supply
}
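// As a rough illustration with made-up parameters (not real chain constants):
// BaseSubsidy = 1000, SubsidyReductionInterval = 10 and a MulSubsidy/DivSubsidy
// ratio of 1/2 give, at height 25, BlockOneSubsidy() + 10*1000 + 10*500 +
// 6*250 - 2*1000, i.e. BlockOneSubsidy() + 14500.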
// sumPurchasedTickets returns the sum of the number of tickets purchased in the
// most recent specified number of blocks from the point of view of the passed
// header.
func (w *Wallet) sumPurchasedTickets(dbtx walletdb.ReadTx, startHeader *wire.BlockHeader, chain []*BlockNode, numToSum int64) (int64, error) {
var numPurchased int64
for h, numTraversed := startHeader, int64(0); h != nil && numTraversed < numToSum; numTraversed++ {
numPurchased += int64(h.FreshStake)
if h.PrevBlock == (chainhash.Hash{}) {
break
}
if len(chain) > 0 && int32(h.Height)-int32(chain[0].Header.Height) > 0 {
h = chain[h.Height-chain[0].Header.Height-1].Header
continue
}
var err error
h, err = w.TxStore.GetBlockHeader(dbtx, &h.PrevBlock)
if err != nil {
return 0, err
}
}
return numPurchased, nil
}
// calcNextStakeDiffV2 calculates the next stake difficulty for the given set
// of parameters using the algorithm defined in DCP0001.
//
// This function contains the heart of the algorithm and thus is separated for
// use in both the actual stake difficulty calculation as well as estimation.
//
// The caller must perform all of the necessary chain traversal in order to
// get the current difficulty, previous retarget interval's pool size plus
// its immature tickets, as well as the current pool size plus immature tickets.
func | (params *chaincfg.Params, nextHeight, curDiff, prevPoolSizeAll, curPoolSizeAll int64) int64 {
// Shorter version of various parameter for convenience.
votesPerBlock := int64(params.TicketsPerBlock)
ticketPoolSize := int64(params.TicketPoolSize)
ticketMaturity := int64(params.TicketMaturity)
// Calculate the difficulty by multiplying the old stake difficulty
// with two ratios that represent a force to counteract the relative
// change in the pool size (Fc) and a restorative force to push the pool
// size towards the target value (Fr).
//
// Per DCP0001, the generalized equation is:
//
// nextDiff = min(max(curDiff * Fc * Fr, Slb), Sub)
//
// The detailed form expands to:
//
// curPoolSizeAll curPoolSizeAll
// nextDiff = curDiff * --------------- * -----------------
// prevPoolSizeAll targetPoolSizeAll
//
// Slb = w.chainParams.MinimumStakeDiff
//
// estimatedTotalSupply
// Sub = -------------------------------
// targetPoolSize / votesPerBlock
//
// In order to avoid the need to perform floating point math which could
// be problematic across languages due to uncertainty in floating point
// math libs, this is further simplified to integer math as follows:
//
// curDiff * curPoolSizeAll^2
// nextDiff = -----------------------------------
// prevPoolSizeAll * targetPoolSizeAll
//
// Further, the Sub parameter must calculate the denominator first using
// integer math.
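// Hedged numeric illustration (the values are hypothetical, mainnet-like):
// with curDiff = 100, curPoolSizeAll = 50000, prevPoolSizeAll = 40000 and
// targetPoolSizeAll = 5 * (8192 + 256) = 42240, the integer math yields
// 100 * 50000 * 50000 / 40000 / 42240 = 147 before the clamps below.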
targetPoolSizeAll := votesPerBlock * (ticketPoolSize + ticketMaturity)
curPoolSizeAllBig := big.NewInt(curPoolSizeAll)
nextDiffBig := big.NewInt(curDiff)
nextDiffBig.Mul(nextDiffBig, curPoolSizeAllBig)
nextDiffBig.Mul(nextDiffBig, curPoolSizeAllBig)
nextDiffBig.Div(nextDiffBig, big.NewInt(prevPoolSizeAll))
nextDiffBig.Div(nextDiffBig, big.NewInt(targetPoolSizeAll))
// Limit the new stake difficulty between the minimum allowed stake
// difficulty and a maximum value that is relative to the total supply.
//
// NOTE: This is intentionally using integer math to prevent any
// potential issues due to uncertainty in floating point math libs. The
// ticketPoolSize parameter already contains the result of
// (targetPoolSize / votesPerBlock).
nextDiff := nextDiffBig.Int64()
estimatedSupply := estimateSupply(params, nextHeight)
maximumStakeDiff := estimatedSupply / ticketPoolSize
if nextDiff > maximumStakeDiff {
nextDiff = maximumStakeDiff
}
if nextDiff < params.MinimumStakeDiff {
nextDiff = params.MinimumStakeDiff
}
return nextDiff
}
func (w *Wallet) ancestorHeaderAtHeight(dbtx walletdb.ReadTx, h *wire.BlockHeader, chain []*BlockNode, height int32) (*wire.BlockHeader, error) {
switch {
case height == int32(h.Height):
return h, nil
case height > int32(h.Height), height < 0:
return nil, nil // dcrd's blockNode.Ancestor returns nil for child heights
}
if len(chain) > 0 && height-int32(chain[0].Header.Height) >= 0 {
return chain[height-int32(chain[0].Header.Height)].Header, nil
}
// Because the parent of chain[0] must be in the main chain, the header can
// be queried by its main chain height.
ns := dbtx.ReadBucket(wtxmgrNamespaceKey)
hash, err := w.TxStore.GetMainChainBlockHashForHeight(ns, height)
if err != nil {
return nil, err
}
return w.TxStore.GetBlockHeader(dbtx, &hash)
}
// nextRequiredDCP0001PoSDifficulty calculates the required stake difficulty for
// the block after the passed previous block node based on the algorithm defined
// in DCP0001.
func (w *Wallet) nextRequiredDCP0001PoSDifficulty(dbtx walletdb.ReadTx, curHeader *wire.BlockHeader, chain []*BlockNode) (dcrutil.Amount, error) {
// Stake difficulty before any tickets could possibly be purchased is
// the minimum value.
nextHeight := int64(0)
if curHeader != nil {
nextHeight = int64(curHeader.Height) + 1
}
stakeDiffStartHeight := int64(w.chainParams.CoinbaseMaturity) + 1
if nextHeight < stakeDiffStartHeight {
return dcrutil.Amount(w.chainParams.MinimumStakeDiff), nil
}
// Return the previous block's difficulty requirements if the next block
// is not at a difficulty retarget interval.
intervalSize := w.chainParams.StakeDiffWindowSize
curDiff := curHeader.SBits
if nextHeight%intervalSize != 0 {
return dcrutil.Amount(curDiff), nil
}
// Get the pool size and number of tickets that were immature at the
// previous retarget interval.
//
// NOTE: Since the stake difficulty must be calculated based on existing
// blocks, it is always calculated for the block after a given block, so
// the information for the previous retarget interval must be retrieved
// relative to the block just before it to coincide with how it was
// originally calculated.
var prevPoolSize int64
prevRetargetHeight := nextHeight - intervalSize - 1
prevRetargetHeader, err := w.ancestorHeaderAtHeight(dbtx, curHeader, chain, int32(prevRetargetHeight))
if err != nil {
return 0, err
}
if prevRetargetHeader != nil {
prevPoolSize = int64(prevRetargetHeader.PoolSize)
}
ticketMaturity := int64(w.chainParams.TicketMaturity)
prevImmatureTickets, err := w.sumPurchasedTickets(dbtx, prevRetargetHeader, chain, ticketMaturity)
if err != nil {
return 0, err
}
// Return the existing ticket price for the first few intervals to avoid
// division by zero and encourage initial pool population.
prevPoolSizeAll := prevPoolSize + prevImmatureTickets
if prevPoolSizeAll == 0 {
return dcrutil.Amount(curDiff), nil
}
// Count the number of currently immature tickets.
immatureTickets, err := w.sumPurchasedTickets(dbtx, curHeader, chain, ticketMaturity)
if err != nil {
return 0, err
}
// Calculate and return the final next required difficulty.
curPoolSizeAll := int64(curHeader.PoolSize) + immatureTickets
sdiff := calcNextStakeDiffV2(w.chainParams, nextHeight, curDiff, prevPoolSizeAll, curPoolSizeAll)
return dcrutil.Amount(sdiff), nil
}
// NextStakeDifficulty returns the ticket price for the next block after the
// current main chain tip block. This function only succeeds when DCP0001 is
// known to be active. As a fallback, the StakeDifficulty method of
// wallet.NetworkBackend may be used to query the next ticket price from a
// trusted full node.
func (w *Wallet) NextStakeDifficulty() (dcrutil.Amount, error) {
const op errors.Op = "wallet.NextStakeDifficulty"
var sdiff dcrutil.Amount
err := walletdb.View(w.db, func(dbtx walletdb.ReadTx) error {
ns := dbtx.ReadBucket(wtxmgrNamespaceKey)
tipHash, tipHeight := w.TxStore.MainChainTip(ns)
if !deployments.DCP0001.Active(tipHeight, w.chainParams) {
return errors.E(errors.Deployment, "DCP0001 is not known to be active")
}
tipHeader, err := w.TxStore.GetBlockHeader(dbtx, &tipHash)
if err != nil {
return err
}
sdiff, err = w.nextRequiredDCP0001PoSDifficulty(dbtx, tipHeader, nil)
return err
})
if err != nil {
return 0, errors.E(op, err)
}
return sdiff, nil
}
// NextStakeDifficultyAfterHeader returns the ticket price for the child of h.
// All headers of ancestor blocks of h must be recorded by the wallet. This
// function only succeeds when DCP0001 is known to be active.
func (w *Wallet) NextStakeDifficultyAfterHeader(h *wire.BlockHeader) (dcrutil.Amount, error) {
const op errors.Op = "wallet.NextStakeDifficultyAfterHeader"
if !deployments.DCP0001.Active(int32(h.Height), w.chainParams) {
return 0, errors.E(op, errors.Deployment, "DCP0001 is not known to be active")
}
var sdiff dcrutil.Amount
err := walletdb.View(w.db, func(dbtx walletdb.ReadTx) error {
var err error
sdiff, err = w.nextRequiredDCP0001PoSDifficulty(dbtx, h, nil)
return err
})
if err != nil {
return 0, errors.E(op, err)
}
return sdiff, nil
}
// ValidateHeaderChainDifficulties validates the PoW and PoS difficulties of all
// blocks in chain[idx:]. The parent of chain[0] must be recorded as wallet
// main chain block. If a consensus violation is caught, a subslice of chain
// beginning with the invalid block is returned.
func (w *Wallet) ValidateHeaderChainDifficulties(chain []*BlockNode, idx int) ([]*BlockNode, error) {
var invalid []*BlockNode
err := walletdb.View(w.db, func(dbtx walletdb.ReadTx) error {
var err error
invalid, err = w.validateHeaderChainDifficulties(dbtx, chain, idx)
return err
})
return invalid, err
}
func (w *Wallet) validateHeaderChainDifficulties(dbtx walletdb.ReadTx, chain []*BlockNode, idx int) ([]*BlockNode, error) {
const op errors.Op = "wallet.validateHeaderChainDifficulties"
inMainChain, _ := w.TxStore.BlockInMainChain(dbtx, &chain[0].Header.PrevBlock)
if !inMainChain {
return nil, errors.E(op, errors.Bug, "parent of chain[0] is not in main chain")
}
var parent *wire.BlockHeader
for ; idx < len(chain); idx++ {
n := chain[idx]
h := n.Header
hash := h.BlockHash()
if parent == nil && h.Height != 0 {
if idx == 0 {
var err error
parent, err = w.TxStore.GetBlockHeader(dbtx, &h.PrevBlock)
if err != nil {
return nil, err
}
} else {
parent = chain[idx-1].Header
}
}
// Validate advertised and performed work
bits, err := w.nextRequiredPoWDifficulty(dbtx, parent, chain, h.Timestamp)
if err != nil {
return nil, errors.E(op, err)
}
if h.Bits != bits {
err := errors.Errorf("%v has invalid PoW difficulty, got %x, want %x",
&hash, h.Bits, bits)
return chain[idx:], errors.E(op, errors.Consensus, err)
}
err = blockchain.CheckProofOfWork(h, w.chainParams.PowLimit)
if err != nil {
return chain[idx:], errors.E(op, errors.Consensus, err)
}
// Validate ticket price
if deployments.DCP0001.Active(int32(h.Height), w.chainParams) {
sdiff, err := w.nextRequiredDCP0001PoSDifficulty(dbtx, parent, chain)
if err != nil {
return nil, errors.E(op, err)
}
if dcrutil.Amount(h.SBits) != sdiff {
err := errors.Errorf("%v has invalid PoS difficulty, got %v, want %v",
&hash, dcrutil.Amount(h.SBits), sdiff)
return chain[idx:], errors.E(op, errors.Consensus, err)
}
}
parent = h
}
return nil, nil
}
| calcNextStakeDiffV2 |
testgame.py | import sys
from PyQt5 import QtWidgets
class game(QtWidgets.QMainWindow):
|
test = game()
test.showtime() | def __init__(self):
self.ap = QtWidgets.QApplication(sys.argv)
super(game, self).__init__()
self.setGeometry(100,100,200,200)
self.conts()
self.x = 50
self.y = 50
def moving(self):
self.x+=10
self.obj1.move(self.x, self.y)
def conts(self):
self.obj1 = QtWidgets.QLabel(self)
self.obj1.setText("0")
self.obj1.move(50,50)
# object
self.rb = QtWidgets.QPushButton(self)
self.rb.clicked.connect(self.moving)
# movement
def showtime(self):
self.show()
sys.exit(self.ap.exec_()) |
add-binary.py | # Time: O(n)
# Space: O(1)
class Solution(object):
# @param a, a string
# @param b, a string
# @return a string
def | (self, a, b):
result, carry, val = "", 0, 0
for i in range(max(len(a), len(b))):
val = carry
if i < len(a):
val += int(a[-(i + 1)])
if i < len(b):
val += int(b[-(i + 1)])
carry, val = divmod(val, 2)
result += str(val)
if carry:
result += str(carry)
return result[::-1]
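# Illustration: Solution().addBinary("11", "1") returns "100".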
# Time: O(n)
# Space: O(1)
from itertools import zip_longest
class Solution2(object):
def addBinary(self, a, b):
"""
:type a: str
:type b: str
:rtype: str
"""
result = ""
carry = 0
for x, y in zip_longest(reversed(a), reversed(b), fillvalue="0"):
carry, remainder = divmod(int(x)+int(y)+carry, 2)
result += str(remainder)
if carry:
result += str(carry)
return result[::-1]
| addBinary |
zero.rs | // Test that a `recursion_limit` of 0 is valid
#![recursion_limit = "0"]
macro_rules! test {
() => {};
($tt:tt) => { test!(); };
}
test!(test); //~ ERROR 10:1: 10:13: recursion limit reached while expanding `test!`
fn main() | {} |
|
_auto_rest_head_test_service.py | # coding=utf-8 | # Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import TYPE_CHECKING
from azure.core import PipelineClient
from msrest import Deserializer, Serializer
from ._configuration import AutoRestHeadTestServiceConfiguration
from .operations import HttpSuccessOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Dict, Optional
from azure.core.credentials import AzureKeyCredential
from azure.core.rest import HttpRequest, HttpResponse
class AutoRestHeadTestService(object):
"""Test Infrastructure for AutoRest.
:ivar http_success: HttpSuccessOperations operations
:vartype http_success: azure.key.credential.sample.operations.HttpSuccessOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.AzureKeyCredential
:param base_url: Service URL. Default value is 'http://localhost:3000'.
:type base_url: str
"""
def __init__(
self,
credential, # type: AzureKeyCredential
base_url="http://localhost:3000", # type: str
**kwargs # type: Any
):
# type: (...) -> None
self._config = AutoRestHeadTestServiceConfiguration(credential=credential, **kwargs)
self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {} # type: Dict[str, Any]
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.http_success = HttpSuccessOperations(self._client, self._config, self._serialize, self._deserialize)
def _send_request(
self,
request, # type: HttpRequest
**kwargs # type: Any
):
# type: (...) -> HttpResponse
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = client._send_request(request)
<HttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.HttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
def close(self):
# type: () -> None
self._client.close()
def __enter__(self):
# type: () -> AutoRestHeadTestService
self._client.__enter__()
return self
def __exit__(self, *exc_details):
# type: (Any) -> None
self._client.__exit__(*exc_details) | # --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information. |
test_reshape.py | # Owner(s): ["oncall: fx"]
import torch
import torch.fx.experimental.fx_acc.acc_ops as acc_ops
from torch.testing._internal.common_fx2trt import AccTestCase, InputTensorSpec
from parameterized import parameterized
from torch.testing._internal.common_utils import run_tests
class TestReshapeConverter(AccTestCase):
@parameterized.expand(
[
((1, 20),),
((1, 10, -1),),
]
)
def test_reshape(self, target_shape):
class TestModule(torch.nn.Module):
def __init__(self, target_shape): | def forward(self, x):
return torch.reshape(x, self.target_shape)
inputs = [torch.randn(1, 2, 10)]
self.run_test(TestModule(target_shape), inputs, expected_ops={acc_ops.reshape})
@parameterized.expand(
[
((-1, 2),),
((1, 2, -1),),
]
)
def test_reshape_with_dynamic_shape(self, target_shape):
class TestModule(torch.nn.Module):
def __init__(self, target_shape):
super().__init__()
self.target_shape = target_shape
def forward(self, x):
return torch.reshape(x, self.target_shape)
input_specs = [
InputTensorSpec(
shape=(-1, -1, -1),
dtype=torch.float32,
shape_ranges=[((1, 1, 1), (1, 2, 3), (3, 3, 3))],
),
]
self.run_test_with_dynamic_shape(
TestModule(target_shape), input_specs, expected_ops={acc_ops.reshape}
)
if __name__ == '__main__':
run_tests() | super().__init__()
self.target_shape = target_shape
|
extension.ts | import { Request, WithRequired } from 'apollo-server-env';
import {
GraphQLResolveInfo,
responsePathAsArray,
ResponsePath,
DocumentNode,
ExecutionArgs,
GraphQLError,
} from 'graphql';
import { GraphQLExtension, EndHandler } from 'graphql-extensions';
import { Trace, google } from 'apollo-engine-reporting-protobuf';
import {
EngineReportingOptions,
GenerateClientInfo,
AddTraceArgs,
} from './agent';
import { GraphQLRequestContext } from 'apollo-server-core/dist/requestPipelineAPI';
const clientNameHeaderKey = 'apollographql-client-name';
const clientReferenceIdHeaderKey = 'apollographql-client-reference-id';
const clientVersionHeaderKey = 'apollographql-client-version';
// (DEPRECATE)
// This special type is used internally to this module to implement the
// `maskErrorDetails` (https://github.com/apollographql/apollo-server/pull/1615)
// functionality in the exact form it was originally implemented — which didn't
// have the result matching the interface provided by `GraphQLError` but instead
// just had a `message` property set to `<masked>`. Since `maskErrorDetails`
// is now slated for deprecation (with its behavior superseded by the more
// robust `rewriteError` functionality), this `GraphQLErrorOrMaskedErrorObject`
// should be removed when that deprecation is completed in a major release.
type GraphQLErrorOrMaskedErrorObject =
| GraphQLError
| (Partial<GraphQLError> & Pick<GraphQLError, 'message'>);
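// For illustration (hypothetical value): under `maskErrorDetails` an error is
// reported simply as `{ message: '<masked>' }`, which satisfies this type
// without being a full GraphQLError instance.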
// EngineReportingExtension is the per-request GraphQLExtension which creates a
// trace (in protobuf Trace format) for a single request. When the request is
// done, it passes the Trace back to its associated EngineReportingAgent via the
// addTrace callback in its constructor. This class isn't for direct use; its
// constructor is a private API for communicating with EngineReportingAgent.
// Its public methods all implement the GraphQLExtension interface.
export class EngineReportingExtension<TContext = any>
implements GraphQLExtension<TContext> {
public trace = new Trace();
private nodes = new Map<string, Trace.Node>();
private startHrTime!: [number, number];
private operationName?: string | null;
private queryString?: string;
private documentAST?: DocumentNode;
private options: EngineReportingOptions<TContext>;
private addTrace: (args: AddTraceArgs) => Promise<void>;
private generateClientInfo: GenerateClientInfo<TContext>;
public constructor(
options: EngineReportingOptions<TContext>,
addTrace: (args: AddTraceArgs) => Promise<void>,
) {
this.options = {
...options,
};
this.addTrace = addTrace;
const root = new Trace.Node();
this.trace.root = root;
this.nodes.set(responsePathAsString(undefined), root);
this.generateClientInfo =
options.generateClientInfo || defaultGenerateClientInfo;
}
public requestDidStart(o: {
request: Request;
queryString?: string;
parsedQuery?: DocumentNode;
variables?: Record<string, any>;
context: TContext;
extensions?: Record<string, any>;
requestContext: WithRequired<
GraphQLRequestContext<TContext>,
'metrics' | 'queryHash'
>;
}): EndHandler {
this.trace.startTime = dateToTimestamp(new Date());
this.startHrTime = process.hrtime();
// Generally, we'll get queryString here and not parsedQuery; we only get
// parsedQuery if you're using an OperationStore. In normal cases we'll get
// our documentAST in the execution callback after it is parsed.
const queryHash = o.requestContext.queryHash;
this.queryString = o.queryString;
this.documentAST = o.parsedQuery;
this.trace.http = new Trace.HTTP({
method:
Trace.HTTP.Method[o.request.method as keyof typeof Trace.HTTP.Method] ||
Trace.HTTP.Method.UNKNOWN,
// Host and path are not used anywhere on the backend, so let's not bother
// trying to parse request.url to get them, which is a potential
// source of bugs because integrations have different behavior here.
// On Node's HTTP module, request.url only includes the path
// (see https://nodejs.org/api/http.html#http_message_url)
// The same is true on Lambda (where we pass event.path)
// But on environments like Cloudflare we do get a complete URL.
host: null,
path: null,
});
if (this.options.privateHeaders !== true) {
for (const [key, value] of o.request.headers) {
if (
this.options.privateHeaders &&
Array.isArray(this.options.privateHeaders) &&
// We assume that most users only have a few private headers, or will
// just set privateHeaders to true; we can change this linear-time
// operation if it causes real performance issues.
this.options.privateHeaders.some(privateHeader => {
// Headers are case-insensitive, and should be compared as such.
return privateHeader.toLowerCase() === key.toLowerCase();
})
) {
continue;
}
switch (key) {
case 'authorization':
case 'cookie':
case 'set-cookie':
break;
default:
this.trace.http!.requestHeaders![key] = new Trace.HTTP.Values({
value: [value],
});
}
}
if (o.requestContext.metrics.persistedQueryHit) {
this.trace.persistedQueryHit = true;
}
if (o.requestContext.metrics.persistedQueryRegister) {
this.trace.persistedQueryRegister = true;
}
}
if (this.options.privateVariables !== true && o.variables) {
// Note: we explicitly do *not* include the details.rawQuery field. The
// Engine web app currently does nothing with this other than store it in
// the database and offer it up via its GraphQL API, and sending it means
// that using calculateSignature to hide sensitive data in the query
// string is ineffective.
this.trace.details = new Trace.Details();
Object.keys(o.variables).forEach(name => {
if (
this.options.privateVariables &&
Array.isArray(this.options.privateVariables) &&
// We assume that most users will have only a few private variables,
// or will just set privateVariables to true; we can change this
// linear-time operation if it causes real performance issues.
this.options.privateVariables.includes(name)
) {
// Special case for private variables. Note that this is a different
// representation from a variable containing the empty string, as that
// will be sent as '""'.
this.trace.details!.variablesJson![name] = '';
} else {
try {
this.trace.details!.variablesJson![name] = JSON.stringify(
o.variables![name],
);
} catch (e) {
// This probably means that the value contains a circular reference,
// causing `JSON.stringify()` to throw a TypeError:
// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/JSON/stringify#Issue_with_JSON.stringify()_when_serializing_circular_references
this.trace.details!.variablesJson![name] = JSON.stringify(
'[Unable to convert value to JSON]',
);
}
}
});
}
const clientInfo = this.generateClientInfo(o.requestContext);
if (clientInfo) {
// While clientAddress could be a part of the protobuf, we'll ignore it for
// now, since the backend does not group by it and Engine frontend will not
// support it in the short term
const { clientName, clientVersion, clientReferenceId } = clientInfo;
// the backend makes the choice of mapping clientName => clientReferenceId if
// no custom reference id is provided
this.trace.clientVersion = clientVersion || '';
this.trace.clientReferenceId = clientReferenceId || '';
this.trace.clientName = clientName || '';
}
return () => {
this.trace.durationNs = durationHrTimeToNanos(
process.hrtime(this.startHrTime),
);
this.trace.endTime = dateToTimestamp(new Date());
this.trace.fullQueryCacheHit = !!o.requestContext.metrics
.responseCacheHit;
this.trace.forbiddenOperation = !!o.requestContext.metrics
.forbiddenOperation;
this.trace.registeredOperation = !!o.requestContext.metrics
.registeredOperation;
// If the `operationName` was not already set elsewhere, for example,
// through the `executionDidStart` or the `willResolveField` hooks, then
// we'll resort to using the `operationName` which was requested to be
// executed by the client.
const operationName =
this.operationName || o.requestContext.operationName || '';
const documentAST = this.documentAST || o.requestContext.document;
this.addTrace({
operationName,
queryHash,
documentAST,
queryString: this.queryString || '',
trace: this.trace,
});
};
}
public executionDidStart(o: { executionArgs: ExecutionArgs }) {
// If the operationName is explicitly provided, save it. If there's just one
// named operation, the client doesn't have to provide it, but we still want
// to know the operation name so that the server can identify the query by
// it without having to parse a signature.
//
// Fortunately, in the non-error case, we can just pull this out of
// the first call to willResolveField's `info` argument. In an
// error case (eg, the operationName isn't found, or there are more
// than one operation and no specified operationName) it's OK to continue
// to file this trace under the empty operationName.
if (o.executionArgs.operationName) {
this.operationName = o.executionArgs.operationName;
}
this.documentAST = o.executionArgs.document;
}
public willResolveField(
_source: any,
_args: { [argName: string]: any },
_context: TContext,
info: GraphQLResolveInfo,
): ((error: Error | null, result: any) => void) | void {
if (this.operationName === undefined) {
this.operationName =
(info.operation.name && info.operation.name.value) || '';
}
const path = info.path;
const node = this.newNode(path);
node.type = info.returnType.toString();
node.parentType = info.parentType.toString();
node.startTime = durationHrTimeToNanos(process.hrtime(this.startHrTime));
return () => {
node.endTime = durationHrTimeToNanos(process.hrtime(this.startHrTime));
// We could save the error into the trace here, but it won't have all
// the information that graphql-js adds to it later, like 'locations'.
};
}
public didEncounterErrors(errors: GraphQLError[]) {
errors.forEach(err => {
// In terms of reporting, errors can be re-written by the user by
// utilizing the `rewriteError` parameter. This allows changing
// the message or stack to remove potentially sensitive information.
// Returning `null` will result in the error not being reported at all.
const errorForReporting = this.rewriteError(err);
if (errorForReporting === null) {
return;
}
this.addError(errorForReporting);
});
}
private rewriteError(
err: GraphQLError,
): GraphQLErrorOrMaskedErrorObject | null {
// (DEPRECATE)
// This relatively basic representation of an error is an artifact
// introduced by https://github.com/apollographql/apollo-server/pull/1615.
    // Interestingly, the implementation of that feature didn't actually
    // accomplish what the requester had desired. This functionality is now
    // being superseded by the `rewriteError` function, a more dynamic
    // implementation which multiple Engine users have been interested in.
// When this `maskErrorDetails` is officially deprecated, this
// `rewriteError` method can be changed to return `GraphQLError | null`,
// and as noted in its definition, `GraphQLErrorOrMaskedErrorObject` can be
// removed.
if (this.options.maskErrorDetails) {
return {
message: '<masked>',
};
}
if (typeof this.options.rewriteError === 'function') {
      // Before passing the error to the user-provided `rewriteError` function,
      // we'll make a shallow copy of the error so the user is free to change
      // the object as they see fit.
      // At this stage the error is only used for reporting, but the copy still
      // matters: the original error object is referenced elsewhere, and
      // mutating it would also change the error which is returned in the
      // response to the client.
      // For the clone, we'll create a new object which uses the exact same
      // prototype as the error being reported.
const clonedError = Object.assign(
Object.create(Object.getPrototypeOf(err)),
err,
);
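      // (A note on the technique above: Object.create preserves the prototype
      // chain, so `clonedError instanceof GraphQLError` still holds, while
      // Object.assign copies the error's own enumerable properties, e.g.
      // message and path, onto the clone.)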
const rewrittenError = this.options.rewriteError(clonedError);
// Returning an explicit `null` means the user is requesting that, in
// terms of Engine reporting, the error be buried.
if (rewrittenError === null) {
return null;
}
// We don't want users to be inadvertently not reporting errors, so if
// they haven't returned an explicit `GraphQLError` (or `null`, handled
// above), then we'll report the error as usual.
if (!(rewrittenError instanceof GraphQLError)) {
return err;
}
return new GraphQLError(
rewrittenError.message,
err.nodes,
err.source,
err.positions,
err.path,
err.originalError,
err.extensions,
);
}
return err;
}
private addError(error: GraphQLErrorOrMaskedErrorObject): void {
// By default, put errors on the root node.
let node = this.nodes.get('');
if (error.path) {
const specificNode = this.nodes.get(error.path.join('.'));
if (specificNode) {
node = specificNode;
}
}
node!.error!.push(
new Trace.Error({
message: error.message,
location: (error.locations || []).map(
({ line, column }) => new Trace.Location({ line, column }),
),
json: JSON.stringify(error),
}),
);
}
private newNode(path: ResponsePath): Trace.Node {
const node = new Trace.Node();
const id = path.key;
if (typeof id === 'number') {
node.index = id;
} else {
node.fieldName = id;
}
this.nodes.set(responsePathAsString(path), node);
const parentNode = this.ensureParentNode(path);
parentNode.child.push(node);
return node;
}
private ensureParentNode(path: ResponsePath): Trace.Node {
const parentPath = responsePathAsString(path.prev);
const parentNode = this.nodes.get(parentPath);
if (parentNode) {
return parentNode;
}
// Because we set up the root path in the constructor, we now know that
// path.prev isn't undefined.
return this.newNode(path.prev!);
}
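  // A sketch of the recursion above (not part of the original code): for a
  // field at path "hero.friends.0", ensureParentNode looks up "hero.friends";
  // if that node is missing it is created the same way via newNode, and the
  // walk bottoms out at the root node registered under '' in the constructor.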
}
// Helpers for producing traces.
// Convert from the linked-list ResponsePath format to a dot-joined
// string. Includes the full path (field names and array indices).
function responsePathAsString(p: ResponsePath | undefined) {
if (p === undefined) {
return '';
}
return responsePathAsArray(p).join('.');
}
// Converts a JS Date into a Timestamp.
function dateToTimestamp(date: Date): google.protobuf.Timestamp {
const totalMillis = +date;
const millis = totalMillis % 1000;
return new google.protobuf.Timestamp({
seconds: (totalMillis - millis) / 1000,
nanos: millis * 1e6,
});
}
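// For example, dateToTimestamp(new Date(1500000000123)) yields
// { seconds: 1500000000, nanos: 123000000 }.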
// Converts an hrtime array (as returned from process.hrtime) to nanoseconds.
//
// ONLY CALL THIS ON VALUES REPRESENTING DELTAS, NOT ON THE RAW RETURN VALUE
// FROM process.hrtime() WITH NO ARGUMENTS.
//
// The entire point of the hrtime data structure is that the JavaScript Number
// type can't represent all int64 values without loss of precision:
// Number.MAX_SAFE_INTEGER nanoseconds is about 104 days. Calling this function
// on a duration that represents a value less than 104 days is fine. Calling
// this function on an absolute time (which is generally roughly time since
// system boot) is not a good idea.
//
// XXX We should probably use google.protobuf.Duration on the wire instead of
// ever trying to store durations in a single number.
function dur | time: [number, number]) {
return hrtime[0] * 1e9 + hrtime[1];
}
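// An illustrative (commented-out) sketch of correct vs. incorrect usage,
// assuming Node's process.hrtime API:
//
//   const start = process.hrtime();
//   // ... do some work ...
//   const elapsedNs = durationHrTimeToNanos(process.hrtime(start)); // OK: delta
//   const absoluteNs = durationHrTimeToNanos(process.hrtime());     // don't!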
function defaultGenerateClientInfo({ request }: GraphQLRequestContext) {
// Default to using the `apollo-client-x` header fields if present.
// If none are present, fallback on the `clientInfo` query extension
// for backwards compatibility.
  // If neither the header values nor the query extension is set, the
  // default is the empty string for all fields (as per protobuf defaults).
if (
request.http &&
request.http.headers &&
(request.http.headers.get(clientNameHeaderKey) ||
request.http.headers.get(clientVersionHeaderKey) ||
request.http.headers.get(clientReferenceIdHeaderKey))
) {
return {
clientName: request.http.headers.get(clientNameHeaderKey),
clientVersion: request.http.headers.get(clientVersionHeaderKey),
clientReferenceId: request.http.headers.get(clientReferenceIdHeaderKey),
};
} else if (request.extensions && request.extensions.clientInfo) {
return request.extensions.clientInfo;
} else {
return {};
}
}
| ationHrTimeToNanos(hr |
html_view.py | import jinja2
import numpy as np
from PyQt5 import QtCore, QtWidgets
import os.path
import core.elements
import core.bonds
from collections import Counter
from core.calculation.discretization import Discretization
from gui.tabs.statistics.tree_list import TreeList
from gui.util.webview import WebWidget
def render_html_atom_group(atom_number, atom_elements):
template_loader = jinja2.FileSystemLoader(searchpath="gui/tabs/statistics/templates")
template_env = jinja2.Environment(loader=template_loader)
TEMPLATE_FILE = 'atoms.html'
template = template_env.get_template(TEMPLATE_FILE)
template_vars = {"title": "Summary of atoms",
"description": "a summary of all calculated atoms",
"atom_number": atom_number,
"atom_elements": atom_elements
}
return template.render(template_vars)
def render_html_atom(index, atom_fullname, atom_positions, atom_number, covalent_radius, cutoff_radius, atom_color_rgb, bonds):
template_loader = jinja2.FileSystemLoader(searchpath="gui/tabs/statistics/templates")
template_env = jinja2.Environment(loader=template_loader)
TEMPLATE_FILE = 'atom.html'
template = template_env.get_template(TEMPLATE_FILE)
template_vars = {"title": "Summary of atoms",
"description": "a summary of one atom",
"index": index,
"atom_fullname": atom_fullname,
"atom_positions": atom_positions,
"atom_number": atom_number,
"covalent_radius": covalent_radius,
"cutoff_radius": cutoff_radius,
"atom_color_rgb": atom_color_rgb,
"bonds": bonds,
}
return template.render(template_vars)
def render_html_cavity_center_group(number, surface_area, surface_volumes, volume_fraction):
template_loader = jinja2.FileSystemLoader(searchpath="gui/tabs/statistics/templates")
template_env = jinja2.Environment(loader=template_loader)
TEMPLATE_FILE = 'cavities_center.html'
template = template_env.get_template(TEMPLATE_FILE)
template_vars = {"title": "Summary of all cavities",
"description": "a summary of all calculated center bases cavities",
"number": number,
"surface_area": surface_area,
"surface_volumes": surface_volumes,
"volume_fraction": volume_fraction,
}
return template.render(template_vars)
def render_html_cavity_center_group_unknown():
    template_loader = jinja2.FileSystemLoader(searchpath="gui/tabs/statistics/templates")
    template_env = jinja2.Environment(loader=template_loader)
    TEMPLATE_FILE = 'cavities_center_unknown.html'
    template = template_env.get_template(TEMPLATE_FILE)
return template.render({})
def render_html_cavity_center(**kwargs):
needed_keys = ('index', 'surface', 'volume', 'domains', 'volume_fraction', 'mass_center', 'squared_gyration_radius',
'asphericity', 'acylindricity', 'anisotropy', 'characteristic_radius', 'is_cyclic')
template_loader = jinja2.FileSystemLoader(searchpath="gui/tabs/statistics/templates")
template_env = jinja2.Environment(loader=template_loader)
TEMPLATE_FILE = 'cavity_center.html'
template = template_env.get_template(TEMPLATE_FILE)
template_vars = dict(kwargs)
template_vars["title"] = "Summary of one cavity (domain)"
template_vars["description"] = "a summary of one calculated cavities (domain)"
if kwargs["surface"] is not None and kwargs["volume"] is not None:
template_vars["surface_to_volume_ratio"] = kwargs["surface"] / kwargs["volume"]
missing_values = tuple(key for key in needed_keys if key not in kwargs or kwargs[key] is None)
template_vars.update((key, None) for key in missing_values)
template_vars["missing_values"] = missing_values if len(missing_values) > 0 else None
return template.render(template_vars)
def render_html_cavity_surface_group(number, surface_area, surface_volumes, volume_fraction):
template_loader = jinja2.FileSystemLoader(searchpath="gui/tabs/statistics/templates")
template_env = jinja2.Environment(loader=template_loader)
TEMPLATE_FILE = 'cavities_surface.html'
template = template_env.get_template(TEMPLATE_FILE)
template_vars = {"title": "Summary of atoms",
"description": "a summary of all calculated surface based cavities",
"number": number,
"surface_area": surface_area,
"surface_volumes": surface_volumes,
"volume_fraction": volume_fraction,
}
return template.render(template_vars)
def render_html_cavity_surface_group_unknown():
    template_loader = jinja2.FileSystemLoader(searchpath="gui/tabs/statistics/templates")
    template_env = jinja2.Environment(loader=template_loader)
    TEMPLATE_FILE = 'cavities_surface_unknown.html'
    template = template_env.get_template(TEMPLATE_FILE)
return template.render({})
def render_html_cavity_surface(**kwargs):
needed_keys = ('index', 'surface', 'volume', 'domains', 'volume_fraction', 'mass_center', 'squared_gyration_radius',
'asphericity', 'acylindricity', 'anisotropy', 'characteristic_radius', 'is_cyclic')
template_loader = jinja2.FileSystemLoader(searchpath="gui/tabs/statistics/templates")
template_env = jinja2.Environment(loader=template_loader)
TEMPLATE_FILE = 'cavity_surface.html'
template = template_env.get_template(TEMPLATE_FILE)
template_vars = dict(kwargs)
template_vars["title"] = "Summary of one cavity (domain)"
template_vars["description"] = "a summary of one calculated cavities (domain)"
if kwargs["surface"] is not None and kwargs["volume"] is not None:
template_vars["surface_to_volume_ratio"] = kwargs["surface"] / kwargs["volume"]
missing_values = tuple(key for key in needed_keys if key not in kwargs or kwargs[key] is None)
template_vars.update((key, None) for key in missing_values)
template_vars["missing_values"] = missing_values if len(missing_values) > 0 else None
return template.render(template_vars)
def render_html_cavity_domain_group(number, surface_area, surface_volumes, surface_volumes_fractions):
template_loader = jinja2.FileSystemLoader(searchpath="gui/tabs/statistics/templates")
template_env = jinja2.Environment(loader=template_loader)
TEMPLATE_FILE = 'domains.html'
template = template_env.get_template(TEMPLATE_FILE)
template_vars = {"title": "Summary of all cavities (domains)",
"description": "a summary of all calculated cavities (domains)",
"number": number,
"surface_area": surface_area,
"surface_volumes": surface_volumes,
"surface_volumes_fractions": surface_volumes_fractions,
}
return template.render(template_vars)
def render_html_cavity_domain_group_unknown():
    template_loader = jinja2.FileSystemLoader(searchpath="gui/tabs/statistics/templates")
    template_env = jinja2.Environment(loader=template_loader)
    TEMPLATE_FILE = 'domains_unknown.html'
    template = template_env.get_template(TEMPLATE_FILE)
return template.render({})
def render_html_cavity_domain(**kwargs):
needed_keys = ('index', 'center', 'surface', 'volume', 'volume_fraction', 'surface_cavity_index',
'center_cavity_index', 'mass_center', 'squared_gyration_radius', 'asphericity',
'acylindricity', 'anisotropy', 'characteristic_radius', 'is_cyclic')
template_loader = jinja2.FileSystemLoader(searchpath="gui/tabs/statistics/templates")
template_env = jinja2.Environment(loader=template_loader)
TEMPLATE_FILE = 'domain.html'
template = template_env.get_template(TEMPLATE_FILE)
template_vars = dict(kwargs)
template_vars["title"] = "Summary of one cavity (domain)"
template_vars["description"] = "a summary of one calculated cavities (domain)"
if kwargs["surface"] is not None and kwargs["volume"] is not None:
template_vars["surface_to_volume_ratio"] = kwargs["surface"] / kwargs["volume"]
missing_values = tuple(key for key in needed_keys if key not in kwargs or kwargs[key] is None)
template_vars.update((key, None) for key in missing_values)
template_vars["missing_values"] = missing_values if len(missing_values) > 0 else None
return template.render(template_vars)
class HTMLWindow(QtWidgets.QWidget):
def __init__(self):
QtWidgets.QWidget.__init__(self)
box = QtWidgets.QVBoxLayout()
self.webview = WebWidget(css_filepath='gui/tabs/statistics/templates/style.css')
self.atoms = None
self.cavities_center = None
self.cavities_surface = None
self.domains = None
self.discretization = None
self.tree_list = None
self.webview.set_gui_html(None)
self.webview.gui_link_clicked.connect(self.link_clicked)
box.addWidget(self.webview)
self.setLayout(box)
box.setContentsMargins(5, 0, 0, 0)
self.show()
def minimumSizeHint(self):
return QtCore.QSize(150, -1)
def sizeHint(self):
return QtCore.QSize(250, -1)
def link_clicked(self, value):
'''
        Examines the data of the given link and calls the specific method to render the new HTML page.
        :param value: value of the link clicked in the WebView
:return: None
'''
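        # Link values are slash-separated strings, e.g. "atom/5" or
        # "focus/1.0/2.0/3.0" (a sketch of the format inferred from the
        # branches below).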
value = value.split("/")
if value[0] == "surface_cavity":
self.show_surface_cavity(int(value[1])-1)
elif value[0] == "center_cavity":
self.show_center_cavity(int(value[1])-1)
elif value[0] == "domain":
self.show_domain(int(value[1])-1)
elif value[0] == "focus":
position = [float(value[1]),float(value[2]),float(value[3])]
self.window().control.visualization.set_focus_on(*position)
self.window().center.gl_widget.update()
self.window().center.combo.setCurrentIndex(0)
self.window().center.gl_stack.setCurrentIndex(0)
elif value[0] == "atom":
self.show_atom(int(value[1]))
elif value[0] == 'hideothers':
parent = self.parent()
while parent.parent():
parent = parent.parent()
main_window = parent
view_tab = main_window.view_dock.view_tab
if value[1] == 'atom':
atom_index = int(value[2])-1
view_tab.atom_check.indices = [atom_index]
view_tab.atom_check.selection_checkbox_set_checked(True)
elif value[1] == 'element':
element = core.elements.names[int(value[2])]
visible_atom_indices = []
for i, element_name in enumerate(self.atoms.elements):
if core.elements.names[core.elements.numbers[element_name.upper()]] == element:
visible_atom_indices.append(i)
view_tab.atom_check.indices = visible_atom_indices
view_tab.atom_check.selection_checkbox_set_checked(True)
elif value[1] == 'domain':
domain_index = int(value[2])-1
view_tab.domain_check.indices = [domain_index]
view_tab.domain_check.selection_checkbox_set_checked(True)
elif value[1] == 'surface_cavity':
surface_cavity_index = int(value[2])-1
view_tab.surface_cavity_check.indices = [surface_cavity_index]
view_tab.surface_cavity_check.selection_checkbox_set_checked(True)
elif value[1] == 'center_cavity':
center_cavity_index = int(value[2])-1
view_tab.center_cavity_check.indices = [center_cavity_index]
view_tab.center_cavity_check.selection_checkbox_set_checked(True)
elif value[0] == 'addtovisible':
parent = self.parent()
while parent.parent():
parent = parent.parent()
main_window = parent
view_tab = main_window.view_dock.view_tab
if value[1] == 'atom':
atom_index = int(value[2])-1
                view_tab.atom_check.add_indices([atom_index])
elif value[1] == 'domain':
domain_index = int(value[2])-1
view_tab.domain_check.add_indices([domain_index])
elif value[1] == 'surface_cavity':
surface_cavity_index = int(value[2])-1
view_tab.surface_cavity_check.add_indices([surface_cavity_index])
elif value[1] == 'center_cavity':
center_cavity_index = int(value[2])-1
view_tab.center_cavity_check.add_indices([center_cavity_index])
elif value[0] == 'showall':
parent = self.parent()
while parent.parent():
parent = parent.parent()
main_window = parent
view_tab = main_window.view_dock.view_tab
if value[1] == 'atoms':
view_tab.atom_check.setChecked(True)
view_tab.atom_check.selection_checkbox_set_checked(False)
if value[1] == 'domains':
view_tab.domain_check.setChecked(True)
view_tab.domain_check.selection_checkbox_set_checked(False)
if value[1] == 'surface_cavities':
view_tab.surface_cavity_check.setChecked(True)
view_tab.surface_cavity_check.selection_checkbox_set_checked(False)
if value[1] == 'center_cavities':
view_tab.center_cavity_check.setChecked(True)
view_tab.center_cavity_check.selection_checkbox_set_checked(False)
elif value[0] == 'recalculate':
parent = self.parent()
while parent.parent():
parent = parent.parent()
main_window = parent
file_tab = main_window.file_dock.file_tab
current_filename, current_frame = file_tab.last_shown_filename_with_frame
file_tab.calculate({current_filename: [current_frame]})
def update_results(self, results):
self.atoms = results.atoms
self.cavities_center = results.center_cavities
self.cavities_surface = results.surface_cavities
self.domains = results.domains
self.discretization = Discretization(results.atoms.volume, results.resolution, True)
self.show_atom_group()
def show_atom_group(self):
atom_number = self.atoms.number
atom_elements = Counter(self.atoms.elements)
self.webview.set_gui_html(render_html_atom_group(atom_number, atom_elements))
def show_atom(self, index):
if self.tree_list is not None:
self.tree_list.select_atom(index)
#for bond in bonds:
# if index not in self.atoms.bonds[bond]:
# self.atoms.bonds[bond].append(index)
        atom_name = self.atoms.elements[index]  # atom name from the periodic system
atom_fullname = core.elements.names[core.elements.numbers[atom_name.upper()]] # get full atom name
atom_color_rgb = core.elements.colors[core.elements.numbers[atom_name.upper()]]
atom_positions = self.atoms.positions[index]
atom_number = core.elements.numbers[atom_name.upper()]
covalent_radius = self.atoms.covalence_radii[index]
cutoff_radius = self.atoms.radii[index]
bonds = self.atoms.bonds[index]
#print dir(self.domains[0])
self.webview.set_gui_html(render_html_atom(index, atom_fullname, atom_positions, atom_number, covalent_radius, cutoff_radius, atom_color_rgb, bonds))
def show_center_cavity_group(self):
number = 0
surface_area = 0.0
volumes = 0.0
volume_fraction = 0.0
if self.cavities_center is None:
self.webview.set_gui_html(render_html_cavity_center_group_unknown())
return
number = self.cavities_center.number
for sf in self.cavities_center.surface_areas:
surface_area += sf
for vl in self.cavities_center.volumes:
volumes += vl
if self.atoms.volume is not None:
volume_fraction = (volumes/self.atoms.volume.volume)*100
self.webview.set_gui_html(render_html_cavity_center_group(number, surface_area, volumes, volume_fraction))
def | (self, index):
if self.tree_list is not None:
self.tree_list.select_center_cavity(index)
attrs = self._create_attr_getter(self.cavities_center, index)
data = {}
data['index'] = index
data['volume_fraction'] = 0.0
cavities = attrs.multicavities
domains = []
for cavity in cavities:
domains.append((cavity+1, self.discretization.discrete_to_continuous(self.domains.centers[cavity])))
data['domains'] = domains
data['surface'] = attrs.surface_areas
data['volume'] = attrs.volumes
data['mass_center'] = attrs.mass_centers
data['squared_gyration_radius'] = attrs.squared_gyration_radii
data['asphericity'] = attrs.asphericities
data['acylindricity'] = attrs.acylindricities
data['anisotropy'] = attrs.anisotropies
data['characteristic_radius'] = attrs.characteristic_radii
data['is_cyclic'] = index in self.cavities_center.cyclic_area_indices
if self.atoms.volume is not None:
data['volume_fraction'] = (data['volume']/self.atoms.volume.volume)*100
self.webview.set_gui_html(render_html_cavity_center(**data))
def show_surface_cavity_group(self):
number = 0
surface_area = 0.0
volumes = 0.0
volume_fraction = 0.0
if self.cavities_surface is None:
self.webview.set_gui_html(render_html_cavity_surface_group_unknown())
return
number = self.cavities_surface.number
for sf in self.cavities_surface.surface_areas:
surface_area += sf
for vl in self.cavities_surface.volumes:
volumes += vl
if self.atoms.volume is not None:
volume_fraction = (volumes/self.atoms.volume.volume)*100
self.webview.set_gui_html(render_html_cavity_surface_group(number, surface_area, volumes, volume_fraction))
def show_surface_cavity(self, index):
if self.tree_list is not None:
self.tree_list.select_surface_cavity(index)
attrs = self._create_attr_getter(self.cavities_surface, index)
data = {}
data['index'] = index
data['volume_fraction'] = 0.0
cavities = attrs.multicavities
domains = []
for cavity in cavities:
domains.append((cavity+1, self.discretization.discrete_to_continuous(self.domains.centers[cavity])))
data['domains'] = domains
data['surface'] = attrs.surface_areas
data['volume'] = attrs.volumes
data['mass_center'] = attrs.mass_centers
data['squared_gyration_radius'] = attrs.squared_gyration_radii
data['asphericity'] = attrs.asphericities
data['acylindricity'] = attrs.acylindricities
data['anisotropy'] = attrs.anisotropies
data['characteristic_radius'] = attrs.characteristic_radii
data['is_cyclic'] = index in self.cavities_surface.cyclic_area_indices
if self.atoms.volume is not None:
data['volume_fraction'] = (data['volume']/self.atoms.volume.volume)*100
self.webview.set_gui_html(render_html_cavity_surface(**data))
def show_domain_group(self):
number = 0
surface = 0.0
volumes = 0.0
volume_fraction = 0.0
if self.domains is None:
self.webview.set_gui_html(render_html_cavity_domain_group_unknown())
return
number = self.domains.number
for sf in self.domains.surface_areas:
surface += sf
for vl in self.domains.volumes:
volumes += vl
if self.atoms.volume is not None:
volume_fraction = (volumes/self.atoms.volume.volume)*100
self.webview.set_gui_html(render_html_cavity_domain_group(number, surface, volumes, volume_fraction))
def show_domain(self, index):
if self.tree_list is not None:
self.tree_list.select_domain(index)
attrs = self._create_attr_getter(self.domains, index)
data = {}
data['index'] = index
discrete_center = attrs.centers
data['center'] = self.discretization.discrete_to_continuous(discrete_center)
data['surface'] = attrs.surface_areas
data['volume'] = attrs.volumes
data['volume_fraction'] = 0.0
data['surface_cavity_index'] = None
data['center_cavity_index'] = None
data['mass_center'] = attrs.mass_centers
data['squared_gyration_radius'] = attrs.squared_gyration_radii
data['asphericity'] = attrs.asphericities
data['acylindricity'] = attrs.acylindricities
data['anisotropy'] = attrs.anisotropies
data['characteristic_radius'] = attrs.characteristic_radii
data['is_cyclic'] = index in self.domains.cyclic_area_indices
if self.atoms.volume is not None:
data['volume_fraction'] = (data['volume']/self.atoms.volume.volume)*100
if self.domains is not None and self.cavities_surface is not None:
for i in range(len(self.cavities_surface.multicavities)):
if index in self.cavities_surface.multicavities[i]:
data['surface_cavity_index'] = i+1
break
if self.domains is not None and self.cavities_center is not None:
for i in range(len(self.cavities_center.multicavities)):
if index in self.cavities_center.multicavities[i]:
data['center_cavity_index'] = i+1
break
self.webview.set_gui_html(render_html_cavity_domain(**data))
@staticmethod
def _create_attr_getter(obj, index):
class AttrGetter(object):
def __init__(self, obj, index):
self._obj = obj
self._index = index
def __getattr__(self, attr):
value = getattr(self._obj, attr)
is_numpy_array = isinstance(value, np.ndarray)
if ((is_numpy_array and len(value.shape) > 0) or
(not is_numpy_array and len(value) > 0)):
return value[self._index]
else:
return None
return AttrGetter(obj, index)
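    # Usage sketch (with hypothetical values): the proxy turns attribute access
    # into indexed access, e.g.
    #   attrs = HTMLWindow._create_attr_getter(results.domains, 3)
    #   attrs.volumes  # equivalent to results.domains.volumes[3], or None if empty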
| show_center_cavity |
documents.py | import logging
from django.conf import settings
from django_elasticsearch_dsl import DocType, Index, fields
from elasticsearch import Elasticsearch
from readthedocs.projects.models import HTMLFile, Project
project_conf = settings.ES_INDEXES['project']
project_index = Index(project_conf['name'])
project_index.settings(**project_conf['settings'])
page_conf = settings.ES_INDEXES['page']
page_index = Index(page_conf['name'])
page_index.settings(**page_conf['settings'])
log = logging.getLogger(__name__)
class RTDDocTypeMixin:
def update(self, *args, **kwargs):
        # A hacky fix for our broken connection pooling.
        # This creates a new connection on every request,
        # but actually works :)
log.info('Hacking Elastic indexing to fix connection pooling')
self.using = Elasticsearch(**settings.ELASTICSEARCH_DSL['default'])
super().update(*args, **kwargs)
@project_index.doc_type
class ProjectDocument(RTDDocTypeMixin, DocType):
# Metadata
url = fields.TextField(attr='get_absolute_url')
users = fields.NestedField(
properties={
'username': fields.TextField(),
'id': fields.IntegerField(),
}
)
language = fields.KeywordField()
modified_model_field = 'modified_date'
class Meta:
model = Project
fields = ('name', 'slug', 'description')
ignore_signals = True
@page_index.doc_type
class PageDocument(RTDDocTypeMixin, DocType):
# Metadata
project = fields.KeywordField(attr='project.slug')
version = fields.KeywordField(attr='version.slug')
path = fields.KeywordField(attr='processed_json.path')
full_path = fields.KeywordField(attr='path')
rank = fields.IntegerField()
# Searchable content
title = fields.TextField(attr='processed_json.title')
sections = fields.NestedField(
attr='processed_json.sections',
properties={
'id': fields.KeywordField(),
'title': fields.TextField(),
'content': fields.TextField(),
}
)
domains = fields.NestedField(
properties={
'role_name': fields.KeywordField(),
# For linking to the URL
'anchor': fields.KeywordField(),
# For showing in the search result
'type_display': fields.TextField(),
'docstrings': fields.TextField(),
# Simple analyzer breaks on `.`,
# otherwise search results are too strict for this use case
'name': fields.TextField(analyzer='simple'),
}
)
modified_model_field = 'modified_date'
class Meta:
model = HTMLFile
fields = ('commit', 'build')
ignore_signals = True
def prepare_rank(self, html_file):
if not (-10 <= html_file.rank <= 10):
return 0
return html_file.rank
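    # For example (a sketch): a rank of 5 is indexed as 5, while an
    # out-of-range rank such as 42 falls back to the default of 0.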
def prepare_domains(self, html_file):
"""Prepares and returns the values for domains field."""
if not html_file.version.is_sphinx_type:
return []
all_domains = []
try:
domains_qs = html_file.sphinx_domains.exclude(
domain='std',
type__in=['doc', 'label']
).iterator()
all_domains = [
{
'role_name': domain.role_name,
'anchor': domain.anchor,
'type_display': domain.type_display,
'docstrings': html_file.processed_json.get(
'domain_data', {}
).get(domain.anchor, ''),
'name': domain.name,
}
for domain in domains_qs
]
log.debug(
"[%s] [%s] Total domains for file %s are: %s",
html_file.project.slug,
html_file.version.slug,
html_file.path,
len(all_domains)
)
except Exception:
log.exception(
"[%s] [%s] Error preparing domain data for file %s",
html_file.project.slug,
html_file.version.slug,
html_file.path
)
| """
Ignore certain files from indexing.
- Files from external versions
- Ignored files
"""
queryset = super().get_queryset()
queryset = (
queryset
.internal()
.exclude(ignore=True)
)
return queryset | return all_domains
def get_queryset(self): |
point.py | class | :
def __init__(self, x, y):
self.x = x
self.y = y
def getPoint(self):
return (self.x, self.y)
| Point |
50.0b164110.async.js | (window["webpackJsonp"]=window["webpackJsonp"]||[]).push([[50],{cmKq:function(e,a,c){"use strict";c.r(a);var t=c("q1tI"),i={icon:{tag:"svg",attrs:{viewBox:"64 64 896 896",focusable:"false"},children:[{tag:"path",attrs:{d:"M308.6 545.7c-19.8 2-57.1 10.7-77.4 28.6-61 53-24.5 150 99 150 71.8 0 143.5-45.7 199.8-119-80.2-38.9-148.1-66.8-221.4-59.6zM880 112H144c-17.7 0-32 14.3-32 32v736c0 17.7 14.3 32 32 32h736c17.7 0 32-14.3 32-32V144c0-17.7-14.3-32-32-32zm29.4 663.2S703 689.4 598.7 639.5C528.8 725.2 438.6 777.3 345 777.3c-158.4 0-212.1-138.1-137.2-229 16.3-19.8 44.2-38.7 87.3-49.4 67.5-16.5 175 10.3 275.7 43.4 18.1-33.3 33.4-69.9 44.7-108.9H305.1V402h160v-56.2H271.3v-31.3h193.8v-80.1s0-13.5 13.7-13.5H557v93.6h191.7v31.3H557.1V402h156.4c-15 61.1-37.7 117.4-66.2 166.8 47.5 17.1 90.1 33.3 121.8 43.9 114.3 38.2 140.2 40.2 140.2 40.2v122.3z"}}]},name:"alipay-square",theme:"filled"},n=i,s=c("6VBw"),r=function(e,a){return t["createElement"](s["a"],Object.assign({},e,{ref:a,icon:n}))};r.displayName="AlipaySquareFilled";a["default"]=t["forwardRef"](r)}}]); |
||
doc.go | /**
* Copyright 2015 @ S1N1 Team.
* name : external.go
* author : jarryliu
* date : -- :
* description :
* history :
*/
package external |
// the package service for external partner apps. |
|
radio.service.js | /**
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://github.com/NG-ZORRO/ng-zorro-antd/blob/master/LICENSE
*/
import { Injectable } from '@angular/core';
import { ReplaySubject, Subject } from 'rxjs';
export class | {
constructor() {
this.selected$ = new ReplaySubject(1);
this.touched$ = new Subject();
this.disabled$ = new ReplaySubject(1);
this.name$ = new ReplaySubject(1);
}
touch() {
this.touched$.next();
}
select(value) {
this.selected$.next(value);
}
setDisabled(value) {
this.disabled$.next(value);
}
setName(value) {
this.name$.next(value);
}
}
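// Usage sketch (assumed consumers): a radio group component calls setName/
// setDisabled/select, while individual radio buttons subscribe to name$,
// disabled$ and selected$; the ReplaySubject(1) buffers ensure that radios
// registered late still receive the most recent value.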
NzRadioService.decorators = [
{ type: Injectable }
];
//# sourceMappingURL=data:application/json;base64,eyJ2ZXJzaW9uIjozLCJmaWxlIjoicmFkaW8uc2VydmljZS5qcyIsInNvdXJjZVJvb3QiOiIiLCJzb3VyY2VzIjpbIi4uLy4uLy4uL2NvbXBvbmVudHMvcmFkaW8vcmFkaW8uc2VydmljZS50cyJdLCJuYW1lcyI6W10sIm1hcHBpbmdzIjoiQUFBQTs7O0dBR0c7QUFFSCxPQUFPLEVBQUUsVUFBVSxFQUFFLE1BQU0sZUFBZSxDQUFDO0FBQzNDLE9BQU8sRUFBRSxhQUFhLEVBQUUsT0FBTyxFQUFFLE1BQU0sTUFBTSxDQUFDO0FBSzlDLE1BQU0sT0FBTyxjQUFjO0lBRDNCO1FBRUUsY0FBUyxHQUFHLElBQUksYUFBYSxDQUFZLENBQUMsQ0FBQyxDQUFDO1FBQzVDLGFBQVEsR0FBRyxJQUFJLE9BQU8sRUFBUSxDQUFDO1FBQy9CLGNBQVMsR0FBRyxJQUFJLGFBQWEsQ0FBVSxDQUFDLENBQUMsQ0FBQztRQUMxQyxVQUFLLEdBQUcsSUFBSSxhQUFhLENBQVMsQ0FBQyxDQUFDLENBQUM7SUFhdkMsQ0FBQztJQVpDLEtBQUs7UUFDSCxJQUFJLENBQUMsUUFBUSxDQUFDLElBQUksRUFBRSxDQUFDO0lBQ3ZCLENBQUM7SUFDRCxNQUFNLENBQUMsS0FBZ0I7UUFDckIsSUFBSSxDQUFDLFNBQVMsQ0FBQyxJQUFJLENBQUMsS0FBSyxDQUFDLENBQUM7SUFDN0IsQ0FBQztJQUNELFdBQVcsQ0FBQyxLQUFjO1FBQ3hCLElBQUksQ0FBQyxTQUFTLENBQUMsSUFBSSxDQUFDLEtBQUssQ0FBQyxDQUFDO0lBQzdCLENBQUM7SUFDRCxPQUFPLENBQUMsS0FBYTtRQUNuQixJQUFJLENBQUMsS0FBSyxDQUFDLElBQUksQ0FBQyxLQUFLLENBQUMsQ0FBQztJQUN6QixDQUFDOzs7WUFqQkYsVUFBVSIsInNvdXJjZXNDb250ZW50IjpbIi8qKlxuICogVXNlIG9mIHRoaXMgc291cmNlIGNvZGUgaXMgZ292ZXJuZWQgYnkgYW4gTUlULXN0eWxlIGxpY2Vuc2UgdGhhdCBjYW4gYmVcbiAqIGZvdW5kIGluIHRoZSBMSUNFTlNFIGZpbGUgYXQgaHR0cHM6Ly9naXRodWIuY29tL05HLVpPUlJPL25nLXpvcnJvLWFudGQvYmxvYi9tYXN0ZXIvTElDRU5TRVxuICovXG5cbmltcG9ydCB7IEluamVjdGFibGUgfSBmcm9tICdAYW5ndWxhci9jb3JlJztcbmltcG9ydCB7IFJlcGxheVN1YmplY3QsIFN1YmplY3QgfSBmcm9tICdyeGpzJztcblxuaW1wb3J0IHsgTnpTYWZlQW55IH0gZnJvbSAnbmctem9ycm8tYW50ZC9jb3JlL3R5cGVzJztcblxuQEluamVjdGFibGUoKVxuZXhwb3J0IGNsYXNzIE56UmFkaW9TZXJ2aWNlIHtcbiAgc2VsZWN0ZWQkID0gbmV3IFJlcGxheVN1YmplY3Q8TnpTYWZlQW55PigxKTtcbiAgdG91Y2hlZCQgPSBuZXcgU3ViamVjdDx2b2lkPigpO1xuICBkaXNhYmxlZCQgPSBuZXcgUmVwbGF5U3ViamVjdDxib29sZWFuPigxKTtcbiAgbmFtZSQgPSBuZXcgUmVwbGF5U3ViamVjdDxzdHJpbmc+KDEpO1xuICB0b3VjaCgpOiB2b2lkIHtcbiAgICB0aGlzLnRvdWNoZWQkLm5leHQoKTtcbiAgfVxuICBzZWxlY3QodmFsdWU6IE56U2FmZUFueSk6IHZvaWQge1xuICAgIHRoaXMuc2VsZWN0ZWQkLm5leHQodmFsdWUpO1xuICB9XG4gIHNldERpc2FibGVkKHZhbHVlOiBib29sZWFuKTogdm9pZCB7XG4gICAgdGhpcy5kaXNhYmxlZCQubmV4dCh2YWx1ZSk7XG4gIH1cbiAgc2V0TmFtZSh2YWx1ZTogc3RyaW5nKTogdm9pZCB7XG4gICAgdGhpcy5uYW1lJC5uZXh0KHZhbHVlKTtcbiAgfVxufVxuIl19 | NzRadioService |
__init__.py | from pm4py.models.transition_system import transition_system, utils |
||
intersectionObserver.spec.ts | import { ref } from "@vue/composition-api";
import { Vue, nextTick } from "../utils";
import {
useIntersectionObserver,
IntersectionObserverOptions
} from "../../src";
describe("IntersectionObserver", () => {
const _intersectionObserver = window.IntersectionObserver;
const observeFn = jest.fn();
const unobserveFn = jest.fn();
const disconnectFn = jest.fn();
const constructorFn = jest.fn();
beforeAll(() => {
class | {
constructor(...args: any[]) {
constructorFn(...args);
}
observe = observeFn;
unobserve = unobserveFn;
disconnect = disconnectFn;
}
Object.defineProperty(window, "IntersectionObserver", {
writable: true,
configurable: true,
value: IntersectionObserver
});
Object.defineProperty(global, "IntersectionObserver", {
writable: true,
configurable: true,
value: IntersectionObserver
});
});
afterAll(() => {
Object.defineProperty(window, "IntersectionObserver", {
writable: true,
configurable: true,
value: _intersectionObserver
});
Object.defineProperty(global, "IntersectionObserver", {
writable: true,
configurable: true,
value: _intersectionObserver
});
});
beforeEach(() => {
observeFn.mockReset();
unobserveFn.mockReset();
constructorFn.mockReset();
disconnectFn.mockReset();
});
it("should create new IntersectionObserver", () => {
useIntersectionObserver(undefined as any);
expect(constructorFn).toHaveBeenCalledTimes(1);
expect(observeFn).not.toHaveBeenCalled();
});
it("should call disconnect on ref(options) change", async () => {
const options = ref<IntersectionObserverOptions>({
rootMargin: "0px"
});
useIntersectionObserver(options);
expect(constructorFn).toHaveBeenCalledWith(
expect.anything(),
options.value
);
options.value.rootMargin = "1px";
await nextTick();
expect(constructorFn).toHaveBeenCalledTimes(2);
expect(constructorFn).toHaveBeenLastCalledWith(
expect.anything(),
options.value
);
expect(disconnectFn).toHaveBeenCalledTimes(1);
expect(observeFn).not.toHaveBeenCalled();
});
describe("observe/unobserve", () => {
it("should `observe`/`unobserve` if `observe` has been called", () => {
const options = ref<IntersectionObserverOptions>({
rootMargin: "0px"
});
const { observe, unobserve } = useIntersectionObserver(options);
expect(observeFn).not.toHaveBeenCalled();
const el = document.createElement("div");
observe(el);
expect(observeFn).toHaveBeenCalledWith(el);
unobserve(el);
expect(unobserveFn).toHaveBeenCalledWith(el);
});
it("should unwrap the element", () => {
const options = ref<IntersectionObserverOptions>({
rootMargin: "0px"
});
const { observe, unobserve } = useIntersectionObserver(options);
expect(observeFn).not.toHaveBeenCalled();
const el = ref<Element>(document.createElement("div"));
observe(el);
expect(observeFn).toHaveBeenCalledWith(el.value);
unobserve(el);
expect(unobserveFn).toHaveBeenCalledWith(el.value);
});
});
it("should set element when observing", () => {
const options = ref<IntersectionObserverOptions>({
rootMargin: "0px"
});
const { observe, elements } = useIntersectionObserver(options);
const handling: (entries: IntersectionObserverEntry[]) => void =
constructorFn.mock.calls[0][0];
expect(handling).toBeDefined();
const el = document.createElement("div");
observe(el);
const observeEntries = [{ target: el } as any];
handling(observeEntries);
expect(elements).toMatchObject(ref(observeEntries));
});
it("should set isIntersection if all elements are intersecting", async () => {
const options = ref<IntersectionObserverOptions>({
rootMargin: "0px"
});
const { isIntersecting } = useIntersectionObserver(options);
const handling: (entries: IntersectionObserverEntry[]) => void =
constructorFn.mock.calls[0][0];
await nextTick();
expect(isIntersecting.value).toBe(false);
expect(handling).toBeDefined();
let observeEntries: IntersectionObserverEntry[] = [
{ isIntersecting: true } as any
];
handling(observeEntries);
await nextTick();
expect(isIntersecting.value).toBe(true);
observeEntries = [
{ isIntersecting: true } as any,
{ isIntersecting: false } as any
];
handling(observeEntries);
await nextTick();
expect(isIntersecting.value).toBe(false);
observeEntries = [
{ isIntersecting: true } as any,
{ isIntersecting: true } as any
];
handling(observeEntries);
await nextTick();
expect(isIntersecting.value).toBe(true);
});
describe("hooks onMounted/onUnmounted", () => {
it("should hook", () => {
const vm = new Vue({
template: "<div ref='el'></div>",
setup() {
const el = ref<Element>();
return {
...useIntersectionObserver(el, { rootMargin: "0px" }),
el
};
}
});
vm.$mount();
expect(observeFn).toHaveBeenCalledTimes(1);
expect(disconnectFn).toHaveBeenCalledTimes(0);
vm.$destroy();
expect(observeFn).toHaveBeenCalledTimes(1);
expect(disconnectFn).toHaveBeenCalledTimes(1);
});
it("should call observer on custom element", () => {
const vm = new Vue({
template: "<div></div>",
setup() {
return useIntersectionObserver(document.createElement("div"));
}
});
vm.$mount();
expect(observeFn).toHaveBeenCalledTimes(1);
expect(disconnectFn).toHaveBeenCalledTimes(0);
vm.$destroy();
expect(observeFn).toHaveBeenCalledTimes(1);
expect(disconnectFn).toHaveBeenCalledTimes(1);
});
it("should not call observer", () => {
const vm = new Vue({
template: "<div></div>",
setup() {
const el = ref<Element>();
return {
...useIntersectionObserver(el),
el
};
}
});
vm.$mount();
expect(observeFn).not.toHaveBeenCalled();
expect(disconnectFn).not.toHaveBeenCalled();
vm.$destroy();
expect(observeFn).not.toHaveBeenCalled();
expect(disconnectFn).toHaveBeenCalled();
});
});
});
| IntersectionObserver |
config.py | """anjou.config
Reads and parses the configuration YAML.
Functions:
read_config(str) -> dict (throws ConfigException)
"""
from cerberus import Validator
from cerberus.validator import DocumentError
from yaml import safe_load
from anjou.model import CONFIG_SCHEMA, ConfigException
def _is_valid(config: dict) -> bool:
|
def read_config(filename: str) -> dict:
"""Reads the config at the given filename or raises a ConfigException"""
try:
with open(filename, "r") as yaml_file:
yaml_cfg = safe_load(yaml_file)
if _is_valid(yaml_cfg):
return yaml_cfg
raise ConfigException("Config at %s is invalid" % filename)
except OSError as os_error:
raise ConfigException("Error reading %s" % filename) from os_error
| try:
validator = Validator()
return validator.validate(config, CONFIG_SCHEMA)
except DocumentError as document_error:
raise ConfigException("Document is invalid") from document_error |
validator.js | const parser = require('@asyncapi/parser')
const fs = require('fs') |
const validate = async (filePath) => {
if (typeof filePath !== 'string')
        throw new Error('filePath is not a string')
const dir = process.env.GITHUB_WORKSPACE || __dirname
const fullPath = path.resolve(dir, filePath)
console.log(`schema file full path:${fullPath}`)
const data = fs.readFileSync(fullPath, 'utf8')
await parser.parse(data)
console.log('schema is valid')
}
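// Usage sketch (hypothetical schema path):
//   validate('docs/asyncapi.yaml').catch(err => { console.error(err); process.exit(1) })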
module.exports = validate | const path = require('path') |
mutate_test.go | // Copyright 2018 Google LLC All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mutate_test
import (
"archive/tar"
"bytes"
"errors"
"io"
"io/ioutil"
"os"
"path/filepath"
"reflect"
"strings"
"testing"
"time"
"github.com/google/go-cmp/cmp"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/empty"
"github.com/google/go-containerregistry/pkg/v1/match"
"github.com/google/go-containerregistry/pkg/v1/mutate"
"github.com/google/go-containerregistry/pkg/v1/random"
"github.com/google/go-containerregistry/pkg/v1/stream"
"github.com/google/go-containerregistry/pkg/v1/tarball"
"github.com/google/go-containerregistry/pkg/v1/types"
"github.com/google/go-containerregistry/pkg/v1/validate"
)
func TestExtractWhiteout(t *testing.T) {
img, err := tarball.ImageFromPath("testdata/whiteout_image.tar", nil)
if err != nil {
t.Errorf("Error loading image: %v", err)
}
tarPath, _ := filepath.Abs("img.tar")
defer os.Remove(tarPath)
tr := tar.NewReader(mutate.Extract(img))
for {
header, err := tr.Next()
if errors.Is(err, io.EOF) {
break
}
name := header.Name
for _, part := range filepath.SplitList(name) {
if part == "foo" {
t.Errorf("whiteout file found in tar: %v", name)
}
}
}
}
func TestExtractOverwrittenFile(t *testing.T) {
img, err := tarball.ImageFromPath("testdata/overwritten_file.tar", nil)
if err != nil {
t.Fatalf("Error loading image: %v", err)
}
tr := tar.NewReader(mutate.Extract(img))
for {
header, err := tr.Next()
if errors.Is(err, io.EOF) {
break
}
name := header.Name
if strings.Contains(name, "foo.txt") {
var buf bytes.Buffer
buf.ReadFrom(tr)
if strings.Contains(buf.String(), "foo") {
t.Errorf("Contents of file were not correctly overwritten")
}
}
}
}
// TestExtractError tests that any errors encountered while extracting the
// image are propagated through the returned reader.
func TestExtractError(t *testing.T) {
rc := mutate.Extract(invalidImage{})
if _, err := io.Copy(ioutil.Discard, rc); err == nil {
t.Errorf("rc.Read; got nil error")
} else if !strings.Contains(err.Error(), errInvalidImage.Error()) {
t.Errorf("rc.Read; got %v, want %v", err, errInvalidImage)
}
}
// TestExtractPartialRead tests that the reader can be partially read (e.g.,
// tar headers) and closed without error.
func TestExtractPartialRead(t *testing.T) {
rc := mutate.Extract(invalidImage{})
if _, err := io.Copy(ioutil.Discard, io.LimitReader(rc, 1)); err != nil {
t.Errorf("Could not read one byte from reader")
}
if err := rc.Close(); err != nil {
t.Errorf("rc.Close: %v", err)
}
}
// invalidImage is an image which returns an error when Layers() is called.
type invalidImage struct {
v1.Image
}
var errInvalidImage = errors.New("invalid image")
func (invalidImage) Layers() ([]v1.Layer, error) {
return nil, errInvalidImage
}
func TestNoopCondition(t *testing.T) {
source := sourceImage(t)
result, err := mutate.AppendLayers(source, []v1.Layer{}...)
if err != nil {
t.Fatalf("Unexpected error creating a writable image: %v", err)
}
if !manifestsAreEqual(t, source, result) {
t.Error("manifests are not the same")
}
if !configFilesAreEqual(t, source, result) {
t.Fatal("config files are not the same")
}
}
func TestAppendWithAddendum(t *testing.T) {
source := sourceImage(t)
addendum := mutate.Addendum{
Layer: mockLayer{},
History: v1.History{
Author: "dave",
},
URLs: []string{
"example.com",
},
Annotations: map[string]string{
"foo": "bar",
},
MediaType: types.MediaType("foo"),
}
result, err := mutate.Append(source, addendum)
if err != nil {
t.Fatalf("failed to append: %v", err)
}
layers := getLayers(t, result)
if diff := cmp.Diff(layers[1], mockLayer{}); diff != "" {
t.Fatalf("correct layer was not appended (-got, +want) %v", diff)
}
if configSizesAreEqual(t, source, result) {
t.Fatal("adding a layer MUST change the config file size")
}
cf := getConfigFile(t, result)
if diff := cmp.Diff(cf.History[1], addendum.History); diff != "" {
t.Fatalf("the appended history is not the same (-got, +want) %s", diff)
}
m, err := result.Manifest()
if err != nil {
t.Fatalf("failed to get manifest: %v", err)
}
if diff := cmp.Diff(m.Layers[1].URLs, addendum.URLs); diff != "" {
t.Fatalf("the appended URLs is not the same (-got, +want) %s", diff)
}
if diff := cmp.Diff(m.Layers[1].Annotations, addendum.Annotations); diff != "" {
t.Fatalf("the appended Annotations is not the same (-got, +want) %s", diff)
}
if diff := cmp.Diff(m.Layers[1].MediaType, addendum.MediaType); diff != "" {
t.Fatalf("the appended MediaType is not the same (-got, +want) %s", diff)
}
}
func TestAppendLayers(t *testing.T) {
source := sourceImage(t)
layer, err := random.Layer(100, types.DockerLayer)
if err != nil {
t.Fatal(err)
}
result, err := mutate.AppendLayers(source, layer)
if err != nil {
t.Fatalf("failed to append a layer: %v", err)
}
if manifestsAreEqual(t, source, result) {
t.Fatal("appending a layer did not mutate the manifest")
}
if configFilesAreEqual(t, source, result) {
t.Fatal("appending a layer did not mutate the config file")
}
if configSizesAreEqual(t, source, result) {
t.Fatal("adding a layer MUST change the config file size")
}
layers := getLayers(t, result)
if got, want := len(layers), 2; got != want {
t.Fatalf("Layers did not return the appended layer "+
"- got size %d; expected 2", len(layers))
}
if layers[1] != layer {
t.Errorf("correct layer was not appended: got %v; want %v", layers[1], layer)
}
if err := validate.Image(result); err != nil {
t.Errorf("validate.Image() = %v", err)
}
}
func TestMutateConfig(t *testing.T) {
source := sourceImage(t)
cfg, err := source.ConfigFile()
if err != nil {
t.Fatalf("error getting source config file")
}
newEnv := []string{"foo=bar"}
cfg.Config.Env = newEnv
result, err := mutate.Config(source, cfg.Config)
if err != nil {
t.Fatalf("failed to mutate a config: %v", err)
}
if manifestsAreEqual(t, source, result) {
t.Error("mutating the config MUST mutate the manifest")
}
if configFilesAreEqual(t, source, result) {
t.Error("mutating the config did not mutate the config file")
}
if configSizesAreEqual(t, source, result) {
t.Error("adding an environment variable MUST change the config file size")
}
if configDigestsAreEqual(t, source, result) {
t.Errorf("mutating the config MUST mutate the config digest")
}
if !reflect.DeepEqual(cfg.Config.Env, newEnv) {
t.Errorf("incorrect environment set %v!=%v", cfg.Config.Env, newEnv)
}
if err := validate.Image(result); err != nil {
t.Errorf("validate.Image() = %v", err)
}
}
type arbitrary struct {
}
func (arbitrary) RawManifest() ([]byte, error) {
return []byte(`{"hello":"world"}`), nil
}
func TestAnnotations(t *testing.T) {
anns := map[string]string{
"foo": "bar",
}
for _, c := range []struct {
desc string
in mutate.Annotatable
want string
}{{
desc: "image",
in: empty.Image,
want: `{"schemaVersion":2,"mediaType":"application/vnd.docker.distribution.manifest.v2+json","config":{"mediaType":"application/vnd.docker.container.image.v1+json","size":115,"digest":"sha256:5b943e2b943f6c81dbbd4e2eca5121f4fcc39139e3d1219d6d89bd925b77d9fe"},"layers":[],"annotations":{"foo":"bar"}}`,
}, {
desc: "index",
in: empty.Index,
want: `{"schemaVersion":2,"manifests":null,"annotations":{"foo":"bar"}}`,
}, {
desc: "arbitrary",
in: arbitrary{},
want: `{"annotations":{"foo":"bar"},"hello":"world"}`,
}} {
t.Run(c.desc, func(t *testing.T) {
got, err := mutate.Annotations(c.in, anns).RawManifest()
if err != nil {
t.Fatalf("Annotations: %v", err)
}
if d := cmp.Diff(c.want, string(got)); d != "" {
t.Errorf("Diff(-want,+got): %s", d)
}
})
}
}
func TestMutateCreatedAt(t *testing.T) {
source := sourceImage(t)
want := time.Now().Add(-2 * time.Minute)
result, err := mutate.CreatedAt(source, v1.Time{Time: want})
if err != nil {
t.Fatalf("CreatedAt: %v", err)
}
if configDigestsAreEqual(t, source, result) {
t.Errorf("mutating the created time MUST mutate the config digest")
}
got := getConfigFile(t, result).Created.Time
if got != want {
t.Errorf("mutating the created time MUST mutate the time from %v to %v", got, want)
}
}
func TestMutateTime(t *testing.T) {
source := sourceImage(t)
want := time.Time{}
result, err := mutate.Time(source, want)
if err != nil {
t.Fatalf("failed to mutate a config: %v", err)
}
if configDigestsAreEqual(t, source, result) {
t.Fatal("mutating the created time MUST mutate the config digest")
}
got := getConfigFile(t, result).Created.Time
if got != want {
t.Fatalf("mutating the created time MUST mutate the time from %v to %v", got, want)
}
}
func TestMutateMediaType(t *testing.T) {
want := types.OCIManifestSchema1
wantCfg := types.OCIConfigJSON
img := mutate.MediaType(empty.Image, want)
img = mutate.ConfigMediaType(img, wantCfg)
got, err := img.MediaType()
if err != nil {
t.Fatal(err)
}
if want != got {
t.Errorf("%q != %q", want, got)
}
manifest, err := img.Manifest()
if err != nil {
t.Fatal(err)
}
if manifest.MediaType != "" {
t.Errorf("MediaType should not be set for OCI media types: %v", manifest.MediaType)
}
if gotCfg := manifest.Config.MediaType; gotCfg != wantCfg {
t.Errorf("manifest.Config.MediaType = %v, wanted %v", gotCfg, wantCfg)
}
want = types.DockerManifestSchema2
wantCfg = types.DockerConfigJSON
img = mutate.MediaType(img, want)
img = mutate.ConfigMediaType(img, wantCfg)
got, err = img.MediaType()
if err != nil {
t.Fatal(err)
}
if want != got {
t.Errorf("%q != %q", want, got)
}
manifest, err = img.Manifest()
if err != nil {
t.Fatal(err)
}
if manifest.MediaType != want {
t.Errorf("MediaType should be set for Docker media types: %v", manifest.MediaType)
}
if gotCfg := manifest.Config.MediaType; gotCfg != wantCfg {
t.Errorf("manifest.Config.MediaType = %v, wanted %v", gotCfg, wantCfg)
}
want = types.OCIImageIndex
idx := mutate.IndexMediaType(empty.Index, want)
got, err = idx.MediaType()
if err != nil {
t.Fatal(err)
}
if want != got {
t.Errorf("%q != %q", want, got)
}
im, err := idx.IndexManifest()
if err != nil {
t.Fatal(err)
}
if im.MediaType != "" {
t.Errorf("MediaType should not be set for OCI media types: %v", im.MediaType)
}
want = types.DockerManifestList
idx = mutate.IndexMediaType(idx, want)
got, err = idx.MediaType()
if err != nil {
t.Fatal(err)
}
if want != got {
t.Errorf("%q != %q", want, got)
}
im, err = idx.IndexManifest()
if err != nil {
t.Fatal(err)
}
if im.MediaType != want {
t.Errorf("MediaType should be set for Docker media types: %v", im.MediaType)
}
}
func TestAppendStreamableLayer(t *testing.T) {
img, err := mutate.AppendLayers(
sourceImage(t),
stream.NewLayer(ioutil.NopCloser(strings.NewReader(strings.Repeat("a", 100)))),
stream.NewLayer(ioutil.NopCloser(strings.NewReader(strings.Repeat("b", 100)))),
stream.NewLayer(ioutil.NopCloser(strings.NewReader(strings.Repeat("c", 100)))),
)
if err != nil {
t.Fatalf("AppendLayers: %v", err)
}
// Until the streams are consumed, the image manifest is not yet computed.
if _, err := img.Manifest(); !errors.Is(err, stream.ErrNotComputed) {
t.Errorf("Manifest: got %v, want %v", err, stream.ErrNotComputed)
}
// We can still get Layers while some are not yet computed.
ls, err := img.Layers()
if err != nil {
t.Errorf("Layers: %v", err)
}
wantDigests := []string{
"sha256:bfa1c600931132f55789459e2f5a5eb85659ac91bc5a54ce09e3ed14809f8a7f",
"sha256:77a52b9a141dcc4d3d277d053193765dca725626f50eaf56b903ac2439cf7fd1",
"sha256:b78472d63f6e3d31059819173b56fcb0d9479a2b13c097d4addd84889f6aff06",
}
for i, l := range ls[1:] {
rc, err := l.Compressed()
if err != nil {
t.Errorf("Layer %d Compressed: %v", i, err)
}
// Consume the layer's stream and close it to compute the
// layer's metadata.
if _, err := io.Copy(ioutil.Discard, rc); err != nil {
t.Errorf("Reading layer %d: %v", i, err)
}
if err := rc.Close(); err != nil {
t.Errorf("Closing layer %d: %v", i, err)
}
// The layer's metadata is now available.
h, err := l.Digest()
if err != nil {
t.Errorf("Digest after consuming layer %d: %v", i, err)
}
if h.String() != wantDigests[i] {
t.Errorf("Layer %d digest got %q, want %q", i, h, wantDigests[i])
}
}
// Now that the streamable layers have been consumed, the image's
// manifest can be computed.
if _, err := img.Manifest(); err != nil {
t.Errorf("Manifest: %v", err)
}
h, err := img.Digest()
if err != nil {
t.Errorf("Digest: %v", err)
}
wantDigest := "sha256:14d140947afedc6901b490265a08bc8ebe7f9d9faed6fdf19a451f054a7dd746"
if h.String() != wantDigest {
t.Errorf("Image digest got %q, want %q", h, wantDigest)
}
}
func TestCanonical(t *testing.T) {
source := sourceImage(t)
img, err := mutate.Canonical(source)
if err != nil {
t.Fatal(err)
}
sourceCf, err := source.ConfigFile()
if err != nil {
t.Fatal(err)
}
cf, err := img.ConfigFile()
if err != nil {
t.Fatal(err)
}
var want, got string
want = cf.Architecture
got = sourceCf.Architecture
if want != got {
t.Errorf("%q != %q", want, got)
}
want = cf.OS
got = sourceCf.OS
if want != got {
t.Errorf("%q != %q", want, got)
}
want = cf.OSVersion
got = sourceCf.OSVersion
if want != got {
t.Errorf("%q != %q", want, got)
}
for _, s := range []string{
cf.Container,
cf.Config.Hostname,
cf.DockerVersion,
} {
if s != "" {
t.Errorf("non-zeroed string: %v", s)
}
}
expectedLayerTime := time.Unix(0, 0)
layers := getLayers(t, img)
for _, layer := range layers {
assertMTime(t, layer, expectedLayerTime)
}
}
func TestRemoveManifests(t *testing.T) |
func TestImageImmutability(t *testing.T) {
img := mutate.MediaType(empty.Image, types.OCIManifestSchema1)
t.Run("manifest", func(t *testing.T) {
// Check that Manifest is immutable.
changed, err := img.Manifest()
if err != nil {
t.Errorf("Manifest() = %v", err)
}
want := changed.DeepCopy() // Create a copy of original before mutating it.
changed.MediaType = types.DockerManifestList
if got, err := img.Manifest(); err != nil {
t.Errorf("Manifest() = %v", err)
} else if !cmp.Equal(got, want) {
t.Errorf("manifest changed! %s", cmp.Diff(got, want))
}
})
t.Run("config file", func(t *testing.T) {
// Check that ConfigFile is immutable.
changed, err := img.ConfigFile()
if err != nil {
t.Errorf("ConfigFile() = %v", err)
}
want := changed.DeepCopy() // Create a copy of original before mutating it.
changed.Author = "Jay Pegg"
if got, err := img.ConfigFile(); err != nil {
t.Errorf("ConfigFile() = %v", err)
} else if !cmp.Equal(got, want) {
t.Errorf("ConfigFile changed! %s", cmp.Diff(got, want))
}
})
}
func assertMTime(t *testing.T, layer v1.Layer, expectedTime time.Time) {
l, err := layer.Uncompressed()
if err != nil {
t.Fatalf("reading layer failed: %v", err)
}
tr := tar.NewReader(l)
for {
header, err := tr.Next()
if errors.Is(err, io.EOF) {
break
}
if err != nil {
t.Fatalf("Error reading layer: %v", err)
}
mtime := header.ModTime
		if !mtime.Equal(expectedTime) {
t.Errorf("unexpected mod time for layer. expected %v, got %v.", expectedTime, mtime)
}
}
}
func sourceImage(t *testing.T) v1.Image {
t.Helper()
image, err := tarball.ImageFromPath("testdata/source_image.tar", nil)
if err != nil {
t.Fatalf("Error loading image: %v", err)
}
return image
}
func getManifest(t *testing.T, i v1.Image) *v1.Manifest {
t.Helper()
m, err := i.Manifest()
if err != nil {
t.Fatalf("Error fetching image manifest: %v", err)
}
return m
}
func getLayers(t *testing.T, i v1.Image) []v1.Layer {
t.Helper()
l, err := i.Layers()
if err != nil {
t.Fatalf("Error fetching image layers: %v", err)
}
return l
}
func getConfigFile(t *testing.T, i v1.Image) *v1.ConfigFile {
t.Helper()
c, err := i.ConfigFile()
if err != nil {
t.Fatalf("Error fetching image config file: %v", err)
}
return c
}
func configFilesAreEqual(t *testing.T, first, second v1.Image) bool {
t.Helper()
fc := getConfigFile(t, first)
sc := getConfigFile(t, second)
return cmp.Equal(fc, sc)
}
func configDigestsAreEqual(t *testing.T, first, second v1.Image) bool {
t.Helper()
fm := getManifest(t, first)
sm := getManifest(t, second)
return fm.Config.Digest == sm.Config.Digest
}
func configSizesAreEqual(t *testing.T, first, second v1.Image) bool {
t.Helper()
fm := getManifest(t, first)
sm := getManifest(t, second)
return fm.Config.Size == sm.Config.Size
}
func manifestsAreEqual(t *testing.T, first, second v1.Image) bool {
t.Helper()
fm := getManifest(t, first)
sm := getManifest(t, second)
return cmp.Equal(fm, sm)
}
type mockLayer struct{}
func (m mockLayer) Digest() (v1.Hash, error) {
return v1.Hash{Algorithm: "fake", Hex: "digest"}, nil
}
func (m mockLayer) DiffID() (v1.Hash, error) {
return v1.Hash{Algorithm: "fake", Hex: "diff id"}, nil
}
func (m mockLayer) MediaType() (types.MediaType, error) {
return "some-media-type", nil
}
func (m mockLayer) Size() (int64, error) { return 137438691328, nil }
func (m mockLayer) Compressed() (io.ReadCloser, error) {
return ioutil.NopCloser(strings.NewReader("compressed times")), nil
}
func (m mockLayer) Uncompressed() (io.ReadCloser, error) {
return ioutil.NopCloser(strings.NewReader("uncompressed")), nil
}
| {
// Load up the registry.
count := 3
for i := 0; i < count; i++ {
ii, err := random.Index(1024, int64(count), int64(count))
if err != nil {
t.Fatal(err)
}
		// test removing the first, second, or third manifest from the index
manifest, err := ii.IndexManifest()
if err != nil {
t.Fatal(err)
}
if len(manifest.Manifests) != count {
t.Fatalf("mismatched manifests on setup, had %d, expected %d", len(manifest.Manifests), count)
}
digest := manifest.Manifests[i].Digest
ii = mutate.RemoveManifests(ii, match.Digests(digest))
manifest, err = ii.IndexManifest()
if err != nil {
t.Fatal(err)
}
if len(manifest.Manifests) != (count - 1) {
t.Fatalf("mismatched manifests after removal, had %d, expected %d", len(manifest.Manifests), count-1)
}
for j, m := range manifest.Manifests {
if m.Digest == digest {
t.Fatalf("unexpectedly found removed hash %v at position %d", digest, j)
}
}
}
} |
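// For reference, a minimal sketch (assumed shape, not the library's actual
// implementation) of the kind of predicate match.Digests builds: it reports
// whether a descriptor's digest is in the given set.
//
//	func digestsMatch(digests ...v1.Hash) func(v1.Descriptor) bool {
//		return func(desc v1.Descriptor) bool {
//			for _, d := range digests {
//				if desc.Digest == d {
//					return true
//				}
//			}
//			return false
//		}
//	}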
nonsecure_content_warning.tsx | import * as React from "react";
import { API } from "../../../api";
export interface NonsecureContentProps {
/** The warning to show the user if one of the URLs is insecure. */
children?: React.ReactChild;
urls: string[];
}
/** Given a list of URLs, returns a bool indicating whether or not all URLs in the
* set are HTTPS:// */
export const allAreHttps = (urls: string[]) => !urls
.map(x => API.parseURL(x).protocol === "https:")
.includes(false);
/** Stateless component that renders `props.children` when `props.urls`
* contains a non-HTTPS URL. */ | } | export function NonsecureContentWarning(props: NonsecureContentProps) {
const { urls, children } = props;
return <div>{allAreHttps(urls) ? "" : children}</div>; |
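// Illustrative usage (the URL is a placeholder): the children render only
// when at least one URL is not served over HTTPS.
//
// <NonsecureContentWarning urls={["http://example.com/img.png"]}>
//   <p>Warning: this page references insecure content.</p>
// </NonsecureContentWarning>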
02_Post_Order_Traversing_of_Tree.py | """
Node is defined as
self.left (the left child of the node)
self.right (the right child of the node)
self.data (the value of the node)
"""
def | (root):
#Write your code here
if(root == None):
return
postOrder(root.left)
postOrder(root.right)
    print(root.data, end=' ')
| postOrder |
erg_2_fat_5_vektor.js | function | (arr) {
var obj = {};
var ret_arr = [];
for (var i = 0; i < arr.length; i++) {
obj[arr[i]] = true;
}
for (var key in obj) {
ret_arr.push(key);
}
return ret_arr;
}
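// Illustrative: remove_duplicates([1, 2, 2, 3]) yields ["1", "2", "3"]; object
// keys coerce entries to strings, which is why the results are mapped back
// through Number further below.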
var intersect = function (nums1, nums2) {
var result = nums1.filter(x => nums2.includes(x));
return [...new Set(result)];
};
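// Illustrative: intersect([1, 2, 2, 3], [2, 3, 4]) returns [2, 3]; the Set
// drops any duplicates that survive the filter.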
var recipes_top10 = [240619, 236805, 65671, 8673, 8763, 72311, 112206, 17311,
12825, 46813
];
var recipes_user = [229277, 231233, 8855, 30007, 59303
];
var top10_list = [];
db.getCollection('recipes_without_reviews').find({
id: {
$in: recipes_top10
}
}).forEach(
function (item) {
for (i = 0; i < recipes_top10.length; i++) {
if (recipes_top10[i] == item.id) {
top10_list[i] = item;
}
}
})
var ing_list_id_top10 = [];
top10_list.forEach(function (item) {
item.ingredients.forEach(function (element) {
ing_list_id_top10.push(element.id);
});
});
var recipes_user_list = [];
db.getCollection('recipes_without_reviews').find({
id: {
$in: recipes_user
}
}).forEach(
function (item) {
for (i = 0; i < recipes_user.length; i++) {
if (recipes_user[i] == item.id) {
recipes_user_list[i] = item;
}
}
})
print(recipes_user_list)
var i = 1;
var ing_list_id_userprofile = [];
recipes_user_list.forEach(function (item) {
item.ingredients.forEach(function (element) {
ing_list_id_userprofile.push(element.id);
});
});
// remove duplicate ingredients from top 10 and user list
var no_ing_dupes_top10 = remove_duplicates(ing_list_id_top10)
var no_ing_dupes_user = remove_duplicates(ing_list_id_userprofile)
var ing_intersect = intersect(no_ing_dupes_top10, no_ing_dupes_user)
var test = intersect(recipes_top10, recipes_user)
// convert string ids into ints
var no_ing_dupes_top10 = no_ing_dupes_top10.map(Number)
var no_ing_dupes_user = no_ing_dupes_user.map(Number)
var ing_intersect = ing_intersect.map(Number)
var remove_ing_id = [16421, 4342, 4397, 16406, 16157, 6307, 6494, 2496, 16238, 16317]
// remove unwanted ingredients using the remove_ing_id array
var no_ing_dupes_user = no_ing_dupes_user.filter(function (el) {
return remove_ing_id.indexOf(el) < 0;
});
var no_ing_dupes_top10 = no_ing_dupes_top10.filter(function (el) {
return remove_ing_id.indexOf(el) < 0;
});
var ing_intersect = ing_intersect.filter(function (el) {
return remove_ing_id.indexOf(el) < 0;
});
var result_list = [];
db.getCollection('recipes_without_reviews').find({
id: {
$in: recipes_top10
}
}).forEach(
function (item) {
for (i = 0; i < recipes_top10.length; i++) {
if (recipes_top10[i] == item.id) {
result_list[i] = item;
}
}
})
var recipe_position = 1;
var count_intersect_ings = 0;
result_list.forEach(function (item) {
var ing_counter = 0;
var count_intersct_ings_per_recipe = 0;
print("##################################################################### \n"+
"+Pos:" + recipe_position++)
print(item.name + ", ID: " + item.id + "\n")
item.nutritions.forEach(function (element) {
if (element.name == 'Fat' || element.name == 'Carbohydrates') {
print(element.name + ": " + element.display_value);
}
});
print("\nIngredientss: ")
item.ingredients.forEach(function (element) {
ing_counter++;
for (i = 0; i < ing_intersect.length; i++) {
if (ing_intersect[i] == element.id) {
print("ing_id: " + element.id + " ing_name: " + element.name)
count_intersect_ings++;
count_intersct_ings_per_recipe++;
}
}
});
print("\ning_number: " + count_intersct_ings_per_recipe + "/" + ing_counter + " item_rating: " + item.rating + ", made_it_count: " +
item.made_it_count + ", review_count: " + item.review_count + ", rating_count: " + item.rating_count + "\n");
});
var a = ing_list_id_top10.length / top10_list.length
var b = ing_intersect.length / a
print("Einzigartige Ueberschneidungszutaten TOP10 VS Userprofil: " + ing_intersect.length + "\n" +
"Aufsummiert Ueberschneidungszutaten: " + count_intersect_ings + "\n" +
"Einzigartige Zutaten TOP10: " + no_ing_dupes_top10.length + "\n" +
"Durchschnitt Anzahl Zutaten Top10: " + ing_list_id_top10.length / top10_list.length + "\n" +
"Einzigartige Zutaten User: " + no_ing_dupes_user.length + "\n" +
"Ueberschneidungszutaten/Durch.Zutaten Top10: " + b.toFixed(2).toString().replace(".", ",") + "\n"
)
print(ing_intersect.length + "\n" +
count_intersect_ings + "\n" +
no_ing_dupes_top10.length + "\n" +
a.toString().replace(".", ",") + "\n" +
no_ing_dupes_user.length + "\n" +
b.toFixed(2).toString().replace(".", ",") + "\n"
)
var result_list = [];
db.getCollection('recipes_without_reviews').find({
id: {
$in: recipes_user
}
}).forEach(
function (item) {
for (i = 0; i < recipes_user.length; i++) {
if (recipes_user[i] == item.id) {
result_list[i] = item;
}
}
})
var recipe_position = 1;
var count_intersect_ings = 0;
result_list.forEach(function (item) {
var ing_counter = 0;
var count_intersct_ings_per_recipe = 0;
print("##################################################################### \n"+
"+Pos:" + recipe_position++)
print(item.name + ", ID: " + item.id + "\n")
item.nutritions.forEach(function (element) {
if (element.name == 'Fat' || element.name == 'Carbohydrates') {
print(element.name + ": " + element.display_value);
}
});
print("\nIngredientss: ")
item.ingredients.forEach(function (element) {
ing_counter++;
for (i = 0; i < ing_intersect.length; i++) {
if (ing_intersect[i] == element.id) {
print("ing_id: " + element.id + " ing_name: " + element.name)
count_intersect_ings++;
count_intersct_ings_per_recipe++;
}
}
});
print("\ning_number: " + count_intersct_ings_per_recipe + "/" + ing_counter + " item_rating: " + item.rating + ", made_it_count: " +
item.made_it_count + ", review_count: " + item.review_count + ", rating_count: " + item.rating_count + "\n");
}); | remove_duplicates |
count_server.py | # Auto-generated at 2021-09-27T17:01:22.744049+08:00
# from: Justice DsmController Service (2.4.0)
# Copyright (c) 2018 - 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from .....core import Operation
from .....core import HttpResponse
from ...models import ModelsCountServerResponse
from ...models import ResponseError
class CountServer(Operation):
"""Count all managed servers (CountServer)
Properties:
url: /dsmcontroller/admin/namespaces/{namespace}/servers/count
method: GET
tags: Admin
consumes: ["application/json"]
produces: ["application/json"]
security: bearer
namespace: (namespace) REQUIRED str in path
Responses:
200: OK - ModelsCountServerResponse (servers listed)
401: Unauthorized - ResponseError (Unauthorized)
500: Internal Server Error - ResponseError (Internal Server Error)
"""
# region fields
_url: str = "/dsmcontroller/admin/namespaces/{namespace}/servers/count"
_method: str = "GET"
_consumes: List[str] = ["application/json"]
_produces: List[str] = ["application/json"]
_security: Optional[str] = "bearer"
_location_query: str = None
namespace: str # REQUIRED in [path]
# endregion fields
# region properties
@property
def url(self) -> str:
return self._url
@property
def method(self) -> str:
return self._method
@property
def consumes(self) -> List[str]:
return self._consumes
@property
def produces(self) -> List[str]:
return self._produces
@property
def security(self) -> Optional[str]:
return self._security
@property
def location_query(self) -> str:
return self._location_query
# endregion properties
# region get methods
def get_full_url(self, base_url: Union[None, str] = None) -> str:
result = base_url if base_url is not None else ""
# path params
url = self.url
for k, v in self.get_path_params().items():
url = url.replace(f"{{{k}}}", v)
result += url
return result
# noinspection PyMethodMayBeStatic
def get_all_required_fields(self) -> List[str]:
return [
"namespace",
] | # endregion get methods
# region get_x_params methods
def get_all_params(self) -> dict:
return {
"path": self.get_path_params(),
}
def get_path_params(self) -> dict:
result = {}
if hasattr(self, "namespace"):
result["namespace"] = self.namespace
return result
# endregion get_x_params methods
# region is/has methods
def is_valid(self) -> bool:
if not hasattr(self, "namespace") or self.namespace is None:
return False
return True
# endregion is/has methods
# region with_x methods
def with_namespace(self, value: str) -> CountServer:
self.namespace = value
return self
# endregion with_x methods
# region to methods
def to_dict(self, include_empty: bool = False) -> dict:
result = {}
if hasattr(self, "namespace") and self.namespace:
result["namespace"] = str(self.namespace)
elif include_empty:
result["namespace"] = str()
return result
# endregion to methods
# region response methods
# noinspection PyMethodMayBeStatic
def parse_response(self, code: int, content_type: str, content: Any) -> Tuple[Union[None, ModelsCountServerResponse], Union[None, ResponseError]]:
"""Parse the given response.
200: OK - ModelsCountServerResponse (servers listed)
401: Unauthorized - ResponseError (Unauthorized)
500: Internal Server Error - ResponseError (Internal Server Error)
"""
if code == 200:
return ModelsCountServerResponse.create_from_dict(content), None
if code == 401:
return None, ResponseError.create_from_dict(content)
if code == 500:
return None, ResponseError.create_from_dict(content)
was_handled, undocumented_response = HttpResponse.try_create_undocumented_response(code, content)
if was_handled:
return None, undocumented_response
return None, HttpResponse.create_unhandled_error()
# endregion response methods
# region static methods
@classmethod
def create(
cls,
namespace: str,
) -> CountServer:
instance = cls()
instance.namespace = namespace
return instance
@classmethod
def create_from_dict(cls, dict_: dict, include_empty: bool = False) -> CountServer:
instance = cls()
if "namespace" in dict_ and dict_["namespace"] is not None:
instance.namespace = str(dict_["namespace"])
elif include_empty:
instance.namespace = str()
return instance
@staticmethod
def get_field_info() -> Dict[str, str]:
return {
"namespace": "namespace",
}
# endregion static methods | |
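# Illustrative usage sketch (namespace and base URL are placeholders):
#
#   op = CountServer.create(namespace="my-namespace")
#   op.get_full_url(base_url="https://api.example.net")
#   # -> "https://api.example.net/dsmcontroller/admin/namespaces/my-namespace/servers/count"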
lib.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Collection types.
//!
//! See [std::collections](../std/collections) for a detailed discussion of collections in Rust.
// Do not remove on snapshot creation. Needed for bootstrap. (Issue #22364)
#![cfg_attr(stage0, feature(custom_attribute))]
#![crate_name = "collections"]
#![unstable(feature = "collections")]
#![staged_api]
#![crate_type = "rlib"]
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
html_root_url = "http://doc.rust-lang.org/nightly/",
html_playground_url = "http://play.rust-lang.org/")]
#![doc(test(no_crate_inject))]
#![allow(trivial_casts)]
#![feature(alloc)]
#![feature(box_syntax)]
#![feature(box_patterns)]
#![feature(core)]
#![feature(lang_items)]
#![feature(staged_api)]
#![feature(unboxed_closures)]
#![feature(unicode)]
#![feature(unique)]
#![feature(unsafe_no_drop_flag, filling_drop)]
#![feature(step_by)]
#![feature(str_char)]
#![feature(str_words)]
#![feature(slice_patterns)]
#![feature(utf8_error)]
#![cfg_attr(test, feature(rand, rustc_private, test, hash, collections,
collections_drain, collections_range))]
#![cfg_attr(test, allow(deprecated))] // rand
#![feature(no_std)]
#![no_std]
#[macro_use]
extern crate core;
extern crate rustc_unicode;
extern crate alloc;
#[cfg(test)] #[macro_use] extern crate std;
#[cfg(test)] extern crate test;
pub use binary_heap::BinaryHeap;
pub use bit_vec::BitVec;
pub use bit_set::BitSet;
pub use btree_map::BTreeMap;
pub use btree_set::BTreeSet;
pub use linked_list::LinkedList;
pub use enum_set::EnumSet;
pub use vec_deque::VecDeque;
pub use string::String;
pub use vec::Vec;
pub use vec_map::VecMap;
// Needed for the vec! macro
pub use alloc::boxed;
#[macro_use]
mod macros;
pub mod binary_heap;
mod bit;
mod btree;
pub mod borrow;
pub mod enum_set;
pub mod fmt;
pub mod linked_list;
pub mod range;
pub mod slice;
pub mod str;
pub mod string;
pub mod vec;
pub mod vec_deque;
pub mod vec_map;
#[unstable(feature = "collections",
reason = "RFC 509")]
pub mod bit_vec {
pub use bit::{BitVec, Iter};
}
#[unstable(feature = "collections",
reason = "RFC 509")]
pub mod bit_set {
pub use bit::{BitSet, Union, Intersection, Difference, SymmetricDifference};
pub use bit::SetIter as Iter;
}
#[stable(feature = "rust1", since = "1.0.0")]
pub mod btree_map {
pub use btree::map::*;
}
#[stable(feature = "rust1", since = "1.0.0")]
pub mod btree_set {
pub use btree::set::*;
}
// FIXME(#14344) this shouldn't be necessary
#[doc(hidden)]
pub fn fixme_14344_be_sure_to_link_to_collections() {}
#[cfg(not(test))]
mod std {
pub use core::ops; // RangeFull
}
/// An endpoint of a range of keys.
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub enum Bound<T> {
/// An inclusive bound. | Unbounded,
} | Included(T),
/// An exclusive bound.
Excluded(T),
/// An infinite endpoint. Indicates that there is no bound in this direction. |
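// Illustrative: a half-open range of keys such as 3 <= key < 7 is expressed
// as the pair (Bound::Included(3), Bound::Excluded(7)).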
lib.rs | // Copyright 2018-2020 Parity Technologies (UK) Ltd.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![cfg_attr(not(feature = "std"), no_std)]
use ink_lang as ink;
#[ink::contract]
mod erc20_delegator_test {
#[cfg(not(feature = "ink-as-dependency"))]
use ink_storage::{
traits::{
PackedLayout,
SpreadLayout,
},
collections::HashMap as StorageHashMap,
// Vec as StorageVec,
};
// use ink_prelude::string::String;
use erc20::Erc20;
use erc20_test::Erc20Test;
    /// A DAO template: the pair of code hashes an owner registers for later instantiation.
#[derive(scale::Encode, scale::Decode, Clone, Copy, SpreadLayout, PackedLayout)]
#[cfg_attr(
feature = "std",
derive(scale_info::TypeInfo, ink_storage::traits::StorageLayout)
)]
pub struct DAOTemplate {
owner: AccountId,
erc20_code_hash: Hash,
erc20_test_code_hash: Hash,
}
    /// An instantiated DAO: its owner together with the deployed Erc20 and Erc20Test contracts.
#[derive(scale::Encode, scale::Decode, Clone, SpreadLayout, PackedLayout)]
#[cfg_attr(
feature = "std",
derive(scale_info::TypeInfo, ink_storage::traits::StorageLayout)
)]
pub struct DAOInstance {
owner: AccountId,
erc20: Erc20,
erc20_test: Erc20Test,
}
#[ink(storage)]
pub struct Erc20DelegatorTest {
owner: AccountId,
template_index: u64,
template_map: StorageHashMap<u64, DAOTemplate>,
instance_index: u64,
instance_map: StorageHashMap<u64, DAOInstance>,
}
impl Erc20DelegatorTest {
#[ink(constructor)]
pub fn new(controller: AccountId) -> Self {
let instance = Self {
owner: controller,
template_index: 0,
template_map: StorageHashMap::new(),
instance_index: 0,
instance_map: StorageHashMap::new(),
};
instance
}
#[ink(message)]
pub fn add_template(&mut self, erc20_code_hash: Hash, erc20_test_code_hash: Hash) -> bool {
            assert!(self.template_index + 1 > self.template_index);
let from = self.env().caller();
// TODO add template event, declare index, owner
self.template_map.insert(self.template_index, DAOTemplate {
owner: from,
erc20_code_hash,
erc20_test_code_hash,
});
self.template_index += 1;
true
}
#[ink(message)]
pub fn query_all_template(&self, index: u64) -> DAOTemplate {
return *self.template_map.get(&index).unwrap()
}
#[ink(message)]
pub fn instance_by_template(&mut self, index: u64, initial_supply: u64, decimals: u8, controller: AccountId) -> bool {
            assert!(self.instance_index + 1 > self.instance_index);
            let total_balance = Self::env().balance();
            assert!(total_balance >= 20);
// query template info
let template = self.template_map.get(&index).unwrap();
// instance erc20
// TODO add instance event
let erc_instance = Erc20::new(initial_supply, decimals, controller)
.endowment(total_balance / 4)
.code_hash(template.erc20_code_hash)
.instantiate()
.expect("failed at instantiating the `Erc20` contract");
// instance erc20 test
// TODO add instance event, declare index, type, instance/accountId
let erc_test_instance = Erc20Test::new(erc_instance.clone())
.endowment(total_balance / 4)
.code_hash(template.erc20_test_code_hash)
.instantiate()
.expect("failed at instantiating the `Erc20` contract");
// put instance
self.instance_map.insert(self.instance_index, DAOInstance {
owner: controller,
erc20: erc_instance,
erc20_test: erc_test_instance,
});
self.instance_index += 1;
true
}
#[ink(message)]
pub fn | (&mut self, index: u64, to: AccountId, value: u64) -> bool {
let instance = self.instance_map.get_mut(&index).unwrap();
instance.erc20.transfer(to, value)
}
#[ink(message)]
pub fn transfer_by_erc20_test_in_erc20(&mut self, index: u64, to: AccountId, value: u64) -> bool {
let instance = self.instance_map.get_mut(&index).unwrap();
instance.erc20_test.transfer_in_erc20(to, value)
}
}
}
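// Illustrative lifecycle (hashes, accounts, and amounts are placeholders):
// 1. add_template(erc20_code_hash, erc20_test_code_hash) registers template 0;
// 2. instance_by_template(0, supply, decimals, controller) deploys an Erc20 and
//    an Erc20Test, each endowed with a quarter of the delegator's balance;
// 3. transfer_by_erc20(0, to, value) forwards the call to that Erc20 instance.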
| transfer |
helpers.js | export const defaultTo = {
fn: () => {},
str: '',
zero: 0,
arr: [], | obj: {},
}; |
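// Illustrative usage (maybeNames is a hypothetical, possibly-undefined value):
// const names = maybeNames || defaultTo.arr;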
|
usb.js | "use strict";
var __importDefault = (this && this.__importDefault) || function (mod) {
return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
const protocol_1 = __importDefault(require("../../protocol"));
const command_1 = __importDefault(require("../../command"));
const RE_OK = /restarting in/;
class UsbCommand extends command_1.default {
execute() {
this._send('usb:');
return this.parser.readAscii(4).then((reply) => {
switch (reply) {
case protocol_1.default.OKAY:
return this.parser.readAll().then(function (value) {
if (RE_OK.test(value.toString())) {
return true;
| }
});
case protocol_1.default.FAIL:
return this.parser.readError();
default:
return this.parser.unexpected(reply, 'OKAY or FAIL');
}
});
}
}
exports.default = UsbCommand; | }
else {
throw new Error(value.toString().trim());
|
subscription.rs | use crate::clocks::WasiMonotonicClock;
use crate::file::WasiFile;
use crate::Error;
use bitflags::bitflags;
use cap_std::time::{Duration, Instant};
use std::cell::{Cell, Ref};
bitflags! {
pub struct RwEventFlags: u32 {
const HANGUP = 0b1;
}
}
pub struct RwSubscription<'a> {
pub file: Ref<'a, dyn WasiFile>,
status: Cell<Option<Result<(u64, RwEventFlags), Error>>>,
}
impl<'a> RwSubscription<'a> {
pub fn new(file: Ref<'a, dyn WasiFile>) -> Self {
Self {
file,
status: Cell::new(None),
}
}
pub fn complete(&self, size: u64, flags: RwEventFlags) {
self.status.set(Some(Ok((size, flags))))
}
pub fn error(&self, error: Error) {
self.status.set(Some(Err(error)))
}
pub fn result(self) -> Option<Result<(u64, RwEventFlags), Error>> {
self.status.into_inner()
}
}
pub struct MonotonicClockSubscription<'a> {
pub clock: &'a dyn WasiMonotonicClock,
pub deadline: Instant,
pub precision: Duration,
}
impl<'a> MonotonicClockSubscription<'a> {
pub fn now(&self) -> Instant {
self.clock.now(self.precision)
}
pub fn duration_until(&self) -> Option<Duration> {
self.deadline.checked_duration_since(self.now())
}
pub fn result(&self) -> Option<Result<(), Error>> {
if self.now().checked_duration_since(self.deadline).is_some() | else {
None
}
}
}
pub enum Subscription<'a> {
Read(RwSubscription<'a>),
Write(RwSubscription<'a>),
MonotonicClock(MonotonicClockSubscription<'a>),
}
pub enum SubscriptionResult {
Read(Result<(u64, RwEventFlags), Error>),
Write(Result<(u64, RwEventFlags), Error>),
MonotonicClock(Result<(), Error>),
}
impl SubscriptionResult {
pub fn from_subscription(s: Subscription) -> Option<SubscriptionResult> {
match s {
Subscription::Read(s) => s.result().map(SubscriptionResult::Read),
Subscription::Write(s) => s.result().map(SubscriptionResult::Write),
Subscription::MonotonicClock(s) => s.result().map(SubscriptionResult::MonotonicClock),
}
}
}
| {
Some(Ok(()))
} |
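// Note: `result` above yields Some(Ok(())) once now() has reached the deadline
// (checked_duration_since succeeds), and None while the deadline lies ahead.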
reltime.go | // Package reltime implements a "time ago" algorithm.
package reltime // import "code.soquee.net/reltime"
import (
"math"
"strconv"
"time"
)
// TimeAgo transforms the difference between a time and the current time into a
// human readable string.
//
// It is a convenience wrapper for Ago(time.Until(t)).
// For more information see the example on Ago.
func TimeAgo(t time.Time) string {
return Ago(time.Until(t))
}
// Ago transforms durations into human readable strings.
func Ago(d time.Duration) string {
// Take the absolute value and record the sign.
sign := d >> 63
d = (d ^ sign) - sign
var ago string
if sign < 0 |
switch {
case d < 30*time.Second:
return "just now"
case d < time.Minute:
return "less than a minute" + ago
case d < time.Minute+(30*time.Second):
return "about a minute" + ago
case d < 30*time.Minute:
return strconv.FormatFloat(math.Round(d.Minutes()), 'f', -1, 64) + " minutes" + ago
case d < time.Hour:
return "less than an hour" + ago
case d < time.Hour+30*time.Minute:
return "about an hour" + ago
case d < 24*time.Hour:
return strconv.FormatFloat(math.Round(d.Hours()), 'f', -1, 64) + " hours" + ago
case d < 32*time.Hour:
return "about a day" + ago
case d < 28*24*time.Hour:
return strconv.FormatFloat(math.Round(d.Hours()/24), 'f', -1, 64) + " days" + ago
case d < 45*24*time.Hour:
return "about a month" + ago
case d < 12*30*24*time.Hour:
return strconv.FormatFloat(math.Round(d.Hours()/24/30), 'f', -1, 64) + " months" + ago
case d < 18*30*24*time.Hour:
return "about a year" + ago
}
return strconv.FormatFloat(math.Round(d.Hours()/24/30/12), 'f', -1, 64) + " years" + ago
}
| {
ago = " ago"
} |
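// Illustrative outputs (thresholds as in the switch above; a negative input
// means the instant lies in the past, so " ago" is appended):
//
//	Ago(-10 * time.Second) // "just now"
//	Ago(-45 * time.Second) // "less than a minute ago"
//	Ago(-5 * time.Minute)  // "5 minutes ago"
//	Ago(2 * time.Hour)     // "2 hours" (future, so no suffix)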
geometry.py | from __future__ import absolute_import
# Copyright (c) 2010-2019 openpyexcel
from openpyexcel.descriptors.serialisable import Serialisable
from openpyexcel.descriptors import (
Typed,
Float,
Integer,
Bool,
MinMax,
Set,
NoneSet,
String,
Alias,
)
from openpyexcel.descriptors.excel import Coordinate, Percentage
from openpyexcel.descriptors.nested import (
EmptyTag
)
from openpyexcel.descriptors.excel import ExtensionList as OfficeArtExtensionList
from .colors import ColorChoiceDescriptor
from .fill import (
GradientFillProperties,
BlipFillProperties,
PatternFillProperties,
)
from .line import LineProperties
from openpyexcel.styles.colors import Color
from openpyexcel.xml.constants import DRAWING_NS
class Point2D(Serialisable):
tagname = "off"
namespace = DRAWING_NS
x = Coordinate()
y = Coordinate()
def __init__(self,
x=None,
y=None,
):
self.x = x
self.y = y
class PositiveSize2D(Serialisable):
tagname = "ext"
namespace = DRAWING_NS
"""
Dimensions in EMUs
"""
cx = Integer()
width = Alias('cx')
cy = Integer()
height = Alias('cy')
def __init__(self,
cx=None,
cy=None,
):
self.cx = cx
self.cy = cy
class Transform2D(Serialisable):
tagname = "xfrm"
namespace = DRAWING_NS
rot = Integer(allow_none=True)
flipH = Bool(allow_none=True)
flipV = Bool(allow_none=True)
off = Typed(expected_type=Point2D, allow_none=True)
ext = Typed(expected_type=PositiveSize2D, allow_none=True)
chOff = Typed(expected_type=Point2D, allow_none=True)
chExt = Typed(expected_type=PositiveSize2D, allow_none=True)
__elements__ = ('off', 'ext', 'chOff', 'chExt')
def __init__(self,
rot=None,
flipH=None,
flipV=None,
off=None,
ext=None,
chOff=None,
chExt=None,
):
self.rot = rot
self.flipH = flipH
self.flipV = flipV
self.off = off
self.ext = ext
self.chOff = chOff
self.chExt = chExt
class GroupTransform2D(Serialisable):
tagname = "xfrm"
namespace = DRAWING_NS
rot = Integer(allow_none=True)
flipH = Bool(allow_none=True)
flipV = Bool(allow_none=True)
off = Typed(expected_type=Point2D, allow_none=True)
ext = Typed(expected_type=PositiveSize2D, allow_none=True)
chOff = Typed(expected_type=Point2D, allow_none=True)
chExt = Typed(expected_type=PositiveSize2D, allow_none=True)
__elements__ = ("off", "ext", "chOff", "chExt")
def __init__(self,
rot=0,
flipH=None,
flipV=None,
off=None,
ext=None,
chOff=None,
chExt=None,
):
self.rot = rot
self.flipH = flipH
self.flipV = flipV
self.off = off
self.ext = ext
self.chOff = chOff
self.chExt = chExt
class SphereCoords(Serialisable):
tagname = "sphereCoords" # usually
lat = Integer()
lon = Integer()
rev = Integer()
def __init__(self,
lat=None,
lon=None,
rev=None,
):
self.lat = lat
self.lon = lon
self.rev = rev
class Camera(Serialisable):
tagname = "camera"
prst = Set(values=[
'legacyObliqueTopLeft', 'legacyObliqueTop', 'legacyObliqueTopRight', 'legacyObliqueLeft',
'legacyObliqueFront', 'legacyObliqueRight', 'legacyObliqueBottomLeft',
'legacyObliqueBottom', 'legacyObliqueBottomRight', 'legacyPerspectiveTopLeft',
'legacyPerspectiveTop', 'legacyPerspectiveTopRight', 'legacyPerspectiveLeft',
'legacyPerspectiveFront', 'legacyPerspectiveRight', 'legacyPerspectiveBottomLeft',
'legacyPerspectiveBottom', 'legacyPerspectiveBottomRight', 'orthographicFront',
'isometricTopUp', 'isometricTopDown', 'isometricBottomUp', 'isometricBottomDown',
'isometricLeftUp', 'isometricLeftDown', 'isometricRightUp', 'isometricRightDown',
'isometricOffAxis1Left', 'isometricOffAxis1Right', 'isometricOffAxis1Top',
'isometricOffAxis2Left', 'isometricOffAxis2Right', 'isometricOffAxis2Top',
'isometricOffAxis3Left', 'isometricOffAxis3Right', 'isometricOffAxis3Bottom',
'isometricOffAxis4Left', 'isometricOffAxis4Right', 'isometricOffAxis4Bottom',
'obliqueTopLeft', 'obliqueTop', 'obliqueTopRight', 'obliqueLeft', 'obliqueRight',
'obliqueBottomLeft', 'obliqueBottom', 'obliqueBottomRight', 'perspectiveFront',
'perspectiveLeft', 'perspectiveRight', 'perspectiveAbove', 'perspectiveBelow',
'perspectiveAboveLeftFacing', 'perspectiveAboveRightFacing',
'perspectiveContrastingLeftFacing', 'perspectiveContrastingRightFacing',
'perspectiveHeroicLeftFacing', 'perspectiveHeroicRightFacing',
'perspectiveHeroicExtremeLeftFacing', 'perspectiveHeroicExtremeRightFacing',
'perspectiveRelaxed', 'perspectiveRelaxedModerately'])
fov = Integer(allow_none=True)
zoom = Typed(expected_type=Percentage, allow_none=True)
rot = Typed(expected_type=SphereCoords, allow_none=True)
def __init__(self,
prst=None,
fov=None,
zoom=None,
rot=None,
):
self.prst = prst
self.fov = fov
self.zoom = zoom
self.rot = rot
class LightRig(Serialisable):
tagname = "lightRig"
rig = Set(values=['legacyFlat1', 'legacyFlat2', 'legacyFlat3', 'legacyFlat4', 'legacyNormal1',
'legacyNormal2', 'legacyNormal3', 'legacyNormal4', 'legacyHarsh1',
'legacyHarsh2', 'legacyHarsh3', 'legacyHarsh4', 'threePt', 'balanced',
'soft', 'harsh', 'flood', 'contrasting', 'morning', 'sunrise', 'sunset',
'chilly', 'freezing', 'flat', 'twoPt', 'glow', 'brightRoom']
)
dir = Set(values=(['tl', 't', 'tr', 'l', 'r', 'bl', 'b', 'br']))
rot = Typed(expected_type=SphereCoords, allow_none=True)
def __init__(self,
rig=None,
dir=None,
rot=None,
):
self.rig = rig
self.dir = dir
self.rot = rot
class Vector3D(Serialisable):
tagname = "vector"
    dx = Integer() # can be int or universal measure :-/
dy = Integer()
dz = Integer()
def __init__(self,
dx=None,
dy=None,
dz=None,
):
self.dx = dx
self.dy = dy
self.dz = dz
class Point3D(Serialisable):
tagname = "anchor"
x = Integer()
y = Integer()
z = Integer()
def __init__(self,
x=None,
y=None,
z=None,
):
self.x = x
self.y = y
self.z = z
class Backdrop(Serialisable):
anchor = Typed(expected_type=Point3D, )
norm = Typed(expected_type=Vector3D, )
up = Typed(expected_type=Vector3D, )
extLst = Typed(expected_type=OfficeArtExtensionList, allow_none=True)
def __init__(self,
anchor=None,
norm=None,
up=None,
extLst=None,
):
self.anchor = anchor
self.norm = norm
self.up = up
self.extLst = extLst
class Scene3D(Serialisable):
camera = Typed(expected_type=Camera, )
lightRig = Typed(expected_type=LightRig, )
backdrop = Typed(expected_type=Backdrop, allow_none=True)
extLst = Typed(expected_type=OfficeArtExtensionList, allow_none=True)
def __init__(self,
camera=None,
lightRig=None,
backdrop=None,
extLst=None,
):
self.camera = camera
self.lightRig = lightRig
self.backdrop = backdrop
self.extLst = extLst
class Bevel(Serialisable):
tagname = "bevel"
w = Integer()
h = Integer()
prst = NoneSet(values=
['relaxedInset', 'circle', 'slope', 'cross', 'angle',
'softRound', 'convex', 'coolSlant', 'divot', 'riblet',
'hardEdge', 'artDeco']
)
def __init__(self,
w=None,
h=None,
prst=None,
):
self.w = w
self.h = h
self.prst = prst
class Shape3D(Serialisable):
namespace = DRAWING_NS
z = Typed(expected_type=Coordinate, allow_none=True)
extrusionH = Integer(allow_none=True)
contourW = Integer(allow_none=True)
prstMaterial = NoneSet(values=[
'legacyMatte','legacyPlastic', 'legacyMetal', 'legacyWireframe', 'matte', 'plastic',
'metal', 'warmMatte', 'translucentPowder', 'powder', 'dkEdge',
'softEdge', 'clear', 'flat', 'softmetal']
)
bevelT = Typed(expected_type=Bevel, allow_none=True)
bevelB = Typed(expected_type=Bevel, allow_none=True)
extrusionClr = Typed(expected_type=Color, allow_none=True)
contourClr = Typed(expected_type=Color, allow_none=True)
extLst = Typed(expected_type=OfficeArtExtensionList, allow_none=True)
def __init__(self,
z=None,
extrusionH=None,
contourW=None,
prstMaterial=None,
bevelT=None,
bevelB=None,
extrusionClr=None,
contourClr=None,
extLst=None,
):
self.z = z
self.extrusionH = extrusionH
self.contourW = contourW
self.prstMaterial = prstMaterial
self.bevelT = bevelT
self.bevelB = bevelB
self.extrusionClr = extrusionClr
self.contourClr = contourClr
self.extLst = extLst
class Path2D(Serialisable):
w = Float()
h = Float()
fill = NoneSet(values=(['norm', 'lighten', 'lightenLess', 'darken', 'darkenLess']))
stroke = Bool(allow_none=True)
extrusionOk = Bool(allow_none=True)
def __init__(self,
w=None,
h=None,
fill=None,
stroke=None,
extrusionOk=None, | self.h = h
self.fill = fill
self.stroke = stroke
self.extrusionOk = extrusionOk
class Path2DList(Serialisable):
path = Typed(expected_type=Path2D, allow_none=True)
def __init__(self,
path=None,
):
self.path = path
class GeomRect(Serialisable):
l = Coordinate()
t = Coordinate()
r = Coordinate()
b = Coordinate()
def __init__(self,
l=None,
t=None,
r=None,
b=None,
):
self.l = l
self.t = t
self.r = r
self.b = b
class AdjPoint2D(Serialisable):
x = Coordinate()
y = Coordinate()
def __init__(self,
x=None,
y=None,
):
self.x = x
self.y = y
class ConnectionSite(Serialisable):
    ang = MinMax(min=0, max=360) # guesswork; can also be a name
pos = Typed(expected_type=AdjPoint2D, )
def __init__(self,
ang=None,
pos=None,
):
self.ang = ang
self.pos = pos
class ConnectionSiteList(Serialisable):
cxn = Typed(expected_type=ConnectionSite, allow_none=True)
def __init__(self,
cxn=None,
):
self.cxn = cxn
class AdjustHandleList(Serialisable):
pass
class GeomGuide(Serialisable):
name = String()
fmla = String()
def __init__(self,
name=None,
fmla=None,
):
self.name = name
self.fmla = fmla
class GeomGuideList(Serialisable):
gd = Typed(expected_type=GeomGuide, allow_none=True)
def __init__(self,
gd=None,
):
self.gd = gd
class CustomGeometry2D(Serialisable):
avLst = Typed(expected_type=GeomGuideList, allow_none=True)
gdLst = Typed(expected_type=GeomGuideList, allow_none=True)
ahLst = Typed(expected_type=AdjustHandleList, allow_none=True)
cxnLst = Typed(expected_type=ConnectionSiteList, allow_none=True)
rect = Typed(expected_type=GeomRect, allow_none=True)
pathLst = Typed(expected_type=Path2DList, )
def __init__(self,
avLst=None,
gdLst=None,
ahLst=None,
cxnLst=None,
rect=None,
pathLst=None,
):
self.avLst = avLst
self.gdLst = gdLst
self.ahLst = ahLst
self.cxnLst = cxnLst
self.rect = rect
self.pathLst = pathLst
class PresetGeometry2D(Serialisable):
namespace = DRAWING_NS
prst = Set(values=(
['line', 'lineInv', 'triangle', 'rtTriangle', 'rect',
'diamond', 'parallelogram', 'trapezoid', 'nonIsoscelesTrapezoid',
'pentagon', 'hexagon', 'heptagon', 'octagon', 'decagon', 'dodecagon',
'star4', 'star5', 'star6', 'star7', 'star8', 'star10', 'star12',
'star16', 'star24', 'star32', 'roundRect', 'round1Rect',
'round2SameRect', 'round2DiagRect', 'snipRoundRect', 'snip1Rect',
'snip2SameRect', 'snip2DiagRect', 'plaque', 'ellipse', 'teardrop',
'homePlate', 'chevron', 'pieWedge', 'pie', 'blockArc', 'donut',
'noSmoking', 'rightArrow', 'leftArrow', 'upArrow', 'downArrow',
'stripedRightArrow', 'notchedRightArrow', 'bentUpArrow',
'leftRightArrow', 'upDownArrow', 'leftUpArrow', 'leftRightUpArrow',
'quadArrow', 'leftArrowCallout', 'rightArrowCallout', 'upArrowCallout',
'downArrowCallout', 'leftRightArrowCallout', 'upDownArrowCallout',
'quadArrowCallout', 'bentArrow', 'uturnArrow', 'circularArrow',
'leftCircularArrow', 'leftRightCircularArrow', 'curvedRightArrow',
'curvedLeftArrow', 'curvedUpArrow', 'curvedDownArrow', 'swooshArrow',
'cube', 'can', 'lightningBolt', 'heart', 'sun', 'moon', 'smileyFace',
'irregularSeal1', 'irregularSeal2', 'foldedCorner', 'bevel', 'frame',
'halfFrame', 'corner', 'diagStripe', 'chord', 'arc', 'leftBracket',
'rightBracket', 'leftBrace', 'rightBrace', 'bracketPair', 'bracePair',
'straightConnector1', 'bentConnector2', 'bentConnector3',
'bentConnector4', 'bentConnector5', 'curvedConnector2',
'curvedConnector3', 'curvedConnector4', 'curvedConnector5', 'callout1',
'callout2', 'callout3', 'accentCallout1', 'accentCallout2',
'accentCallout3', 'borderCallout1', 'borderCallout2', 'borderCallout3',
'accentBorderCallout1', 'accentBorderCallout2', 'accentBorderCallout3',
'wedgeRectCallout', 'wedgeRoundRectCallout', 'wedgeEllipseCallout',
'cloudCallout', 'cloud', 'ribbon', 'ribbon2', 'ellipseRibbon',
'ellipseRibbon2', 'leftRightRibbon', 'verticalScroll',
'horizontalScroll', 'wave', 'doubleWave', 'plus', 'flowChartProcess',
'flowChartDecision', 'flowChartInputOutput',
'flowChartPredefinedProcess', 'flowChartInternalStorage',
'flowChartDocument', 'flowChartMultidocument', 'flowChartTerminator',
'flowChartPreparation', 'flowChartManualInput',
'flowChartManualOperation', 'flowChartConnector', 'flowChartPunchedCard',
'flowChartPunchedTape', 'flowChartSummingJunction', 'flowChartOr',
'flowChartCollate', 'flowChartSort', 'flowChartExtract',
'flowChartMerge', 'flowChartOfflineStorage', 'flowChartOnlineStorage',
'flowChartMagneticTape', 'flowChartMagneticDisk',
'flowChartMagneticDrum', 'flowChartDisplay', 'flowChartDelay',
'flowChartAlternateProcess', 'flowChartOffpageConnector',
'actionButtonBlank', 'actionButtonHome', 'actionButtonHelp',
'actionButtonInformation', 'actionButtonForwardNext',
'actionButtonBackPrevious', 'actionButtonEnd', 'actionButtonBeginning',
'actionButtonReturn', 'actionButtonDocument', 'actionButtonSound',
'actionButtonMovie', 'gear6', 'gear9', 'funnel', 'mathPlus', 'mathMinus',
'mathMultiply', 'mathDivide', 'mathEqual', 'mathNotEqual', 'cornerTabs',
'squareTabs', 'plaqueTabs', 'chartX', 'chartStar', 'chartPlus']))
avLst = Typed(expected_type=GeomGuideList, allow_none=True)
def __init__(self,
prst=None,
avLst=None,
):
self.prst = prst
self.avLst = avLst
class FontReference(Serialisable):
idx = NoneSet(values=(['major', 'minor']))
def __init__(self,
idx=None,
):
self.idx = idx
class StyleMatrixReference(Serialisable):
idx = Integer()
def __init__(self,
idx=None,
):
self.idx = idx
class ShapeStyle(Serialisable):
lnRef = Typed(expected_type=StyleMatrixReference, )
fillRef = Typed(expected_type=StyleMatrixReference, )
effectRef = Typed(expected_type=StyleMatrixReference, )
fontRef = Typed(expected_type=FontReference, )
def __init__(self,
lnRef=None,
fillRef=None,
effectRef=None,
fontRef=None,
):
self.lnRef = lnRef
self.fillRef = fillRef
self.effectRef = effectRef
self.fontRef = fontRef | ):
self.w = w |
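# Illustrative sketch: a one-inch square placed at the origin, using EMU
# coordinates (914400 EMU = 1 inch):
#
#   xfrm = Transform2D(off=Point2D(x=0, y=0),
#                      ext=PositiveSize2D(cx=914400, cy=914400))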
events_txstarted.rs | #[doc = "Register `EVENTS_TXSTARTED` reader"]
pub struct R(crate::R<EVENTS_TXSTARTED_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<EVENTS_TXSTARTED_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<crate::R<EVENTS_TXSTARTED_SPEC>> for R {
#[inline(always)]
fn from(reader: crate::R<EVENTS_TXSTARTED_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Register `EVENTS_TXSTARTED` writer"]
pub struct W(crate::W<EVENTS_TXSTARTED_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<EVENTS_TXSTARTED_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl From<crate::W<EVENTS_TXSTARTED_SPEC>> for W {
#[inline(always)]
fn from(writer: crate::W<EVENTS_TXSTARTED_SPEC>) -> Self {
W(writer)
}
}
#[doc = "UART transmitter has started\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum EVENTS_TXSTARTED_A { | #[doc = "0: Event not generated"]
NOTGENERATED = 0,
#[doc = "1: Event generated"]
GENERATED = 1,
}
impl From<EVENTS_TXSTARTED_A> for bool {
#[inline(always)]
fn from(variant: EVENTS_TXSTARTED_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `EVENTS_TXSTARTED` reader - UART transmitter has started"]
pub struct EVENTS_TXSTARTED_R(crate::FieldReader<bool, EVENTS_TXSTARTED_A>);
impl EVENTS_TXSTARTED_R {
#[inline(always)]
pub(crate) fn new(bits: bool) -> Self {
EVENTS_TXSTARTED_R(crate::FieldReader::new(bits))
}
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> EVENTS_TXSTARTED_A {
match self.bits {
false => EVENTS_TXSTARTED_A::NOTGENERATED,
true => EVENTS_TXSTARTED_A::GENERATED,
}
}
#[doc = "Checks if the value of the field is `NOTGENERATED`"]
#[inline(always)]
pub fn is_not_generated(&self) -> bool {
**self == EVENTS_TXSTARTED_A::NOTGENERATED
}
#[doc = "Checks if the value of the field is `GENERATED`"]
#[inline(always)]
pub fn is_generated(&self) -> bool {
**self == EVENTS_TXSTARTED_A::GENERATED
}
}
impl core::ops::Deref for EVENTS_TXSTARTED_R {
type Target = crate::FieldReader<bool, EVENTS_TXSTARTED_A>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `EVENTS_TXSTARTED` writer - UART transmitter has started"]
pub struct EVENTS_TXSTARTED_W<'a> {
w: &'a mut W,
}
impl<'a> EVENTS_TXSTARTED_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: EVENTS_TXSTARTED_A) -> &'a mut W {
self.bit(variant.into())
}
#[doc = "Event not generated"]
#[inline(always)]
pub fn not_generated(self) -> &'a mut W {
self.variant(EVENTS_TXSTARTED_A::NOTGENERATED)
}
#[doc = "Event generated"]
#[inline(always)]
pub fn generated(self) -> &'a mut W {
self.variant(EVENTS_TXSTARTED_A::GENERATED)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !0x01) | (value as u32 & 0x01);
self.w
}
}
impl R {
#[doc = "Bit 0 - UART transmitter has started"]
#[inline(always)]
pub fn events_txstarted(&self) -> EVENTS_TXSTARTED_R {
EVENTS_TXSTARTED_R::new((self.bits & 0x01) != 0)
}
}
impl W {
#[doc = "Bit 0 - UART transmitter has started"]
#[inline(always)]
pub fn events_txstarted(&mut self) -> EVENTS_TXSTARTED_W {
EVENTS_TXSTARTED_W { w: self }
}
#[doc = "Writes raw bits to the register."]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.0.bits(bits);
self
}
}
#[doc = "UART transmitter has started\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [events_txstarted](index.html) module"]
pub struct EVENTS_TXSTARTED_SPEC;
impl crate::RegisterSpec for EVENTS_TXSTARTED_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [events_txstarted::R](R) reader structure"]
impl crate::Readable for EVENTS_TXSTARTED_SPEC {
type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [events_txstarted::W](W) writer structure"]
impl crate::Writable for EVENTS_TXSTARTED_SPEC {
type Writer = W;
}
#[doc = "`reset()` method sets EVENTS_TXSTARTED to value 0"]
impl crate::Resettable for EVENTS_TXSTARTED_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0
}
} | |
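// Illustrative usage sketch (`uarte` is an assumed peripheral handle from this
// PAC): check whether the event fired, then clear it.
//
// if uarte.events_txstarted.read().events_txstarted().is_generated() {
//     uarte.events_txstarted.write(|w| w.events_txstarted().not_generated());
// }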
merge-sort-recursive.py | ## A recursive implementation of merge sort.
## Author: AJ
## test case 1 45 849 904 79 48942 7
class sorting:
def __init__(self):
|
def get_data(self):
self.arr = list(map(int, input().split()))
return self.arr
def merge_sort(self, array):
if len(array) == 1:
return array
mid = len(array)//2 # Find the approximate middle point
# Separate the arrays using the middle point
left = self.merge_sort(array[:mid])
right = self.merge_sort(array[mid:])
left_indx = 0
right_indx = 0
complete_arr = []
# Iteratively combine the two arrays by sorting them appropriately
for indx in range(len(left) + len(right)):
if (left_indx < len(left)) and (right_indx < len(right)):
if (left[left_indx] < right[right_indx]):
complete_arr.append(left[left_indx])
left_indx+=1
else:
complete_arr.append(right[right_indx])
right_indx += 1
elif left_indx == len(left):
for indx2 in range(right_indx, len(right)):
complete_arr.append(right[indx2])
right_indx = len(right)
else:
for indx2 in range(left_indx, len(left)):
complete_arr.append(left[indx2])
left_indx = len(left)
#print(len(left)+len(right), len(complete_arr))
return complete_arr
def runner(self):
self.arr = self.merge_sort(self.arr)
def print_arr(self):
for ele in self.arr:
print(str(ele) + ' ', end='')
print('')
array = sorting()
array.get_data()
array.print_arr()
array.runner()  # replaces array.arr with the sorted result via merge_sort
array.print_arr()
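# Illustrative run: array.merge_sort([5, 2, 9, 1]) returns [1, 2, 5, 9].
# Each recursion level does O(n) merge work over O(log n) levels: O(n log n).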
| self.arr = [] |
raft.go | // Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package etcdserver
import (
"encoding/json"
"expvar"
"sort"
"sync"
"sync/atomic"
"time"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/etcdserver/membership"
"github.com/coreos/etcd/pkg/contention"
"github.com/coreos/etcd/pkg/pbutil"
"github.com/coreos/etcd/pkg/types"
"github.com/coreos/etcd/raft"
"github.com/coreos/etcd/raft/raftpb"
"github.com/coreos/etcd/rafthttp"
"github.com/coreos/etcd/wal"
"github.com/coreos/etcd/wal/walpb"
"github.com/coreos/pkg/capnslog"
)
const (
	// Number of entries for a slow follower to catch up after compacting
	// the raft storage entries.
	// We expect the follower to have millisecond-level latency with the
	// leader. The max throughput is around 10K. Keeping 5K entries is
	// enough to help a follower catch up.
	numberOfCatchUpEntries = 5000
// The max throughput of etcd will not exceed 100MB/s (100K * 1KB value).
// Assuming the RTT is around 10ms, 1MB max size is large enough.
maxSizePerMsg = 1 * 1024 * 1024
// Never overflow the rafthttp buffer, which is 4096.
// TODO: a better const?
maxInflightMsgs = 4096 / 8
)
var (
// protects raftStatus
raftStatusMu sync.Mutex
// indirection for expvar func interface
// expvar panics when publishing duplicate name
// expvar does not support remove a registered name
// so only register a func that calls raftStatus
// and change raftStatus as we need.
raftStatus func() raft.Status
)
func init() {
raft.SetLogger(capnslog.NewPackageLogger("github.com/coreos/etcd", "raft"))
expvar.Publish("raft.status", expvar.Func(func() interface{} {
raftStatusMu.Lock()
defer raftStatusMu.Unlock()
return raftStatus()
}))
}
type RaftTimer interface {
Index() uint64
Term() uint64
}
// apply contains entries, snapshot to be applied. Once
// an apply is consumed, the entries will be persisted to
// raft storage concurrently; the application must read
// raftDone before assuming the raft messages are stable.
type apply struct {
entries []raftpb.Entry
snapshot raftpb.Snapshot
// notifyc synchronizes etcd server applies with the raft node
notifyc chan struct{}
}
type raftNode struct {
// Cache of the latest raft index and raft term the server has seen.
	// These three uint64 fields must be the first elements to keep 64-bit
// alignment for atomic access to the fields.
index uint64
term uint64
lead uint64
tickMu *sync.Mutex
raftNodeConfig
// a chan to send/receive snapshot
msgSnapC chan raftpb.Message
// a chan to send out apply
applyc chan apply
// a chan to send out readState
readStateC chan raft.ReadState
// utility
ticker *time.Ticker
// contention detectors for raft heartbeat message
td *contention.TimeoutDetector
stopped chan struct{}
done chan struct{}
}
type raftNodeConfig struct {
// to check if msg receiver is removed from cluster
isIDRemoved func(id uint64) bool
raft.Node
raftStorage *raft.MemoryStorage
storage Storage
heartbeat time.Duration // for logging
// transport specifies the transport to send and receive msgs to members.
// Sending messages MUST NOT block. It is okay to drop messages, since
// clients should timeout and reissue their messages.
// If transport is nil, server will panic.
transport rafthttp.Transporter
}
func newRaftNode(cfg raftNodeConfig) *raftNode |
// raft.Node does not have locks in the Raft package
func (r *raftNode) tick() {
r.tickMu.Lock()
r.Tick()
r.tickMu.Unlock()
}
// start prepares and starts raftNode in a new goroutine. It is no longer safe
// to modify the fields after it has been started.
func (r *raftNode) start(rh *raftReadyHandler) {
internalTimeout := time.Second
go func() {
defer r.onStop()
islead := false
for {
select {
case <-r.ticker.C:
r.tick()
case rd := <-r.Ready():
if rd.SoftState != nil {
newLeader := rd.SoftState.Lead != raft.None && atomic.LoadUint64(&r.lead) != rd.SoftState.Lead
if newLeader {
leaderChanges.Inc()
}
if rd.SoftState.Lead == raft.None {
hasLeader.Set(0)
} else {
hasLeader.Set(1)
}
atomic.StoreUint64(&r.lead, rd.SoftState.Lead)
islead = rd.RaftState == raft.StateLeader
if islead {
isLeader.Set(1)
} else {
isLeader.Set(0)
}
rh.updateLeadership(newLeader)
r.td.Reset()
}
if len(rd.ReadStates) != 0 {
select {
case r.readStateC <- rd.ReadStates[len(rd.ReadStates)-1]:
case <-time.After(internalTimeout):
plog.Warningf("timed out sending read state")
case <-r.stopped:
return
}
}
notifyc := make(chan struct{}, 1)
ap := apply{
entries: rd.CommittedEntries,
snapshot: rd.Snapshot,
notifyc: notifyc,
}
updateCommittedIndex(&ap, rh)
select {
case r.applyc <- ap:
case <-r.stopped:
return
}
// the leader can write to its disk in parallel with replicating to the followers and them
// writing to their disks.
// For more details, check raft thesis 10.2.1
if islead {
// gofail: var raftBeforeLeaderSend struct{}
r.transport.Send(r.processMessages(rd.Messages))
}
// gofail: var raftBeforeSave struct{}
if err := r.storage.Save(rd.HardState, rd.Entries); err != nil {
plog.Fatalf("raft save state and entries error: %v", err)
}
if !raft.IsEmptyHardState(rd.HardState) {
proposalsCommitted.Set(float64(rd.HardState.Commit))
}
// gofail: var raftAfterSave struct{}
if !raft.IsEmptySnap(rd.Snapshot) {
// gofail: var raftBeforeSaveSnap struct{}
if err := r.storage.SaveSnap(rd.Snapshot); err != nil {
plog.Fatalf("raft save snapshot error: %v", err)
}
// etcdserver now claim the snapshot has been persisted onto the disk
notifyc <- struct{}{}
// gofail: var raftAfterSaveSnap struct{}
r.raftStorage.ApplySnapshot(rd.Snapshot)
plog.Infof("raft applied incoming snapshot at index %d", rd.Snapshot.Metadata.Index)
// gofail: var raftAfterApplySnap struct{}
}
r.raftStorage.Append(rd.Entries)
if !islead {
// finish processing incoming messages before we signal raftdone chan
msgs := r.processMessages(rd.Messages)
// now unblocks 'applyAll' that waits on Raft log disk writes before triggering snapshots
notifyc <- struct{}{}
// Candidate or follower needs to wait for all pending configuration
// changes to be applied before sending messages.
// Otherwise we might incorrectly count votes (e.g. votes from removed members).
// Also slow machine's follower raft-layer could proceed to become the leader
// on its own single-node cluster, before apply-layer applies the config change.
// We simply wait for ALL pending entries to be applied for now.
// We might improve this later on if it causes unnecessary long blocking issues.
waitApply := false
for _, ent := range rd.CommittedEntries {
if ent.Type == raftpb.EntryConfChange {
waitApply = true
break
}
}
if waitApply {
// blocks until 'applyAll' calls 'applyWait.Trigger'
// to be in sync with scheduled config-change job
// (assume notifyc has cap of 1)
select {
case notifyc <- struct{}{}:
case <-r.stopped:
return
}
}
// gofail: var raftBeforeFollowerSend struct{}
r.transport.Send(msgs)
} else {
// leader already processed 'MsgSnap' and signaled
notifyc <- struct{}{}
}
r.Advance()
case <-r.stopped:
return
}
}
}()
}
func updateCommittedIndex(ap *apply, rh *raftReadyHandler) {
var ci uint64
if len(ap.entries) != 0 {
ci = ap.entries[len(ap.entries)-1].Index
}
if ap.snapshot.Metadata.Index > ci {
ci = ap.snapshot.Metadata.Index
}
if ci != 0 {
rh.updateCommittedIndex(ci)
}
}
func (r *raftNode) processMessages(ms []raftpb.Message) []raftpb.Message {
sentAppResp := false
for i := len(ms) - 1; i >= 0; i-- {
if r.isIDRemoved(ms[i].To) {
ms[i].To = 0
}
if ms[i].Type == raftpb.MsgAppResp {
if sentAppResp {
ms[i].To = 0
} else {
sentAppResp = true
}
}
if ms[i].Type == raftpb.MsgSnap {
// There are two separate data store: the store for v2, and the KV for v3.
// The msgSnap only contains the most recent snapshot of store without KV.
// So we need to redirect the msgSnap to etcd server main loop for merging in the
// current store snapshot and KV snapshot.
select {
case r.msgSnapC <- ms[i]:
default:
				// drop msgSnap if the inflight chan is full.
}
ms[i].To = 0
}
if ms[i].Type == raftpb.MsgHeartbeat {
ok, exceed := r.td.Observe(ms[i].To)
if !ok {
// TODO: limit request rate.
plog.Warningf("failed to send out heartbeat on time (exceeded the %v timeout for %v, to %x)", r.heartbeat, exceed, ms[i].To)
plog.Warningf("server is likely overloaded")
heartbeatSendFailures.Inc()
}
}
}
return ms
}
func (r *raftNode) apply() chan apply {
return r.applyc
}
func (r *raftNode) stop() {
r.stopped <- struct{}{}
<-r.done
}
func (r *raftNode) onStop() {
r.Stop()
r.ticker.Stop()
r.transport.Stop()
if err := r.storage.Close(); err != nil {
plog.Panicf("raft close storage error: %v", err)
}
close(r.done)
}
// for testing
func (r *raftNode) pauseSending() {
p := r.transport.(rafthttp.Pausable)
p.Pause()
}
func (r *raftNode) resumeSending() {
p := r.transport.(rafthttp.Pausable)
p.Resume()
}
// advanceTicks advances ticks of Raft node.
// This can be used for fast-forwarding election
// ticks in multi data-center deployments, thus
// speeding up election process.
func (r *raftNode) advanceTicks(ticks int) {
for i := 0; i < ticks; i++ {
r.tick()
}
}
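// Illustrative (a sketch, not etcd's exact call site): on boot a server can
// fast-forward to just shy of one election timeout so an election fires soon:
//
//	r.advanceTicks(cfg.ElectionTicks - 1)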
func startNode(cfg ServerConfig, cl *membership.RaftCluster, ids []types.ID) (id types.ID, n raft.Node, s *raft.MemoryStorage, w *wal.WAL) {
var err error
member := cl.MemberByName(cfg.Name)
metadata := pbutil.MustMarshal(
&pb.Metadata{
NodeID: uint64(member.ID),
ClusterID: uint64(cl.ID()),
},
)
if w, err = wal.Create(cfg.WALDir(), metadata); err != nil {
plog.Panicf("create wal error: %v", err)
}
peers := make([]raft.Peer, len(ids))
for i, id := range ids {
ctx, err := json.Marshal((*cl).Member(id))
if err != nil {
plog.Panicf("marshal member should never fail: %v", err)
}
peers[i] = raft.Peer{ID: uint64(id), Context: ctx}
}
id = member.ID
plog.Infof("starting member %s in cluster %s", id, cl.ID())
s = raft.NewMemoryStorage()
c := &raft.Config{
ID: uint64(id),
ElectionTick: cfg.ElectionTicks,
HeartbeatTick: 1,
Storage: s,
MaxSizePerMsg: maxSizePerMsg,
MaxInflightMsgs: maxInflightMsgs,
CheckQuorum: true,
}
n = raft.StartNode(c, peers)
raftStatusMu.Lock()
raftStatus = n.Status
raftStatusMu.Unlock()
return id, n, s, w
}
func restartNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
var walsnap walpb.Snapshot
if snapshot != nil {
walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
}
w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap)
plog.Infof("restarting member %s in cluster %s at commit index %d", id, cid, st.Commit)
cl := membership.NewCluster("")
cl.SetID(cid)
s := raft.NewMemoryStorage()
if snapshot != nil {
s.ApplySnapshot(*snapshot)
}
s.SetHardState(st)
s.Append(ents)
c := &raft.Config{
ID: uint64(id),
ElectionTick: cfg.ElectionTicks,
HeartbeatTick: 1,
Storage: s,
MaxSizePerMsg: maxSizePerMsg,
MaxInflightMsgs: maxInflightMsgs,
CheckQuorum: true,
}
n := raft.RestartNode(c)
raftStatusMu.Lock()
raftStatus = n.Status
raftStatusMu.Unlock()
return id, cl, n, s, w
}
func restartAsStandaloneNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
var walsnap walpb.Snapshot
if snapshot != nil {
walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
}
w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap)
// discard the previously uncommitted entries
for i, ent := range ents {
if ent.Index > st.Commit {
plog.Infof("discarding %d uncommitted WAL entries ", len(ents)-i)
ents = ents[:i]
break
}
}
// force append the configuration change entries
toAppEnts := createConfigChangeEnts(getIDs(snapshot, ents), uint64(id), st.Term, st.Commit)
ents = append(ents, toAppEnts...)
// force commit newly appended entries
err := w.Save(raftpb.HardState{}, toAppEnts)
if err != nil {
plog.Fatalf("%v", err)
}
if len(ents) != 0 {
st.Commit = ents[len(ents)-1].Index
}
plog.Printf("forcing restart of member %s in cluster %s at commit index %d", id, cid, st.Commit)
cl := membership.NewCluster("")
cl.SetID(cid)
s := raft.NewMemoryStorage()
if snapshot != nil {
s.ApplySnapshot(*snapshot)
}
s.SetHardState(st)
s.Append(ents)
c := &raft.Config{
ID: uint64(id),
ElectionTick: cfg.ElectionTicks,
HeartbeatTick: 1,
Storage: s,
MaxSizePerMsg: maxSizePerMsg,
MaxInflightMsgs: maxInflightMsgs,
CheckQuorum: true,
}
n := raft.RestartNode(c)
	raftStatusMu.Lock()
	raftStatus = n.Status
	raftStatusMu.Unlock()
return id, cl, n, s, w
}
// getIDs returns an ordered set of IDs included in the given snapshot and
// the entries. The given snapshot/entries can contain two kinds of
// ID-related entries:
// - ConfChangeAddNode, in which case the contained ID will be added into the set.
// - ConfChangeRemoveNode, in which case the contained ID will be removed from the set.
func getIDs(snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 {
ids := make(map[uint64]bool)
if snap != nil {
for _, id := range snap.Metadata.ConfState.Nodes {
ids[id] = true
}
}
for _, e := range ents {
if e.Type != raftpb.EntryConfChange {
continue
}
var cc raftpb.ConfChange
pbutil.MustUnmarshal(&cc, e.Data)
switch cc.Type {
case raftpb.ConfChangeAddNode:
ids[cc.NodeID] = true
case raftpb.ConfChangeRemoveNode:
delete(ids, cc.NodeID)
case raftpb.ConfChangeUpdateNode:
// do nothing
default:
plog.Panicf("ConfChange Type should be either ConfChangeAddNode or ConfChangeRemoveNode!")
}
}
sids := make(types.Uint64Slice, 0, len(ids))
for id := range ids {
sids = append(sids, id)
}
sort.Sort(sids)
return []uint64(sids)
}
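// Worked example (illustrative): a snapshot whose ConfState lists nodes {1, 2},
// followed by entries that add node 3 and then remove node 1, yields the
// sorted set [2, 3].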
// createConfigChangeEnts creates a series of Raft entries (i.e.
// EntryConfChange) to remove the set of given IDs from the cluster. The ID
// `self` is _not_ removed, even if present in the set.
// If `self` is not inside the given ids, it creates a Raft entry to add a
// default member with the given `self`.
func createConfigChangeEnts(ids []uint64, self uint64, term, index uint64) []raftpb.Entry {
ents := make([]raftpb.Entry, 0)
next := index + 1
found := false
for _, id := range ids {
if id == self {
found = true
continue
}
cc := &raftpb.ConfChange{
Type: raftpb.ConfChangeRemoveNode,
NodeID: id,
}
e := raftpb.Entry{
Type: raftpb.EntryConfChange,
Data: pbutil.MustMarshal(cc),
Term: term,
Index: next,
}
ents = append(ents, e)
next++
}
if !found {
m := membership.Member{
ID: types.ID(self),
RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"http://localhost:2380"}},
}
ctx, err := json.Marshal(m)
if err != nil {
plog.Panicf("marshal member should never fail: %v", err)
}
cc := &raftpb.ConfChange{
Type: raftpb.ConfChangeAddNode,
NodeID: self,
Context: ctx,
}
e := raftpb.Entry{
Type: raftpb.EntryConfChange,
Data: pbutil.MustMarshal(cc),
Term: term,
Index: next,
}
ents = append(ents, e)
}
return ents
}
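// Worked example (illustrative): ids=[1,2,3], self=2, term=5, index=10 yields
// two ConfChangeRemoveNode entries (for IDs 1 and 3) at indexes 11 and 12;
// since self is found, no add entry is created. If instead ids=[1,3], a
// ConfChangeAddNode entry re-adding member 2 with the default peer URL
// follows at index 13.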
| {
r := &raftNode{
tickMu: new(sync.Mutex),
raftNodeConfig: cfg,
// set up contention detectors for raft heartbeat message.
// expect to send a heartbeat within 2 heartbeat intervals.
td: contention.NewTimeoutDetector(2 * cfg.heartbeat),
readStateC: make(chan raft.ReadState, 1),
msgSnapC: make(chan raftpb.Message, maxInFlightMsgSnap),
applyc: make(chan apply),
stopped: make(chan struct{}),
done: make(chan struct{}),
}
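	// A zero heartbeat interval (used by tests) gets a zero-value ticker
	// whose nil channel never fires.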
if r.heartbeat == 0 {
r.ticker = &time.Ticker{}
} else {
r.ticker = time.NewTicker(r.heartbeat)
}
return r
} |
vote_reviser_test.go | package staking
import (
"context"
"math/big"
"testing"
"time"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/require"
"github.com/iotexproject/iotex-address/address"
"github.com/iotexproject/iotex-core/action/protocol"
"github.com/iotexproject/iotex-core/blockchain/genesis"
"github.com/iotexproject/iotex-core/pkg/unit"
"github.com/iotexproject/iotex-core/test/identityset"
"github.com/iotexproject/iotex-core/testutil/testdb"
)
func TestVoteReviser(t *testing.T) {
r := require.New(t)
ctrl := gomock.NewController(t)
sm := testdb.NewMockStateManager(ctrl)
_, err := sm.PutState(
&totalBucketCount{count: 0},
protocol.NamespaceOption(StakingNameSpace),
protocol.KeyOption(TotalBucketKey),
)
r.NoError(err)
tests := []struct {
cand address.Address | owner address.Address
amount *big.Int
duration uint32
index uint64
}{
{
identityset.Address(6),
identityset.Address(6),
unit.ConvertIotxToRau(1100000),
21,
0,
},
{
identityset.Address(1),
identityset.Address(1),
unit.ConvertIotxToRau(1200000),
21,
1,
},
{
identityset.Address(2),
identityset.Address(2),
unit.ConvertIotxToRau(1200000),
14,
2,
},
{
identityset.Address(3),
identityset.Address(3),
unit.ConvertIotxToRau(1200000),
25,
3,
},
{
identityset.Address(4),
identityset.Address(4),
unit.ConvertIotxToRau(1200000),
31,
4,
},
{
identityset.Address(5),
identityset.Address(5),
unit.ConvertIotxToRau(1199999),
31,
5,
},
{
identityset.Address(1),
identityset.Address(2),
big.NewInt(2100000000),
21,
6,
},
{
identityset.Address(2),
identityset.Address(3),
big.NewInt(1400000000),
14,
7,
},
{
identityset.Address(3),
identityset.Address(4),
big.NewInt(2500000000),
25,
8,
},
{
identityset.Address(4),
identityset.Address(1),
big.NewInt(3100000000),
31,
9,
},
}
// test loading with no candidate in stateDB
stk, err := NewProtocol(
nil,
genesis.Default.Staking,
nil,
genesis.Default.GreenlandBlockHeight,
genesis.Default.HawaiiBlockHeight,
)
r.NotNil(stk)
r.NoError(err)
// write a number of buckets into stateDB
for _, e := range tests {
vb := NewVoteBucket(e.cand, e.owner, e.amount, e.duration, time.Now(), true)
index, err := putBucketAndIndex(sm, vb)
r.NoError(err)
r.Equal(index, vb.Index)
}
// load candidates from stateDB and verify
ctx := genesis.WithGenesisContext(context.Background(), genesis.Default)
v, err := stk.Start(ctx, sm)
	r.NoError(err)
	sm.WriteView(protocolID, v)
_, ok := v.(*ViewData)
r.True(ok)
csm, err := NewCandidateStateManager(sm, false)
r.NoError(err)
// load a number of candidates
for _, e := range testCandidates {
r.NoError(csm.Upsert(e.d))
}
r.NoError(csm.Commit())
// test revise
r.False(stk.voteReviser.isCacheExist(genesis.Default.GreenlandBlockHeight))
r.False(stk.voteReviser.isCacheExist(genesis.Default.HawaiiBlockHeight))
r.NoError(stk.voteReviser.Revise(csm, genesis.Default.HawaiiBlockHeight))
r.NoError(csm.Commit())
r.False(stk.voteReviser.isCacheExist(genesis.Default.GreenlandBlockHeight))
// verify self-stake and total votes match
result, ok := stk.voteReviser.result(genesis.Default.HawaiiBlockHeight)
r.True(ok)
r.Equal(len(testCandidates), len(result))
cv := genesis.Default.Staking.VoteWeightCalConsts
for _, c := range result {
cand := csm.GetByOwner(c.Owner)
r.True(c.Equal(cand))
for _, cand := range testCandidates {
if address.Equal(cand.d.Owner, c.Owner) {
r.Equal(0, cand.d.SelfStake.Cmp(c.SelfStake))
}
}
for _, v := range tests {
if address.Equal(v.cand, c.Owner) && v.index != c.SelfStakeBucketIdx {
bucket, err := getBucket(csm, v.index)
r.NoError(err)
total := calculateVoteWeight(cv, bucket, false)
bucket, err = getBucket(csm, c.SelfStakeBucketIdx)
r.NoError(err)
total.Add(total, calculateVoteWeight(cv, bucket, true))
r.Equal(0, total.Cmp(c.Votes))
break
}
}
}
} | |
views.py | from django.contrib.auth.models import User, Group
from rest_framework import viewsets
from rest_framework import permissions
from tutorial.quickstart.serializers import UserSerializer, GroupSerializer
class UserViewSet(viewsets.ModelViewSet):
|
class GroupViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows groups to be viewed or edited.
"""
queryset = Group.objects.all()
serializer_class = GroupSerializer
permission_classes = [permissions.IsAuthenticated]
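# These viewsets are typically exposed through a router in urls.py (a sketch,
# following the DRF quickstart conventions):
#
#   from rest_framework import routers
#   router = routers.DefaultRouter()
#   router.register(r'users', UserViewSet)
#   router.register(r'groups', GroupViewSet)
#   # ...then include router.urls in the project's urlpatterns.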
| """
API endpoint that allows users to be viewed or edited.
"""
queryset = User.objects.all().order_by('-date_joined')
serializer_class = UserSerializer
permission_classes = [permissions.IsAuthenticated] |
time.py | import sys
time = input().strip()
splitted = time.split(':')
hours_12 = int(splitted[0])
mins = splitted[1]
secs = splitted[2][:2]  # drop the trailing AM/PM marker
is_pm = splitted[2].endswith("PM")
if is_pm:
    if 1 <= hours_12 < 12:  # between 1:00PM and 11:59PM -> add 12; 12PM (noon) stays 12
        hours_12 += 12
else:
    if hours_12 == 12:  # 12:xxAM (midnight) -> 00:xx
print(':'.join(list((str(hours_12).zfill(2), mins, secs)))) | hours_12 -= 12 |
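# Equivalent conversion using the standard library (a sketch; assumes the same
# "HH:MM:SSAM"/"HH:MM:SSPM" input format):
#   from datetime import datetime
#   print(datetime.strptime(time, "%I:%M:%S%p").strftime("%H:%M:%S"))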
consume_2432355.py | # Snowflake Damage Skin
success = sm.addDamageSkin(2432355)
if success: | sm.chat("The Snowflake Damage Skin has been added to your account's damage skin collection.") |
|
ms3_core.launch.py | # Copyright 2020-2021, The Autoware Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Launch Modules for Milestone 3 of the AVP 2020 Demo."""
from ament_index_python import get_package_share_directory
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch.actions import IncludeLaunchDescription
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch.conditions import IfCondition
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node
import os
def ge | :
"""
Launch all nodes defined in the architecture for Milestone 3 of the AVP 2020 Demo.
More details about what is included can
be found at https://gitlab.com/autowarefoundation/autoware.auto/AutowareAuto/-/milestones/25.
"""
avp_demo_pkg_prefix = get_package_share_directory('autoware_auto_avp_demo')
euclidean_cluster_param_file = os.path.join(
avp_demo_pkg_prefix, 'param/euclidean_cluster.param.yaml')
ray_ground_classifier_param_file = os.path.join(
avp_demo_pkg_prefix, 'param/ray_ground_classifier.param.yaml')
scan_downsampler_param_file = os.path.join(
avp_demo_pkg_prefix, 'param/scan_downsampler_ms3.param.yaml')
lanelet2_map_provider_param_file = os.path.join(
avp_demo_pkg_prefix, 'param/lanelet2_map_provider.param.yaml')
lane_planner_param_file = os.path.join(
avp_demo_pkg_prefix, 'param/lane_planner.param.yaml')
parking_planner_param_file = os.path.join(
avp_demo_pkg_prefix, 'param/parking_planner.param.yaml')
object_collision_estimator_param_file = os.path.join(
avp_demo_pkg_prefix, 'param/object_collision_estimator.param.yaml')
behavior_planner_param_file = os.path.join(
avp_demo_pkg_prefix, 'param/behavior_planner.param.yaml')
off_map_obstacles_filter_param_file = os.path.join(
avp_demo_pkg_prefix, 'param/off_map_obstacles_filter.param.yaml')
point_cloud_fusion_node_pkg_prefix = get_package_share_directory(
'point_cloud_fusion_nodes')
# Arguments
euclidean_cluster_param = DeclareLaunchArgument(
'euclidean_cluster_param_file',
default_value=euclidean_cluster_param_file,
description='Path to config file for Euclidean Clustering'
)
ray_ground_classifier_param = DeclareLaunchArgument(
'ray_ground_classifier_param_file',
default_value=ray_ground_classifier_param_file,
description='Path to config file for Ray Ground Classifier'
)
with_obstacles_param = DeclareLaunchArgument(
'with_obstacles',
default_value='True',
description='Enable obstacle detection'
)
scan_downsampler_param = DeclareLaunchArgument(
'scan_downsampler_param_file',
default_value=scan_downsampler_param_file,
description='Path to config file for lidar scan downsampler'
)
lanelet2_map_provider_param = DeclareLaunchArgument(
'lanelet2_map_provider_param_file',
default_value=lanelet2_map_provider_param_file,
description='Path to parameter file for Lanelet2 Map Provider'
)
lane_planner_param = DeclareLaunchArgument(
'lane_planner_param_file',
default_value=lane_planner_param_file,
description='Path to parameter file for lane planner'
)
parking_planner_param = DeclareLaunchArgument(
'parking_planner_param_file',
default_value=parking_planner_param_file,
description='Path to parameter file for parking planner'
)
object_collision_estimator_param = DeclareLaunchArgument(
'object_collision_estimator_param_file',
default_value=object_collision_estimator_param_file,
description='Path to parameter file for object collision estimator'
)
behavior_planner_param = DeclareLaunchArgument(
'behavior_planner_param_file',
default_value=behavior_planner_param_file,
description='Path to parameter file for behavior planner'
)
off_map_obstacles_filter_param = DeclareLaunchArgument(
'off_map_obstacles_filter_param_file',
default_value=off_map_obstacles_filter_param_file,
description='Path to parameter file for off-map obstacle filter'
)
# Nodes
euclidean_clustering = Node(
package='euclidean_cluster_nodes',
executable='euclidean_cluster_node_exe',
namespace='perception',
condition=IfCondition(LaunchConfiguration('with_obstacles')),
parameters=[LaunchConfiguration('euclidean_cluster_param_file')],
remappings=[
("points_in", "points_nonground")
]
)
# point cloud fusion runner to fuse front and rear lidar
point_cloud_fusion_node = IncludeLaunchDescription(
PythonLaunchDescriptionSource(
os.path.join(point_cloud_fusion_node_pkg_prefix,
'launch/vlp16_sim_lexus_pc_fusion.launch.py'))
)
ray_ground_classifier = Node(
package='ray_ground_classifier_nodes',
executable='ray_ground_classifier_cloud_node_exe',
namespace='perception',
condition=IfCondition(LaunchConfiguration('with_obstacles')),
parameters=[LaunchConfiguration('ray_ground_classifier_param_file')],
remappings=[("points_in", "/lidars/points_fused")]
)
scan_downsampler = Node(
package='voxel_grid_nodes',
executable='voxel_grid_node_exe',
namespace='lidars',
name='voxel_grid_cloud_node',
parameters=[LaunchConfiguration('scan_downsampler_param_file')],
remappings=[
("points_in", "points_fused"),
("points_downsampled", "points_fused_downsampled")
]
)
lanelet2_map_provider = Node(
package='lanelet2_map_provider',
executable='lanelet2_map_provider_exe',
namespace='had_maps',
name='lanelet2_map_provider_node',
parameters=[LaunchConfiguration('lanelet2_map_provider_param_file')]
)
lanelet2_map_visualizer = Node(
package='lanelet2_map_provider',
executable='lanelet2_map_visualizer_exe',
name='lanelet2_map_visualizer_node',
namespace='had_maps'
)
global_planner = Node(
package='lanelet2_global_planner_nodes',
name='lanelet2_global_planner_node',
namespace='planning',
executable='lanelet2_global_planner_node_exe',
remappings=[('HAD_Map_Client', '/had_maps/HAD_Map_Service'),
('vehicle_kinematic_state', '/vehicle/vehicle_kinematic_state')]
)
lane_planner = Node(
package='lane_planner_nodes',
name='lane_planner_node',
namespace='planning',
executable='lane_planner_node_exe',
parameters=[LaunchConfiguration('lane_planner_param_file')],
remappings=[('HAD_Map_Service', '/had_maps/HAD_Map_Service')]
)
parking_planner = Node(
package='parking_planner_nodes',
name='parking_planner_node',
namespace='planning',
executable='parking_planner_node_exe',
parameters=[LaunchConfiguration('parking_planner_param_file')],
remappings=[('HAD_Map_Service', '/had_maps/HAD_Map_Service')]
)
object_collision_estimator = Node(
package='object_collision_estimator_nodes',
name='object_collision_estimator_node',
namespace='planning',
executable='object_collision_estimator_node_exe',
condition=IfCondition(LaunchConfiguration('with_obstacles')),
parameters=[LaunchConfiguration('object_collision_estimator_param_file')],
remappings=[
('obstacle_topic', '/perception/lidar_bounding_boxes_filtered'),
]
)
behavior_planner = Node(
package='behavior_planner_nodes',
name='behavior_planner_node',
namespace='planning',
executable='behavior_planner_node_exe',
parameters=[
LaunchConfiguration('behavior_planner_param_file'),
{'enable_object_collision_estimator': LaunchConfiguration('with_obstacles')}
],
output='screen',
remappings=[
('HAD_Map_Service', '/had_maps/HAD_Map_Service'),
('vehicle_state', '/vehicle/vehicle_kinematic_state'),
('route', 'global_path'),
('vehicle_state_report', '/vehicle/state_report'),
('vehicle_state_command', '/vehicle/state_command')
]
)
off_map_obstacles_filter = Node(
package='off_map_obstacles_filter_nodes',
name='off_map_obstacles_filter_node',
namespace='perception',
executable='off_map_obstacles_filter_nodes_exe',
parameters=[LaunchConfiguration('off_map_obstacles_filter_param_file')],
output='screen',
remappings=[
('bounding_boxes_in', 'lidar_bounding_boxes'),
('bounding_boxes_out', 'lidar_bounding_boxes_filtered'),
('HAD_Map_Service', '/had_maps/HAD_Map_Service'),
]
)
return LaunchDescription([
euclidean_cluster_param,
ray_ground_classifier_param,
scan_downsampler_param,
with_obstacles_param,
lanelet2_map_provider_param,
lane_planner_param,
parking_planner_param,
object_collision_estimator_param,
behavior_planner_param,
off_map_obstacles_filter_param,
euclidean_clustering,
ray_ground_classifier,
scan_downsampler,
point_cloud_fusion_node,
lanelet2_map_provider,
lanelet2_map_visualizer,
global_planner,
lane_planner,
parking_planner,
object_collision_estimator,
behavior_planner,
off_map_obstacles_filter,
])
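# Typical invocation (a sketch; assumes the workspace is built and sourced):
#   ros2 launch autoware_auto_avp_demo ms3_core.launch.py with_obstacles:=False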
| nerate_launch_description() |
injection.go | package exercise1
import (
"database/sql"
"fmt"
"strings"
)
func UpdatePhone(db *sql.DB, Id string, phone string) error |
func UpdatePhoneSecure(db *sql.DB, Id string, phone string) error {
stmt, err := db.Prepare(`UPDATE USER_DETAILS SET PHONE=? WHERE USER_ID=?`)
if err != nil {
return err
}
defer stmt.Close()
result, err := stmt.Exec(phone, Id)
if err != nil {
return err
}
rows, err := result.RowsAffected()
if err != nil {
return err
}
if rows == 0 {
return fmt.Errorf("no row affected")
}
if rows > 1 {
return fmt.Errorf("more than one row affected")
}
return nil
}
| {
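	// Vulnerable by design (for this exercise): the statement is assembled via
	// string concatenation, so input such as phone = "0 WHERE 1=1 --" rewrites
	// the query and updates every row. Compare UpdatePhoneSecure, which binds
	// values through a prepared statement instead.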
var builder strings.Builder
builder.WriteString("UPDATE USER_DETAILS SET PHONE=")
builder.WriteString(phone)
builder.WriteString(" WHERE USER_ID=")
builder.WriteString(Id)
fmt.Printf("Running query: %s\n", builder.String())
_, err := db.Exec(builder.String())
if err != nil {
return err
}
return nil
} |
download_helpers.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Helpers for the mass downloader.
Intended to simplify and stabilize the logic of the mass downloader and make
it understandable in the first place.
:copyright:
Lion Krischer ([email protected]), 2014-2015
:license:
GNU Lesser General Public License, Version 3
(https://www.gnu.org/copyleft/lesser.html)
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import * # NOQA
import collections
import copy
import fnmatch
import itertools
import sys
from multiprocessing.pool import ThreadPool
import os
import time
import timeit
if sys.version_info.major == 2:
from itertools import ifilterfalse as filterfalse
else:
from itertools import filterfalse
import numpy as np
from lxml.etree import XMLSyntaxError
import obspy
from obspy.core.util import Enum
from . import utils
# The current status of an entity.
STATUS = Enum(["none", "needs_downloading", "downloaded", "ignore", "exists",
"download_failed", "download_rejected",
"download_partially_failed"])
class _SlotsEqualityComparisionObject(object):
"""
    Helper object with an equality comparison method that simply compares all
    slotted attributes.
"""
__slots__ = []
def __eq__(self, other):
if type(self) != type(other):
return False
return all([getattr(self, _i) == getattr(other, _i)
for _i in self.__slots__])
class Station(_SlotsEqualityComparisionObject):
"""
Object representing a seismic station within the download helper classes.
It knows the coordinates of the station to perform the filtering,
its channels and the filename and status of the StationXML files.
:param network: The network code.
:type network: str
:param station: The station code.
:type station: str
:param latitude: The latitude of the station.
:type latitude: float
:param longitude: The longitude of the station.
:type longitude: float
:param channels: The channels of the station.
:type channels: list of :class:`~.Channel` objects
:param stationxml_filename: The filename of the StationXML file.
:type stationxml_filename: str
:param stationxml_status: The current status of the station.
    :type stationxml_status:
:class:`~.STATUS`
"""
__slots__ = ["network", "station", "latitude", "longitude", "channels",
"_stationxml_filename", "want_station_information",
"miss_station_information", "have_station_information",
"stationxml_status"]
def __init__(self, network, station, latitude, longitude, channels,
stationxml_filename=None, stationxml_status=None):
# Station attributes.
self.network = network
self.station = station
self.latitude = latitude
self.longitude = longitude
self.channels = channels
# Station information settings.
self.stationxml_filename = stationxml_filename
        self.stationxml_status = (stationxml_status
                                  if stationxml_status is not None
                                  else STATUS.NONE)
# Internally keep track of which channels and time interval want
# station information, which miss station information and which
# already have some. want_station_information should always be the
# union of miss and have.
self.want_station_information = {}
self.miss_station_information = {}
self.have_station_information = {}
@property
def has_existing_or_downloaded_time_intervals(self):
"""
        Returns True if any of the station's time intervals have status
        "DOWNLOADED" or "EXISTS". Otherwise returns False, meaning the
        station does not have to be considered anymore.
"""
status = set()
for chan in self.channels:
for ti in chan.intervals:
status.add(ti.status)
if STATUS.EXISTS in status or STATUS.DOWNLOADED in status:
return True
return False
@property
def has_existing_time_intervals(self):
"""
Returns True if any of the station's time intervals already exist.
"""
for chan in self.channels:
for ti in chan.intervals:
if ti.status == STATUS.EXISTS:
return True
return False
def remove_files(self, logger, reason):
"""
Delete all files under it. Only delete stuff that actually has been
downloaded!
"""
for chan in self.channels:
for ti in chan.intervals:
if ti.status != STATUS.DOWNLOADED or not ti.filename:
continue
if os.path.exists(ti.filename):
logger.info("Deleting MiniSEED file '%s'. Reason: %s" % (
ti.filename, reason))
utils.safe_delete(ti.filename)
if self.stationxml_status == STATUS.DOWNLOADED and \
self.stationxml_filename and \
os.path.exists(self.stationxml_filename):
logger.info("Deleting StationXMl file '%s'. Reason: %s" %
(self.stationxml_filename, reason))
utils.safe_delete(self.stationxml_filename)
@property
def stationxml_filename(self):
return self._stationxml_filename
@stationxml_filename.setter
def stationxml_filename(self, value):
"""
Setter creating the directory for the file if it does not already
exist.
"""
self._stationxml_filename = value
if not value:
return
dirname = os.path.dirname(value)
if not os.path.exists(dirname):
os.makedirs(dirname)
@property
def temporal_bounds(self):
"""
Return the temporal bounds for the station.
"""
starttimes = []
endtimes = []
for channel in self.channels:
s, e = channel.temporal_bounds
starttimes.append(s)
endtimes.append(e)
return min(starttimes), max(endtimes)
def __str__(self):
channels = "\n".join(str(i) for i in self.channels)
channels = "\n\t".join(channels.splitlines())
return (
"Station '{network}.{station}' [Lat: {lat:.2f}, Lng: {lng:.2f}]\n"
"\t-> Filename: {filename} ({status})\n"
"\t-> Wants station information for channels: {want}\n"
"\t-> Has station information for channels: {has}\n"
"\t-> Misses station information for channels: {miss}\n"
"\t{channels}"
).format(
network=self.network,
station=self.station,
lat=self.latitude,
lng=self.longitude,
filename=self.stationxml_filename,
status="exists" if (self.stationxml_filename and os.path.exists(
self.stationxml_filename)) else "does not yet exist",
want=", ".join(["%s.%s" % (_i[0], _i[1]) for _i in
self.want_station_information.keys()]),
has=", ".join(["%s.%s" % (_i[0], _i[1]) for _i in
self.have_station_information.keys()]),
miss=", ".join(["%s.%s" % (_i[0], _i[1]) for _i in
self.miss_station_information.keys()]),
channels=channels)
def prepare_stationxml_download(self, stationxml_storage, logger):
"""
Figure out what to download.
        :param stationxml_storage: The StationXML storage settings.
"""
# Determine what channels actually want to have station information.
# This will be a tuple of location code, channel code, starttime,
# and endtime.
self.want_station_information = {}
for channel in self.channels:
if channel.needs_station_file is False:
continue
self.want_station_information[
(channel.location, channel.channel)] = channel.temporal_bounds
# No channel has any data, thus nothing will happen.
if not self.want_station_information:
self.stationxml_status = STATUS.NONE
return
# Only those channels that now actually want station information
# will be treated in the following.
s, e = self.temporal_bounds
storage = utils.get_stationxml_filename(
stationxml_storage, self.network, self.station,
list(self.want_station_information.keys()),
starttime=s, endtime=e)
# The simplest case. The function returns a string. Now two things
# can happen.
if isinstance(storage, (str, bytes)):
filename = storage
self.stationxml_filename = filename
# 1. The file does not yet exist. Thus all channels must be
# downloaded.
if not os.path.exists(filename):
self.miss_station_information = \
copy.deepcopy(self.want_station_information)
self.have_station_information = {}
self.stationxml_status = STATUS.NEEDS_DOWNLOADING
return
# 2. The file does exist. It will be parsed. If it contains ALL
# necessary information, nothing will happen. Otherwise it will
# be overwritten.
else:
info = utils.get_stationxml_contents(filename)
for c_id, times in self.want_station_information.items():
# Get the temporal range of information in the file.
c_info = [_i for _i in info if
_i.network == self.network and
_i.station == self.station and
_i.location == c_id[0] and
_i.channel == c_id[1]]
if not c_info:
break
starttime = min([_i.starttime for _i in c_info])
endtime = max([_i.endtime for _i in c_info])
if starttime > times[0] or endtime < times[1]:
break
# All good if no break is called.
else:
self.have_station_information = \
copy.deepcopy(self.want_station_information)
self.miss_station_information = {}
self.stationxml_status = STATUS.EXISTS
return
# Otherwise everything will be downloaded.
self.miss_station_information = \
copy.deepcopy(self.want_station_information)
self.have_station_information = {}
self.stationxml_status = STATUS.NEEDS_DOWNLOADING
return
# The other possibility is that a dictionary is returned.
else:
# The types are already checked by the get_stationxml_filename()
# function.
missing_channels = storage["missing_channels"]
available_channels = storage["available_channels"]
# Get the channels wanting station information and filter them.
channels_wanting_station_information = copy.deepcopy(
self.want_station_information
)
# Figure out what channels are missing and will be downloaded.
self.miss_station_information = {}
for channel in missing_channels:
if channel not in channels_wanting_station_information:
continue
self.miss_station_information[channel] = \
channels_wanting_station_information[channel]
# Same thing but with the already available channels.
self.have_station_information = {}
for channel in available_channels:
if channel not in channels_wanting_station_information:
continue
self.have_station_information[channel] = \
channels_wanting_station_information[channel]
self.stationxml_filename = storage["filename"]
# Raise a warning if something is missing, but do not raise an
# exception or halt the program at this point.
have_channels = set(self.have_station_information.keys())
miss_channels = set(self.miss_station_information.keys())
want_channels = set(self.want_station_information.keys())
if have_channels.union(miss_channels) != want_channels:
logger.warning(
"The custom `stationxml_storage` did not return "
"information about channels %s" %
str(want_channels.difference(have_channels.union(
miss_channels))))
if self.miss_station_information:
self.stationxml_status = STATUS.NEEDS_DOWNLOADING
elif not self.miss_station_information and \
self.have_station_information:
self.stationxml_status = STATUS.EXISTS
else:
self.stationxml_status = STATUS.IGNORE
def prepare_mseed_download(self, mseed_storage):
"""
Loop through all channels of the station and distribute filenames
and the current status of the channel.
A MiniSEED interval will be ignored, if the `mseed_storage` function
returns `True`.
Possible statuses after method execution are IGNORE, EXISTS, and
NEEDS_DOWNLOADING.
        :param mseed_storage: The MiniSEED storage settings.
"""
for channel in self.channels:
for interval in channel.intervals:
interval.filename = utils.get_mseed_filename(
mseed_storage, self.network, self.station,
channel.location, channel.channel, interval.start,
interval.end)
if interval.filename is True:
interval.status = STATUS.IGNORE
elif os.path.exists(interval.filename):
interval.status = STATUS.EXISTS
else:
if not os.path.exists(os.path.dirname(interval.filename)):
os.makedirs(os.path.dirname(interval.filename))
interval.status = STATUS.NEEDS_DOWNLOADING
def sanitize_downloads(self, logger):
"""
Should be run after the MiniSEED and StationXML downloads finished.
It will make sure that every MiniSEED file also has a corresponding
StationXML file.
It will delete MiniSEED files but never a StationXML file. The logic
of the download helpers does not allow for a StationXML file with no
data.
"""
from obspy.io.mseed.util import get_start_and_end_time
# All or nothing for each channel.
for id in self.miss_station_information.keys():
logger.warning("Station information could not be downloaded for "
"%s.%s.%s.%s. MiniSEED files outside of the "
"station information period "
"will be deleted!" % (
self.network, self.station, id[0], id[1]))
channel = [_i for _i in self.channels if
(_i.location, _i.channel) == id][0]
for time_interval in channel.intervals:
# Check that file exists before proceeding
if not time_interval.filename or \
not os.path.isfile(time_interval.filename):
continue
# Check that the time_interval.start and end are correct!
time_interval.start, time_interval.end = \
get_start_and_end_time(time_interval.filename)
# Only delete downloaded things!
if time_interval.status == STATUS.DOWNLOADED:
# Only delete if the station data are actually missing
# for this time
miss_start, miss_end = self.miss_station_information[id]
if miss_start <= time_interval.start <= miss_end and \
miss_start <= time_interval.end <= miss_end:
utils.safe_delete(time_interval.filename)
time_interval.status = STATUS.DOWNLOAD_REJECTED
class Channel(_SlotsEqualityComparisionObject):
"""
Object representing a Channel. Each time interval should end up in one
MiniSEED file.
"""
__slots__ = ["location", "channel", "intervals"]
def __init__(self, location, channel, intervals):
self.location = location
self.channel = channel
self.intervals = intervals
@property
def needs_station_file(self):
"""
Determine if the channel requires any station information.
As soon as the status of at least one interval is either
``DOWNLOADED`` or ``EXISTS`` the whole channel will be thought of as
requiring station information. This does not yet mean that station
information will be downloaded. That is decided at a later stage.
"""
status = set([_i.status for _i in self.intervals])
if STATUS.DOWNLOADED in status or STATUS.EXISTS in status:
return True
return False
@property
def temporal_bounds(self):
"""
Returns a tuple of the minimum start time and the maximum end time.
"""
return (min([_i.start for _i in self.intervals]),
max([_i.end for _i in self.intervals]))
def __str__(self):
return "Channel '{location}.{channel}':\n\t{intervals}".format(
location=self.location, channel=self.channel,
intervals="\n\t".join([str(i) for i in self.intervals]))
class TimeInterval(_SlotsEqualityComparisionObject):
"""
Simple object representing a time interval of a channel.
It knows the temporal bounds of the interval, the (desired) filename,
and the current status of the interval.
:param start: The start of the interval.
:type start: :class:`~obspy.core.utcdatetime.UTCDateTime`
:param end: The end of the interval.
:type end: :class:`~obspy.core.utcdatetime.UTCDateTime`
:param filename: The filename of the interval.
:type filename: str
:param status: The status of the time interval.
:param status: :class:`~.STATUS`
"""
__slots__ = ["start", "end", "filename", "status"]
def __init__(self, start, end, filename=None, status=None):
self.start = start
self.end = end
self.filename = filename
self.status = status if status is not None else STATUS.NONE
def __repr__(self):
return "TimeInterval(start={start}, end={end}, filename={filename}, " \
"status='{status}')".format(
start=repr(self.start),
end=repr(self.end),
filename="'%s'" % self.filename
if self.filename is not None else "None",
status=str(self.status))
class ClientDownloadHelper(object):
"""
:type client: :class:`obspy.fdsn.client.Client`
:param client: An initialized FDSN client.
:type client_name: str
:param client_name: The name of the client. Only used for logging.
:type restrictions: :class:`~.restrictions.Restrictions`
:param restrictions: The non-domain related restrictions for the query.
:type domain: :class:`~.domain.Domain` subclass
:param domain: The domain definition.
:param mseed_storage: The MiniSEED storage settings.
:param stationxml_storage: The StationXML storage settings.
:param logger: An active logger instance.
"""
def __init__(self, client, client_name, restrictions, domain,
mseed_storage, stationxml_storage, logger):
self.client = client
self.client_name = client_name
self.restrictions = restrictions
self.domain = domain
self.mseed_storage = mseed_storage
self.stationxml_storage = stationxml_storage
self.logger = logger
self.stations = {}
self.is_availability_reliable = None
def __bool__(self):
return bool(len(self))
def __str__(self):
avail_map = {
None: "Unknown reliability of availability information",
True: "Reliable availability information",
False: "Non-reliable availability information"
}
reliability = avail_map[self.is_availability_reliable]
return (
"ClientDownloadHelper object for client '{client}' ({url})\n"
"-> {reliability}\n"
"-> Manages {station_count} stations.\n{stations}").format(
client=self.client_name,
url=self.client.base_url,
reliability=reliability,
station_count=len(self),
stations="\n".join([str(_i) for _i in self.stations.values()]))
def __len__(self):
return len(self.stations)
def prepare_mseed_download(self):
"""
Prepare each Station for the MiniSEED downloading stage.
This will distribute filenames and identify files that require
downloading.
"""
for station in self.stations.values():
station.prepare_mseed_download(mseed_storage=self.mseed_storage)
def filter_stations_based_on_minimum_distance(
self, existing_client_dl_helpers):
"""
Removes stations until all stations have a certain minimum distance to
each other.
Returns the rejected stations which is mainly useful for testing.
:param existing_client_dl_helpers: Instances of already existing
client download helpers.
:type existing_client_dl_helpers: list of
:class:`~.ClientDownloadHelper`
"""
if not self.restrictions.minimum_interstation_distance_in_m:
# No rejected stations.
return []
# Create a sorted copy that will be used in the following. Make it
# more deterministic by sorting the stations based on the id.
stations = copy.copy(list(self.stations.values()))
stations = sorted(stations, key=lambda x: (x.network, x.station))
existing_stations = []
for dlh in existing_client_dl_helpers:
existing_stations.extend(list(dlh.stations.values()))
remaining_stations = []
rejected_stations = []
# There are essentially two possibilities. If no station exists yet,
# it will choose the largest subset of stations satisfying the
# minimum inter-station distance constraint.
if not existing_stations:
# Build k-d-tree and query for the neighbours of each point within
# the minimum distance.
kd_tree = utils.SphericalNearestNeighbour(stations)
nns = kd_tree.query_pairs(
self.restrictions.minimum_interstation_distance_in_m)
indexes_to_remove = []
# Keep removing the station with the most pairs until no pairs are
# left.
while nns:
most_common = collections.Counter(
itertools.chain.from_iterable(nns)).most_common()[0][0]
indexes_to_remove.append(most_common)
nns = list(filterfalse(lambda x: most_common in x, nns))
# Remove these indices this results in a set of stations we wish to
# keep.
new_remaining_stations = [_i[1] for _i in enumerate(stations)
if _i[0] not in indexes_to_remove]
new_rejected_stations = [_i[1] for _i in enumerate(stations)
if _i[0] in indexes_to_remove]
# Station objects are not hashable thus we have to go the long
# route.
for st in new_remaining_stations:
if st not in remaining_stations:
remaining_stations.append(st)
for st in new_rejected_stations:
if st not in rejected_stations:
rejected_stations.append(st)
# Otherwise it will add new stations approximating a Poisson disk
# distribution.
else:
while stations:
# kd-tree with all existing_stations
existing_kd_tree = utils.SphericalNearestNeighbour(
existing_stations)
# Now we have to get the distance to the closest existing
# station for all new stations.
distances = np.ma.array(existing_kd_tree.query(stations)[0])
if np.isinf(distances[0]):
break
distances.mask = False
# Step one is to get rid of all stations that are closer
# than the minimum distance to any existing station.
remove = np.where(
distances <
self.restrictions.minimum_interstation_distance_in_m)[0]
rejected_stations.extend([stations[_i] for _i in remove])
keep = np.where(
distances >=
self.restrictions.minimum_interstation_distance_in_m)[0]
distances.mask[remove] = True
if len(keep):
# Station with the largest distance to next closer station.
largest = np.argmax(distances)
remaining_stations.append(stations[largest])
existing_stations.append(stations[largest])
# Add all rejected stations here.
stations = [stations[_i] for _i in keep if _i != largest]
else:
stations = []
# Now actually delete the files and everything of the rejected
# stations.
for station in rejected_stations:
station.remove_files(logger=self.logger,
reason="Minimum distance filtering.")
self.stations = {}
for station in remaining_stations:
self.stations[(station.network, station.station)] = station
# Return the rejected stations.
return {(_i.network, _i.station): _i for _i in rejected_stations}
def prepare_stationxml_download(self):
"""
Prepare each Station for the StationXML downloading stage.
This will distribute filenames and identify files that require
downloading.
"""
for station in self.stations.values():
station.prepare_stationxml_download(
stationxml_storage=self.stationxml_storage,
logger=self.logger)
def download_stationxml(self, threads=3):
"""
Actually download the StationXML files.
:param threads: Limits the maximum number of threads for the client.
"""
def star_download_station(args):
"""
Maps arguments to the utils.download_stationxml() function.
:param args: The to-be mapped arguments.
"""
try:
ret_val = utils.download_stationxml(*args, logger=self.logger)
except utils.ERRORS as e:
self.logger.error(str(e))
return None
return ret_val
# Build up everything we want to download.
arguments = []
for station in self.stations.values():
if not station.miss_station_information:
continue
s, e = station.temporal_bounds
if self.restrictions.station_starttime:
s = self.restrictions.station_starttime
if self.restrictions.station_endtime:
e = self.restrictions.station_endtime
bulk = [(station.network, station.station, channel.location,
channel.channel, s, e) for channel in station.channels]
arguments.append((self.client, self.client_name, bulk,
station.stationxml_filename))
if not arguments:
self.logger.info("Client '%s' - No station information to "
"download." % self.client_name)
return
# Download it.
s_time = timeit.default_timer()
pool = ThreadPool(min(threads, len(arguments)))
results = pool.map(star_download_station, arguments)
pool.close()
e_time = timeit.default_timer()
results = [_i for _i in results if _i is not None]
# Check it.
filecount = 0
download_size = 0
# Update the station structures. Loop over each returned file.
for s_id, filename in results:
filecount += 1
station = self.stations[s_id]
size = os.path.getsize(filename)
download_size += size
# Extract information about that file.
try:
info = utils.get_stationxml_contents(filename)
# Sometimes some services choose to not return XML files - guard
# against it and just delete the file. At subsequent runs the
# mass downloader will attempt to download it again.
except XMLSyntaxError:
self.logger.info(
"Client '%s' - File %s is not an XML file - it will be "
"deleted." % (self.client_name, filename))
utils.safe_delete(filename)
continue
still_missing = {}
# Make sure all missing information has been downloaded by
# looping over each channel of the station that originally
# requested to be downloaded.
for c_id, times in station.miss_station_information.items():
# Get the temporal range of information in the file.
c_info = [_i for _i in info if
_i.network == station.network and
_i.station == station.station and
_i.location == c_id[0] and
_i.channel == c_id[1]]
if not c_info:
continue
starttime = min([_i.starttime for _i in c_info])
endtime = max([_i.endtime for _i in c_info])
if starttime > times[0] or endtime < times[1]:
                    # Cope with the case where station information is missing
                    # for only part of the requested interval.
if starttime < times[1]:
still_missing[c_id] = (times[0], starttime)
station.have_station_information[c_id] = (starttime,
times[1])
elif endtime > times[0]:
still_missing[c_id] = (endtime, times[1])
station.have_station_information[c_id] = (times[0],
endtime)
else:
still_missing[c_id] = times
continue
station.have_station_information[c_id] = times
station.miss_station_information = still_missing
if still_missing:
station.stationxml_status = STATUS.DOWNLOAD_PARTIALLY_FAILED
else:
station.stationxml_status = STATUS.DOWNLOADED
# Now loop over all stations and set the status of the ones that
# still need downloading to download failed.
for station in self.stations.values():
if station.stationxml_status == STATUS.NEEDS_DOWNLOADING:
station.stationxml_status = STATUS.DOWNLOAD_FAILED
self.logger.info("Client '%s' - Downloaded %i station files [%.1f MB] "
"in %.1f seconds [%.2f KB/sec]." % (
self.client_name, filecount,
download_size / 1024.0 ** 2,
e_time - s_time,
(download_size / 1024.0) / (e_time - s_time)))
def download_mseed(self, chunk_size_in_mb=25, threads_per_client=3):
"""
Actually download MiniSEED data.
:param chunk_size_in_mb: Attempt to download data in chunks of this
size.
:param threads_per_client: Threads to launch per client. 3 seems to
be a value in agreement with some data centers.
"""
# Estimate the download size to have equally sized chunks.
channel_sampling_rate = {
"F": 5000, "G": 5000, "D": 1000, "C": 1000, "E": 250, "S": 80,
"H": 250, "B": 80, "M": 10, "L": 1, "V": 0.1, "U": 0.01,
"R": 0.001, "P": 0.0001, "T": 0.00001, "Q": 0.000001, "A": 5000,
"O": 5000}
# Split into chunks of about equal size in terms of filesize.
chunks = []
chunks_curr = []
curr_chunks_mb = 0
# Don't request more than 50 chunks at once to not choke the servers.
max_chunk_length = 50
counter = collections.Counter()
# Keep track of attempted downloads.
for sta in self.stations.values():
for cha in sta.channels:
# The band code is used to estimate the sampling rate of the
# data to be downloaded.
band_code = cha.channel[0].upper()
try:
sr = channel_sampling_rate[band_code]
except KeyError:
# Generic sampling rate for exotic band codes.
sr = 1.0
for interval in cha.intervals:
counter[interval.status] += 1
# Only take those time intervals that actually require
# some downloading.
if interval.status != STATUS.NEEDS_DOWNLOADING:
continue
chunks_curr.append((
sta.network, sta.station, cha.location, cha.channel,
interval.start, interval.end, interval.filename))
# Assume that each sample needs 4 byte, STEIM
# compression reduces size to about a third.
# chunk size is in MB
duration = interval.end - interval.start
curr_chunks_mb += \
sr * duration * 4.0 / 3.0 / 1024.0 / 1024.0
if curr_chunks_mb >= chunk_size_in_mb or \
len(chunks_curr) >= max_chunk_length:
chunks.append(chunks_curr)
chunks_curr = []
curr_chunks_mb = 0
if chunks_curr:
chunks.append(chunks_curr)
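        # Size-estimate example (illustrative): one day (86400 s) of a "B"
        # band channel (80 Hz in the table above) is estimated at
        # 80 * 86400 * 4 / 3 bytes, i.e. about 8.8 MB, so roughly three such
        # channel-days fill one default 25 MB chunk.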
keys = sorted(counter.keys())
for key in keys:
self.logger.info(
"Client '%s' - Status for %i time intervals/channels before "
"downloading: %s" % (self.client_name, counter[key],
key.upper()))
if not chunks:
return
def star_download_mseed(args):
"""
Star maps the arguments to the
utils.download_and_split_mseed_bulk() function.
:param args: The arguments to be passed.
"""
try:
ret_val = utils.download_and_split_mseed_bulk(
*args, logger=self.logger)
except utils.ERRORS as e:
msg = ("Client '%s' - " % args[1]) + str(e)
if "no data available" in msg.lower():
self.logger.info(msg.split("Detailed response")[0].strip())
else:
self.logger.error(msg)
return []
return ret_val
pool = ThreadPool(min(threads_per_client, len(chunks)))
d_start = timeit.default_timer()
pool.map(
star_download_mseed,
[(self.client, self.client_name, chunk) for chunk in chunks])
pool.close()
d_end = timeit.default_timer()
self.logger.info("Client '%s' - Launching basic QC checks..." %
self.client_name)
downloaded_bytes, discarded_bytes = self._check_downloaded_data()
total_bytes = downloaded_bytes + discarded_bytes
self.logger.info("Client '%s' - Downloaded %.1f MB [%.2f KB/sec] of "
"data, %.1f MB of which were discarded afterwards." %
(self.client_name, total_bytes / 1024.0 ** 2,
total_bytes / 1024.0 / (d_end - d_start),
discarded_bytes / 1024.0 ** 2))
# Recount everything to be able to emit some nice statistics.
counter = collections.Counter()
for sta in self.stations.values():
for chan in sta.channels:
for interval in chan.intervals:
counter[interval.status] += 1
keys = sorted(counter.keys())
for key in keys:
self.logger.info(
"Client '%s' - Status for %i time intervals/channels after "
"downloading: %s" % (
self.client_name, counter[key], key.upper()))
self._remove_failed_and_ignored_stations()
def _remove_failed_and_ignored_stations(self):
"""
Removes all stations that have no time interval with either exists
or downloaded status.
"""
to_be_removed_keys = []
for key, station in self.stations.items():
if station.has_existing_or_downloaded_time_intervals is True:
continue
to_be_removed_keys.append(key)
for key in to_be_removed_keys:
del self.stations[key]
def sanitize_downloads(self):
"""
Should be run after the MiniSEED and StationXML downloads finished.
It will make sure that every MiniSEED file also has a corresponding
StationXML file.
"""
for station in self.stations.values():
station.sanitize_downloads(logger=self.logger)
def _check_downloaded_data(self):
"""
Read the downloaded data, set the proper status flags and remove
data that does not meet the QC criteria. It just checks the
downloaded data for minimum length and gaps/overlaps.
Returns the downloaded_bytes and the discarded_bytes.
"""
downloaded_bytes = 0
discarded_bytes = 0
for sta in self.stations.values():
for cha in sta.channels:
for interval in cha.intervals:
# The status of the interval should not have changed if
# it did not require downloading in the first place.
if interval.status != STATUS.NEEDS_DOWNLOADING:
continue
# If the file does not exist, mark the time interval as
# download failed.
if not os.path.exists(interval.filename):
interval.status = STATUS.DOWNLOAD_FAILED
continue
size = os.path.getsize(interval.filename)
if size == 0:
self.logger.warning("Zero byte file '%s'. Will be "
"deleted." % interval.filename)
utils.safe_delete(interval.filename)
interval.status = STATUS.DOWNLOAD_FAILED
continue
# Guard against faulty files.
try:
st = obspy.read(interval.filename, headonly=True)
except Exception as e:
self.logger.warning(
"Could not read file '%s' due to: %s\n"
"Will be discarded." % (interval.filename, str(e)))
utils.safe_delete(interval.filename)
discarded_bytes += size
interval.status = STATUS.DOWNLOAD_FAILED
continue
# Valid files with no data.
if len(st) == 0:
self.logger.warning(
"Empty file '%s'. Will be deleted." %
interval.filename)
utils.safe_delete(interval.filename)
discarded_bytes += size
interval.status = STATUS.DOWNLOAD_FAILED
continue
# If user did not want gappy files, remove them.
if self.restrictions.reject_channels_with_gaps is True and\
len(st) > 1:
self.logger.info(
"File '%s' has %i traces and thus contains "
"gaps or overlaps. Will be deleted." % (
interval.filename, len(st)))
utils.safe_delete(interval.filename)
discarded_bytes += size
interval.status = STATUS.DOWNLOAD_REJECTED
continue
if self.restrictions.minimum_length:
duration = sum([tr.stats.endtime - tr.stats.starttime
for tr in st])
expected_min_duration = \
self.restrictions.minimum_length * \
(interval.end - interval.start)
if duration < expected_min_duration:
self.logger.info(
"File '%s' has only %.2f seconds of data. "
"%.2f are required. File will be deleted." %
(interval.filename, duration,
expected_min_duration))
utils.safe_delete(interval.filename)
discarded_bytes += size
interval.status = STATUS.DOWNLOAD_REJECTED
continue
downloaded_bytes += size
interval.status = STATUS.DOWNLOADED
return downloaded_bytes, discarded_bytes
def _parse_miniseed_filenames(self, filenames, restrictions):
time_range = restrictions.minimum_length * (restrictions.endtime -
restrictions.starttime)
channel_availability = []
for filename in filenames:
st = obspy.read(filename, format="MSEED", headonly=True)
if restrictions.reject_channels_with_gaps and len(st) > 1:
self.logger.warning("Channel %s has gap or overlap. Will be "
"removed." % st[0].id)
try:
os.remove(filename)
except OSError:
pass
continue
elif len(st) == 0:
self.logger.error("MiniSEED file with no data detected. "
"Should not happen!")
continue
tr = st[0]
duration = tr.stats.endtime - tr.stats.starttime
if restrictions.minimum_length and duration < time_range:
self.logger.warning("Channel %s does not satisfy the minimum "
"length requirement. %.2f seconds instead "
"of the required %.2f seconds." % (
tr.id, duration, time_range))
try:
os.remove(filename)
except OSError:
pass
continue
channel_availability.append(utils.ChannelAvailability(
tr.stats.network, tr.stats.station, tr.stats.location,
tr.stats.channel, tr.stats.starttime, tr.stats.endtime,
filename))
return channel_availability
def discard_stations(self, existing_client_dl_helpers):
"""
Discard all stations part of any of the already existing client
download helper instances. The station discarding happens purely
based on station ids.
:param existing_client_dl_helpers: Instances of already existing
client download helpers. All stations part of this will not be
downloaded anymore.
:type existing_client_dl_helpers: list of
:class:`~.ClientDownloadHelper`
"""
station_ids = []
for helper in existing_client_dl_helpers:
station_ids.extend(helper.stations.keys())
for station_id in station_ids:
try:
del self.stations[station_id]
except KeyError:
pass
def get_availability(self):
"""
Queries the current client for information on what stations are
available given the spatial and temporal restrictions.
"""
        # Check if stations need to be filtered after downloading or if the
        # restrictions one can impose with the FDSN webservice queries are
        # enough. This depends on the domain definition.
try:
self.domain.is_in_domain(0, 0)
needs_filtering = True
except NotImplementedError:
needs_filtering = False
arguments = {
"network": self.restrictions.network,
"station": self.restrictions.station,
"location": self.restrictions.location,
"channel": self.restrictions.channel,
"starttime": self.restrictions.starttime,
"endtime": self.restrictions.endtime,
# Request at the channel level.
"level": "channel"
}
# Add the domain specific query parameters.
arguments.update(self.domain.get_query_parameters())
# Check the capabilities of the service and see what is the most
# appropriate way of acquiring availability information. Some services
# right now require manual overriding of what they claim to be
# capable of.
if "matchtimeseries" in self.client.services["station"]:
arguments["matchtimeseries"] = True
if "format" in self.client.services["station"]:
arguments["format"] = "text"
self.is_availability_reliable = True
else:
if "format" in self.client.services["station"]:
arguments["format"] = "text"
self.is_availability_reliable = False
if self.is_availability_reliable:
self.logger.info("Client '%s' - Requesting reliable "
"availability." % self.client_name)
else:
self.logger.info(
"Client '%s' - Requesting unreliable availability." %
self.client_name)
try:
start = time.time()
inv = self.client.get_stations(**arguments)
end = time.time()
except utils.ERRORS as e:
if "no data available" in str(e).lower():
self.logger.info(
"Client '%s' - No data available for request." %
self.client_name)
return
self.logger.error(
"Client '{0}' - Failed getting availability: %s".format(
self.client_name), str(e))
return
# This sometimes fires if a service returns some random stuff which
# is not a valid station file.
except Exception as e:
self.logger.error(
"Client '{0}' - Failed getting availability due to "
"unexpected exception: %s".format(self.client_name), str(e))
return
self.logger.info("Client '%s' - Successfully requested availability "
"(%.2f seconds)" % (self.client_name, end - start))
# Get the time intervals from the restrictions.
intervals = [TimeInterval(start=_i[0], end=_i[1])
for _i in self.restrictions]
for network in inv:
# Skip network if so desired.
skip_network = False
for pattern in self.restrictions.exclude_networks:
if fnmatch.fnmatch(network.code, pattern):
skip_network = True
break
if skip_network:
continue
for station in network:
# Skip station if so desired.
skip_station = False
for pattern in self.restrictions.exclude_stations:
if fnmatch.fnmatch(station.code, pattern):
skip_station = True
break
if skip_station:
continue
# If an inventory is given, only keep stations part of the
# inventory.
if self.restrictions.limit_stations_to_inventory is not None \
and (network.code, station.code) not in \
self.restrictions.limit_stations_to_inventory:
continue
# Skip the station if it is not in the desired domain.
if needs_filtering is True and \
not self.domain.is_in_domain(station.latitude,
station.longitude):
continue
channels = []
for channel in station.channels:
# Remove channels that somehow slipped past the temporal
# constraints due to weird behaviour from the data center.
if (channel.start_date > self.restrictions.endtime) or \
(channel.end_date < self.restrictions.starttime):
continue
new_channel = Channel(
location=channel.location_code, channel=channel.code,
intervals=copy.deepcopy(intervals))
# Multiple channel epochs would result in duplicate
# channels which we don't want. Bit of a silly logic here
# to get rid of them.
if new_channel not in channels:
channels.append(new_channel)
if self.restrictions.channel is None:
# Group by locations and apply the channel priority filter
# to each.
filtered_channels = []
def get_loc(x):
return x.location
for location, _channels in itertools.groupby(
sorted(channels, key=get_loc), get_loc):
filtered_channels.extend(utils.filter_channel_priority(
list(_channels), key="channel",
priorities=self.restrictions.channel_priorities))
channels = filtered_channels
if self.restrictions.location is None:
# Filter to remove unwanted locations according to the
# priority list.
channels = utils.filter_channel_priority(
channels, key="location",
priorities=self.restrictions.location_priorities)
if not channels:
continue
self.stations[(network.code, station.code)] = Station(
network=network.code,
station=station.code,
latitude=station.latitude,
longitude=station.longitude,
channels=channels)
self.logger.info("Client '%s' - Found %i stations (%i channels)." % (
self.client_name, len(self.stations),
sum([len(_i.channels) for _i in self.stations.values()])))
if __name__ == '__main__':
import doctest | doctest.testmod(exclude_empty=True) |
|
modify_security_group_policy.go | package ecs
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
//
// Code generated by Alibaba Cloud SDK Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
)
// ModifySecurityGroupPolicy invokes the ecs.ModifySecurityGroupPolicy API synchronously
// api document: https://help.aliyun.com/api/ecs/modifysecuritygrouppolicy.html
func (client *Client) ModifySecurityGroupPolicy(request *ModifySecurityGroupPolicyRequest) (response *ModifySecurityGroupPolicyResponse, err error) {
response = CreateModifySecurityGroupPolicyResponse()
err = client.DoAction(request, response)
return
}
// ModifySecurityGroupPolicyWithChan invokes the ecs.ModifySecurityGroupPolicy API asynchronously
// api document: https://help.aliyun.com/api/ecs/modifysecuritygrouppolicy.html
// asynchronous document: https://help.aliyun.com/document_detail/66220.html
func (client *Client) ModifySecurityGroupPolicyWithChan(request *ModifySecurityGroupPolicyRequest) (<-chan *ModifySecurityGroupPolicyResponse, <-chan error) {
responseChan := make(chan *ModifySecurityGroupPolicyResponse, 1)
errChan := make(chan error, 1)
err := client.AddAsyncTask(func() {
defer close(responseChan)
defer close(errChan)
response, err := client.ModifySecurityGroupPolicy(request)
if err != nil {
errChan <- err
} else {
responseChan <- response
}
})
if err != nil {
errChan <- err
close(responseChan)
close(errChan)
}
return responseChan, errChan
}
// ModifySecurityGroupPolicyWithCallback invokes the ecs.ModifySecurityGroupPolicy API asynchronously
// api document: https://help.aliyun.com/api/ecs/modifysecuritygrouppolicy.html
// asynchronous document: https://help.aliyun.com/document_detail/66220.html
func (client *Client) ModifySecurityGroupPolicyWithCallback(request *ModifySecurityGroupPolicyRequest, callback func(response *ModifySecurityGroupPolicyResponse, err error)) <-chan int {
result := make(chan int, 1)
err := client.AddAsyncTask(func() {
var response *ModifySecurityGroupPolicyResponse
var err error
defer close(result)
response, err = client.ModifySecurityGroupPolicy(request)
callback(response, err)
result <- 1
})
if err != nil {
defer close(result)
callback(nil, err)
result <- 0
}
return result
}
// ModifySecurityGroupPolicyRequest is the request struct for api ModifySecurityGroupPolicy
type ModifySecurityGroupPolicyRequest struct {
*requests.RpcRequest
ResourceOwnerId requests.Integer `position:"Query" name:"ResourceOwnerId"`
ClientToken string `position:"Query" name:"ClientToken"`
SecurityGroupId string `position:"Query" name:"SecurityGroupId"`
InnerAccessPolicy string `position:"Query" name:"InnerAccessPolicy"`
ResourceOwnerAccount string `position:"Query" name:"ResourceOwnerAccount"`
OwnerAccount string `position:"Query" name:"OwnerAccount"`
OwnerId requests.Integer `position:"Query" name:"OwnerId"`
}
// ModifySecurityGroupPolicyResponse is the response struct for api ModifySecurityGroupPolicy
type ModifySecurityGroupPolicyResponse struct {
*responses.BaseResponse
RequestId string `json:"RequestId" xml:"RequestId"`
}
// CreateModifySecurityGroupPolicyRequest creates a request to invoke ModifySecurityGroupPolicy API
func CreateModifySecurityGroupPolicyRequest() (request *ModifySecurityGroupPolicyRequest) |
// CreateModifySecurityGroupPolicyResponse creates a response to parse from ModifySecurityGroupPolicy response
func CreateModifySecurityGroupPolicyResponse() (response *ModifySecurityGroupPolicyResponse) {
response = &ModifySecurityGroupPolicyResponse{
BaseResponse: &responses.BaseResponse{},
}
return
}
| {
request = &ModifySecurityGroupPolicyRequest{
RpcRequest: &requests.RpcRequest{},
}
request.InitWithApiInfo("Ecs", "2014-05-26", "ModifySecurityGroupPolicy", "", "")
return
} |
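A minimal usage sketch for the synchronous call above. The NewClientWithAccessKey constructor follows the SDK's usual entry point; the region, credentials, security group ID, and the Accept/Drop policy values are placeholder assumptions, not something this file defines:

package main

import (
	"fmt"

	"github.com/aliyun/alibaba-cloud-sdk-go/services/ecs"
)

func main() {
	// Placeholders: region and credentials are assumptions.
	client, err := ecs.NewClientWithAccessKey("cn-hangzhou", "<accessKeyId>", "<accessKeySecret>")
	if err != nil {
		panic(err)
	}
	request := ecs.CreateModifySecurityGroupPolicyRequest()
	request.SecurityGroupId = "sg-xxxxxxxx" // hypothetical ID
	request.InnerAccessPolicy = "Accept"    // assumed values: "Accept" or "Drop"
	response, err := client.ModifySecurityGroupPolicy(request)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("RequestId:", response.RequestId)
}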
main.rs | extern crate chrono;
extern crate glob;
extern crate indicatif;
extern crate pgn_reader;
extern crate roots;
extern crate zstd;
mod glicko;
mod glicko2;
mod ligcko2;
mod playerdb;
use std::fs::File;
use std::io::prelude::*;
use std::io::{self, BufReader};
use std::str;
use std::sync::Mutex;
use chrono::{DateTime, TimeZone, Utc};
use glob::glob;
use indicatif::{ProgressBar, ProgressDrawTarget};
use pgn_reader::Outcome::{self, Decisive, Draw};
use pgn_reader::{Color, Reader, Skip, Visitor};
use playerdb::{RatingDB, StatsDB};
#[derive(Clone, Debug, PartialEq)]
enum TimeControl {
Garbage,
Bullet,
Blitz,
Rapid,
Classical,
Correspondence,
}
#[derive(Clone, Debug, PartialEq)]
pub struct ResultUpdate {
white: String,
black: String,
result: Option<Outcome>,
date: String,
time: String,
datetime: DateTime<Utc>,
rated: bool,
speed: TimeControl,
white_rating: i32,
black_rating: i32,
}
impl ResultUpdate {
fn new() -> ResultUpdate {
ResultUpdate {
white: String::default(),
black: String::default(),
result: None,
date: String::default(),
time: String::default(),
datetime: Utc.timestamp(0, 0),
rated: false,
speed: TimeControl::Garbage,
white_rating: 1500,
black_rating: 1500,
}
}
fn valid(&self) -> bool {
self.rated && self.speed != TimeControl::Garbage && self.result.is_some()
}
fn useful(&self) -> bool {
self.valid() && self.speed == TimeControl::Blitz
}
}
impl<'pgn> Visitor<'pgn> for ResultUpdate {
type Result = ResultUpdate;
fn header(&mut self, key: &'pgn [u8], value: &'pgn [u8]) |
fn end_headers(&mut self) -> Skip {
let mut datestring = self.date.clone();
datestring.push_str(" ");
datestring.push_str(&self.time);
self.datetime = Utc
.datetime_from_str(&datestring, "%Y.%m.%d %H:%M:%S")
.unwrap();
Skip(true)
}
fn end_game(&mut self, _game: &'pgn [u8]) -> Self::Result {
self.clone()
}
}
fn process_game(pgn: &str, db: &mut RatingDB) {
let mut visitor = ResultUpdate::new();
let mut reader = Reader::new(&mut visitor, pgn.as_bytes());
let update = reader.read_game();
if let Some(update) = update {
if update.useful() {
db.update(update);
}
}
//println!("{:?}", update);
}
pub struct ProgressBarRead<'a, R> {
bar: &'a ProgressBar,
read: R,
}
impl<'a, R: io::Read> ProgressBarRead<'a, R> {
fn new(bar: &'a ProgressBar, read: R) -> ProgressBarRead<'a, R> {
ProgressBarRead { bar, read }
}
}
impl<'a, R: io::Read> io::Read for ProgressBarRead<'a, R> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
let inc = self.read.read(buf)?;
self.bar.inc(inc as u64);
Ok(inc)
}
}
fn process_zstd_pgn(path: std::path::PathBuf, db: &mut RatingDB) -> io::Result<()> {
println!("Processing {}", path.display());
let input_size = std::fs::metadata(&path)?.len();
let pb = ProgressBar::new(input_size);
pb.set_draw_target(ProgressDrawTarget::stderr());
let input_file = File::open(path)?;
let pbr = ProgressBarRead::new(&pb, input_file);
let decoder = zstd::Decoder::new(pbr)?;
pb.set_style(
indicatif::ProgressStyle::default_bar().template(
"[{elapsed_precise}] {spinner} {wide_bar} {bytes}/{total_bytes} {msg} [{eta}]",
),
);
let f = BufReader::new(decoder);
let mut pgn_buff = String::from("");
let mut empty = 0;
let mut counter: u64 = 0;
for line in f.lines() {
let line = line?;
if line.is_empty() {
empty += 1;
}
if empty == 2 {
process_game(&pgn_buff, db);
empty = 0;
pgn_buff.clear();
counter += 1;
if counter % 10000 == 0 {
pb.tick();
let players = db.player_count();
pb.set_message(&format!("{} games, {} players", counter, players));
}
} else {
pgn_buff.push_str(&line);
pgn_buff.push('\n');
}
}
Ok(())
}
fn main() -> io::Result<()> {
const BASEDIR: &str = "/srv/large/PGN/";
const BASEPREFIX: &str = "lichess_db_standard_rated_";
const REPORT: &str = "ratings.txt";
let input_glob = String::from(BASEDIR) + BASEPREFIX + "*.zst";
let mut paths: Vec<_> = glob(&input_glob).unwrap().filter_map(Result::ok).collect();
paths.sort();
let mut db = RatingDB::new();
for path in paths {
process_zstd_pgn(path, &mut db)?;
println!("{}", db.get_stats());
db.stats_reset();
}
db.dump_report(REPORT);
Ok(())
}
| {
let mut strvalue = str::from_utf8(value).unwrap().to_string();
if key == b"White" {
self.white = strvalue;
} else if key == b"Black" {
self.black = strvalue;
} else if key == b"Result" {
if value == b"1-0" {
self.result = Some(Decisive {
winner: Color::White,
})
} else if value == b"0-1" {
self.result = Some(Decisive {
winner: Color::Black,
})
} else if value == b"1/2-1/2" {
self.result = Some(Draw)
}
} else if key == b"UTCDate" {
self.date = strvalue;
} else if key == b"UTCTime" {
self.time = strvalue;
} else if key == b"Event" {
strvalue.make_ascii_lowercase();
if strvalue.contains("unrated") {
self.rated = false;
panic!("lichess DB has no unrated games");
} else {
assert!(strvalue.contains("rated"));
self.rated = true;
}
if strvalue.contains("blitz") {
self.speed = TimeControl::Blitz;
} else if strvalue.contains("rapid") {
self.speed = TimeControl::Rapid;
} else if strvalue.contains("classical") {
self.speed = TimeControl::Classical;
} else if strvalue.contains("standard") {
// Some older dumps say "standard"; treat it as classical.
self.speed = TimeControl::Classical;
} else if strvalue.contains("ultrabullet") {
self.speed = TimeControl::Garbage;
} else if strvalue.contains("bullet") {
self.speed = TimeControl::Bullet;
assert!(!strvalue.contains("ultrabullet"));
} else if strvalue.contains("correspondence") {
self.speed = TimeControl::Correspondence;
} else {
assert!(self.speed == TimeControl::Garbage);
// println!("{:?}", strvalue);
}
} else if key == b"WhiteElo" {
self.white_rating = strvalue.parse::<i32>().unwrap();
} else if key == b"BlackElo" {
self.black_rating = strvalue.parse::<i32>().unwrap();
}
} |
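A minimal sketch of driving the visitor above on a single hand-written game, bypassing RatingDB. The PGN literal is invented, but it includes the UTCDate/UTCTime headers that end_headers unwraps:

fn demo_single_game() {
    // Invented PGN; real lichess exports carry many more tags.
    let pgn = "[Event \"Rated Blitz game\"]\n\
               [White \"alice\"]\n\
               [Black \"bob\"]\n\
               [Result \"1-0\"]\n\
               [UTCDate \"2018.01.01\"]\n\
               [UTCTime \"12:00:00\"]\n\
               [WhiteElo \"1600\"]\n\
               [BlackElo \"1580\"]\n\
               \n\
               1. e4 e5 1-0\n\n";
    let mut visitor = ResultUpdate::new();
    let mut reader = Reader::new(&mut visitor, pgn.as_bytes());
    if let Some(update) = reader.read_game() {
        // A rated blitz game with a decisive result passes both checks.
        assert!(update.valid() && update.useful());
        println!("{} vs {}: {:?}", update.white, update.black, update.result);
    }
}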
crng.go | package ilbm
import ".."
import "fmt"
type CRNG struct {
rate int16
flags int16
low uint8
high uint8
}
func (crng CRNG) Name() string {
return "CRNG"
}
func (crng *CRNG) Read(r iff.Reader) {
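// CRNG chunk layout per the ILBM spec: a 2-byte pad field, then the
// cycling rate, flags, and the low/high color registers of the range.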
r.Skip(2)
crng.rate = r.ReadI16()
crng.flags = r.ReadI16()
crng.low = r.ReadU8()
crng.high = r.ReadU8()
}
func (crng CRNG) String() string {
return fmt.Sprintf("CRNG{rate: %d, flags: %d, low: %d, high: %d}",
crng.rate, crng.flags, crng.low, crng.high)
}
func | () iff.Chunk {
return &CRNG{}
}
| makeCRNG |
types.ts | import { Theme } from '../../types';
export type ThemeColorIds =
| 'P00'
| 'P05'
| 'P10'
| 'P15'
| 'P20'
| 'P25'
| 'P30'
| 'P35'
| 'P40'
| 'P45'
| 'P50'
| 'P55'
| 'P60'
| 'P65'
| 'P70';
export type ThemeForegroundIds = 'FG0' | 'FG05' | 'FG10' | 'FG20' | 'FG30' | 'FG40' | 'FG50';
export type ThemeBackgroundIds = 'BG0' | 'BG05' | 'BG10' | 'BG20' | 'BG30' | 'BG40' | 'BG50';
export type ThemeAccentColorIds = 'accent' | 'success' | 'warning' | 'danger' | 'neutral';
export type ThemeColorOpacities = '5' | '10' | '20' | '30' | '40' | '50' | '60' | '70' | '80' | '90';
export type ThemeColorLighten = 'L10' | 'L20' | 'L30';
export type ThemeColorDarken = 'D10' | 'D20' | 'D30';
export type ThemePaletteComponents =
| 'background'
| 'border'
| 'quiet-border'
| 'text'
| 'quiet'
| 'quieter'
| 'quietest'
| 'accent';
/** @returns 'color-FG0', 'color-FG05', etc. */
type MakeThemeColorIds<Ids extends ThemeColorIds | ThemeForegroundIds | ThemeBackgroundIds> = {
[Color in Ids as `color-${string & Color}`]: string;
};
/** @returns 'color-P10-L10', 'color-P20-L10', etc. */
type MakeThemePrimaryColors<Suffix extends string> = {
[Color in ThemeColorIds as `color-${string & Color}-${Suffix}`]: string;
};
/** @returns 'color-FG0-O5', 'color-FG0-O10', etc. */
type MakeThemeOpacities<Color extends string> = {
[Opacity in ThemeColorOpacities as `color-${Color}-O${string & Opacity}`]: string;
};
/** @returns 'color-success-L10', 'color-success-D10', etc. */
type MakeThemeLightenDarken<Color extends string> = {
[Adjustment in ThemeColorLighten | ThemeColorDarken as `color-${Color}-${string & Adjustment}`]: string;
};
/** @returns 'cfg-color-P05-lighten', 'cfg-color-P05-darken', etc. */
type MakeThemeAdjustments<Direction extends 'lighten' | 'darken'> = {
[Color in ThemeColorIds | ThemeAccentColorIds as `cfg-color-${Color}-${string & Direction}`]?: string;
};
/** @returns 'primary-palette-background-color', 'primary-palette-border-color', etc. */
type MakeThemePalette<Palette extends string> = {
[Color in ThemePaletteComponents as `${Palette}-palette-${string & Color}-color`]: string;
};
export type ThemeForegroundColors = MakeThemeColorIds<ThemeForegroundIds>;
export type ThemeForegroundOpacityColors = MakeThemeOpacities<'FG0'>;
export type ThemeBackgroundColors = MakeThemeColorIds<ThemeBackgroundIds>;
export type ThemeBackgroundOpacityColors = MakeThemeOpacities<'BG0'> & MakeThemeOpacities<'BG50'>;
export type ThemePrimaryColors = MakeThemeColorIds<ThemeColorIds>;
export type ThemePrimaryContrastColors = MakeThemePrimaryColors<'contrast'>;
export type ThemePrimaryLightenedColors = MakeThemePrimaryColors<'L10'> &
MakeThemePrimaryColors<'L20'> &
MakeThemePrimaryColors<'L30'>;
export type ThemePrimaryDarkenedColors = MakeThemePrimaryColors<'D10'> &
MakeThemePrimaryColors<'D20'> &
MakeThemePrimaryColors<'D30'>;
export type ThemePrimaryShadeColors = MakeThemePrimaryColors<'shade'>;
export type ThemeAccentColors = {
'color-accent': string;
'color-accent-contrast': string;
'color-accent-shade': string;
} & MakeThemeOpacities<'accent'> &
MakeThemeLightenDarken<'accent'>;
export type ThemeSuccessColors = {
'color-success': string;
'color-success-contrast': string;
'color-success-shade': string;
} & MakeThemeLightenDarken<'success'>;
export type ThemeWarningColors = {
'color-warning': string;
'color-warning-contrast': string;
'color-warning-shade': string;
} & MakeThemeLightenDarken<'warning'>;
export type ThemeDangerColors = { | 'color-danger-contrast': string;
'color-danger-shade': string;
} & MakeThemeLightenDarken<'danger'>;
export type ThemeNeutralColors = {
'color-neutral': string;
'color-neutral-contrast': string;
'color-neutral-shade': string;
} & MakeThemeLightenDarken<'neutral'>;
export type ThemeBoxShadowColors = {
'box-shadow-10': string;
'box-shadow-20': string;
'box-shadow-30': string;
'box-shadow-40': string;
'box-shadow-100': string;
};
export type ThemeExtremePaletteColors = {
'extreme-palette-background-color': string;
'extreme-palette-border-color': string;
'extreme-palette-text-color': string;
};
export type ThemePrimaryPaletteColors = MakeThemePalette<'primary'>;
export type ThemeSecondaryPaletteColors = MakeThemePalette<'secondary'>;
export type ThemeTertiaryPaletteColors = MakeThemePalette<'tertiary'>;
export type ThemeContrastPaletteColors = MakeThemePalette<'contrast'>;
export type ThemeScrollbarColors = {
'primary-scrollbar-thumb-color': string;
'primary-scrollbar-track-color': string;
'contrast-scrollbar-thumb-color': string;
'contrast-scrollbar-track-color': string;
};
export type ThemeAdjustments = MakeThemeAdjustments<'lighten'> & MakeThemeAdjustments<'darken'>;
export type ThemeColors = ThemeForegroundColors &
ThemeForegroundOpacityColors &
ThemeBackgroundColors &
ThemeBackgroundOpacityColors &
ThemePrimaryColors &
ThemePrimaryContrastColors &
ThemePrimaryLightenedColors &
ThemePrimaryShadeColors &
ThemePrimaryDarkenedColors &
ThemeAccentColors &
ThemeSuccessColors &
ThemeWarningColors &
ThemeDangerColors &
ThemeNeutralColors &
ThemeBoxShadowColors &
ThemeExtremePaletteColors &
ThemePrimaryPaletteColors &
ThemeSecondaryPaletteColors &
ThemeTertiaryPaletteColors &
ThemeContrastPaletteColors &
ThemeScrollbarColors;
export type Themes = {
light: ThemeColors & ThemeAdjustments;
lightThemeId: Theme;
dark: ThemeColors & ThemeAdjustments;
darkThemeId: Theme;
}; | 'color-danger': string; |
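For readers new to template-literal mapped types, a sketch of what one of the helpers above produces; the expansion below is written out by hand and trimmed:

// MakeThemeOpacities<'FG0'> expands (trimmed) to:
type ExpandedFG0Opacities = {
  'color-FG0-O5': string;
  'color-FG0-O10': string;
  // ...continues through 'color-FG0-O90'
};

const example: Partial<ThemeForegroundOpacityColors> = {
  'color-FG0-O10': 'rgba(0, 0, 0, 0.1)',
};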