| prompt (large_string, lengths 70–991k) | completion (large_string, lengths 0–1.02k) |
|---|---|
<|file_name|>htmlreader.py<|end_file_name|><|fim▁begin|># Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from HTMLParser import HTMLParser
from htmlentitydefs import entitydefs
NON_BREAKING_SPACE = u'\xA0'
class HtmlReader(HTMLParser):
IGNORE = 0
INITIAL = 1
PROCESS = 2
def __init__(self):
HTMLParser.__init__(self)
self._encoding = 'ISO-8859-1'
self._handlers = {'table_start' : self.table_start,
'table_end' : self.table_end,
'tr_start' : self.tr_start,
'tr_end' : self.tr_end,
'td_start' : self.td_start,
'td_end' : self.td_end,
'th_start' : self.td_start,
'th_end' : self.td_end,
'br_start' : self.br_start,
'meta_start' : self.meta_start}
def read(self, htmlfile, populator):
self.populator = populator
self.state = self.IGNORE
self.current_row = None
self.current_cell = None
for line in htmlfile.readlines():
self.feed(self._decode(line))
# Calling close is required by the HTMLParser but may cause problems
# if the same instance of our HtmlReader is reused. Currently it's
# used only once so there's no problem.
self.close()
self.populator.eof()
def _decode(self, line):
return line.decode(self._encoding)
def handle_starttag(self, tag, attrs):
handler = self._handlers.get(tag+'_start')
if handler is not None:
handler(attrs)
def handle_endtag(self, tag):
handler = self._handlers.get(tag+'_end')
if handler is not None:
handler()
def handle_data(self, data):
if self.state == self.IGNORE or self.current_cell is None:
return
if NON_BREAKING_SPACE in data:
data = data.replace(NON_BREAKING_SPACE, ' ')
self.current_cell.append(data)
def handle_entityref(self, name):
value = self._handle_entityref(name)
self.handle_data(value)
def _handle_entityref(self, name):
if name == 'apos': # missing from entitydefs
return "'"
try:
value = entitydefs[name]
except KeyError:
return '&'+name+';'
if value.startswith('&#'):
return unichr(int(value[2:-1]))
return value.decode('ISO-8859-1')
def handle_charref(self, number):
value = self._handle_charref(number)
self.handle_data(value)
def _handle_charref(self, number):
if number.startswith(('x', 'X')):
base = 16
number = number[1:]
else:
base = 10
try:
return unichr(int(number, base))
except ValueError:
return '&#'+number+';'
def unknown_decl(self, data):
# Ignore everything even if it's invalid. This kind of stuff comes
# at least from MS Excel
pass
def table_start(self, attrs=None):
self.state = self.INITIAL
self.current_row = None
self.current_cell = None
def table_end(self):
if self.current_row is not None:
self.tr_end()<|fim▁hole|> self.tr_end()
self.current_row = []
def tr_end(self):
if self.current_row is None:
return
if self.current_cell is not None:
self.td_end()
if self.state == self.INITIAL:
accepted = self.populator.start_table(self.current_row)
self.state = self.PROCESS if accepted else self.IGNORE
elif self.state == self.PROCESS:
self.populator.add(self.current_row)
self.current_row = None
def td_start(self, attrs=None):
if self.current_cell is not None:
self.td_end()
if self.current_row is None:
self.tr_start()
self.current_cell = []
def td_end(self):
if self.current_cell is not None and self.state != self.IGNORE:
cell = ''.join(self.current_cell)
self.current_row.append(cell)
self.current_cell = None
def br_start(self, attrs=None):
self.handle_data('\n')
def meta_start(self, attrs):
encoding = self._get_encoding_from_meta(attrs)
if encoding:
self._encoding = encoding
def _get_encoding_from_meta(self, attrs):
valid_http_equiv = False
encoding = None
for name, value in attrs:
name = name.lower()
if name == 'charset': # html5
return value
if name == 'http-equiv' and value.lower() == 'content-type':
valid_http_equiv = True
if name == 'content':
encoding = self._get_encoding_from_content_attr(value)
return encoding if valid_http_equiv else None
def _get_encoding_from_content_attr(self, value):
for token in value.split(';'):
token = token.strip()
if token.lower().startswith('charset='):
return token[8:]
def handle_pi(self, data):
encoding = self._get_encoding_from_pi(data)
if encoding:
self._encoding = encoding
def _get_encoding_from_pi(self, data):
data = data.strip()
if not data.lower().startswith('xml '):
return None
if data.endswith('?'):
data = data[:-1]
for token in data.split():
if token.lower().startswith('encoding='):
encoding = token[9:]
if encoding.startswith("'") or encoding.startswith('"'):
encoding = encoding[1:-1]
return encoding
return None<|fim▁end|>
|
self.state = self.IGNORE
def tr_start(self, attrs=None):
if self.current_row is not None:
|
<|file_name|>callee.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
* Handles translation of callees as well as other call-related
* things. Callees are a superset of normal rust values and sometimes
* have different representations. In particular, top-level fn items
* and methods are represented as just a fn ptr and not a full
* closure.
*/
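// Illustrative note (an assumption, not part of the original source): a
// top-level item such as `fn f(x: int) -> int { x }` is translated to a bare
// function pointer, while a closure expression like `|x: int| x + y` is a
// (fn ptr, environment) pair; see the `CalleeData` enum below for the
// corresponding variants.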
use std::vec;
use back::abi;
use driver::session;
use lib::llvm::{ValueRef, NoAliasAttribute, StructRetAttribute};
use lib::llvm::llvm;
use metadata::csearch;
use middle::trans::base;
use middle::trans::base::*;
use middle::trans::build::*;
use middle::trans::callee;
use middle::trans::common;
use middle::trans::common::*;
use middle::trans::datum::*;
use middle::trans::datum::Datum;
use middle::trans::expr;
use middle::trans::glue;
use middle::trans::inline;
use middle::trans::meth;
use middle::trans::monomorphize;
use middle::trans::type_of;
use middle::trans::foreign;
use middle::ty;
use middle::subst::Subst;
use middle::typeck;
use middle::typeck::coherence::make_substs_for_receiver_types;
use util::ppaux::Repr;
use middle::trans::type_::Type;
use syntax::ast;
use syntax::abi::AbiSet;
use syntax::ast_map;
use syntax::visit;
use syntax::visit::Visitor;
// Represents a (possibly monomorphized) top-level fn item or method
// item. Note that this is just the fn-ptr and is not a Rust closure
// value (which is a pair).
pub struct FnData {
llfn: ValueRef,
}
pub struct MethodData {
llfn: ValueRef,
llself: ValueRef,
temp_cleanup: Option<ValueRef>,
self_mode: ty::SelfMode,
}
pub enum CalleeData {
Closure(Datum),
Fn(FnData),
Method(MethodData)
}
pub struct Callee {
bcx: @mut Block,
data: CalleeData
}
pub fn trans(bcx: @mut Block, expr: &ast::Expr) -> Callee {
let _icx = push_ctxt("trans_callee");
debug2!("callee::trans(expr={})", expr.repr(bcx.tcx()));
// pick out special kinds of expressions that can be called:
match expr.node {
ast::ExprPath(_) => {
return trans_def(bcx, bcx.def(expr.id), expr);
}
_ => {}
}
// any other expressions are closures:
return datum_callee(bcx, expr);
fn datum_callee(bcx: @mut Block, expr: &ast::Expr) -> Callee {
let DatumBlock {bcx, datum} = expr::trans_to_datum(bcx, expr);
match ty::get(datum.ty).sty {
ty::ty_bare_fn(*) => {
let llval = datum.to_appropriate_llval(bcx);
return Callee {bcx: bcx, data: Fn(FnData {llfn: llval})};
}
ty::ty_closure(*) => {<|fim▁hole|> bcx.tcx().sess.span_bug(
expr.span,
format!("Type of callee is neither bare-fn nor closure: {}",
bcx.ty_to_str(datum.ty)));
}
}
}
fn fn_callee(bcx: @mut Block, fd: FnData) -> Callee {
return Callee {bcx: bcx, data: Fn(fd)};
}
fn trans_def(bcx: @mut Block, def: ast::Def, ref_expr: &ast::Expr) -> Callee {
match def {
ast::DefFn(did, _) |
ast::DefStaticMethod(did, ast::FromImpl(_), _) => {
fn_callee(bcx, trans_fn_ref(bcx, did, ref_expr.id))
}
ast::DefStaticMethod(impl_did,
ast::FromTrait(trait_did),
_) => {
fn_callee(bcx, meth::trans_static_method_callee(bcx, impl_did,
trait_did,
ref_expr.id))
}
ast::DefVariant(tid, vid, _) => {
// nullary variants are not callable
assert!(ty::enum_variant_with_id(bcx.tcx(),
tid,
vid).args.len() > 0u);
fn_callee(bcx, trans_fn_ref(bcx, vid, ref_expr.id))
}
ast::DefStruct(def_id) => {
fn_callee(bcx, trans_fn_ref(bcx, def_id, ref_expr.id))
}
ast::DefStatic(*) |
ast::DefArg(*) |
ast::DefLocal(*) |
ast::DefBinding(*) |
ast::DefUpvar(*) |
ast::DefSelf(*) => {
datum_callee(bcx, ref_expr)
}
ast::DefMod(*) | ast::DefForeignMod(*) | ast::DefTrait(*) |
ast::DefTy(*) | ast::DefPrimTy(*) |
ast::DefUse(*) | ast::DefTyParamBinder(*) |
ast::DefRegion(*) | ast::DefLabel(*) | ast::DefTyParam(*) |
ast::DefSelfTy(*) | ast::DefMethod(*) => {
bcx.tcx().sess.span_bug(
ref_expr.span,
format!("Cannot translate def {:?} \
to a callable thing!", def));
}
}
}
}
pub fn trans_fn_ref_to_callee(bcx: @mut Block,
def_id: ast::DefId,
ref_id: ast::NodeId) -> Callee {
Callee {bcx: bcx,
data: Fn(trans_fn_ref(bcx, def_id, ref_id))}
}
pub fn trans_fn_ref(bcx: @mut Block,
def_id: ast::DefId,
ref_id: ast::NodeId) -> FnData {
/*!
*
* Translates a reference (with id `ref_id`) to the fn/method
* with id `def_id` into a function pointer. This may require
* monomorphization or inlining. */
let _icx = push_ctxt("trans_fn_ref");
let type_params = node_id_type_params(bcx, ref_id);
let vtables = node_vtables(bcx, ref_id);
debug2!("trans_fn_ref(def_id={}, ref_id={:?}, type_params={}, vtables={})",
def_id.repr(bcx.tcx()), ref_id, type_params.repr(bcx.tcx()),
vtables.repr(bcx.tcx()));
trans_fn_ref_with_vtables(bcx, def_id, ref_id, type_params, vtables)
}
pub fn trans_fn_ref_with_vtables_to_callee(
bcx: @mut Block,
def_id: ast::DefId,
ref_id: ast::NodeId,
type_params: &[ty::t],
vtables: Option<typeck::vtable_res>)
-> Callee {
Callee {bcx: bcx,
data: Fn(trans_fn_ref_with_vtables(bcx, def_id, ref_id,
type_params, vtables))}
}
fn resolve_default_method_vtables(bcx: @mut Block,
impl_id: ast::DefId,
method: &ty::Method,
substs: &ty::substs,
impl_vtables: Option<typeck::vtable_res>)
-> (typeck::vtable_res, typeck::vtable_param_res) {
// Get the vtables that the impl implements the trait at
let impl_res = ty::lookup_impl_vtables(bcx.tcx(), impl_id);
// Build up a param_substs that we are going to resolve the
// trait_vtables under.
let param_substs = Some(@param_substs {
tys: substs.tps.clone(),
self_ty: substs.self_ty,
vtables: impl_vtables,
self_vtables: None
});
let trait_vtables_fixed = resolve_vtables_under_param_substs(
bcx.tcx(), param_substs, impl_res.trait_vtables);
// Now we pull any vtables for parameters on the actual method.
let num_method_vtables = method.generics.type_param_defs.len();
let method_vtables = match impl_vtables {
Some(vtables) => {
let num_impl_type_parameters =
vtables.len() - num_method_vtables;
vtables.tailn(num_impl_type_parameters).to_owned()
},
None => vec::from_elem(num_method_vtables, @~[])
};
let param_vtables = @(*trait_vtables_fixed + method_vtables);
let self_vtables = resolve_param_vtables_under_param_substs(
bcx.tcx(), param_substs, impl_res.self_vtables);
(param_vtables, self_vtables)
}
pub fn trans_fn_ref_with_vtables(
bcx: @mut Block, //
def_id: ast::DefId, // def id of fn
ref_id: ast::NodeId, // node id of use of fn; may be zero if N/A
type_params: &[ty::t], // values for fn's ty params
vtables: Option<typeck::vtable_res>) // vtables for the call
-> FnData {
/*!
* Translates a reference to a fn/method item, monomorphizing and
* inlining as it goes.
*
* # Parameters
*
* - `bcx`: the current block where the reference to the fn occurs
* - `def_id`: def id of the fn or method item being referenced
* - `ref_id`: node id of the reference to the fn/method, if applicable.
* This parameter may be zero; but, if so, the resulting value may not
* have the right type, so it must be cast before being used.
* - `type_params`: values for each of the fn/method's type parameters
* - `vtables`: values for each bound on each of the type parameters
*/
let _icx = push_ctxt("trans_fn_ref_with_vtables");
let ccx = bcx.ccx();
let tcx = ccx.tcx;
debug2!("trans_fn_ref_with_vtables(bcx={}, def_id={}, ref_id={:?}, \
type_params={}, vtables={})",
bcx.to_str(),
def_id.repr(bcx.tcx()),
ref_id,
type_params.repr(bcx.tcx()),
vtables.repr(bcx.tcx()));
assert!(type_params.iter().all(|t| !ty::type_needs_infer(*t)));
// Polytype of the function item (may have type params)
let fn_tpt = ty::lookup_item_type(tcx, def_id);
let substs = ty::substs { regions: ty::ErasedRegions,
self_ty: None,
tps: /*bad*/ type_params.to_owned() };
// Load the info for the appropriate trait if necessary.
match ty::trait_of_method(tcx, def_id) {
None => {}
Some(trait_id) => {
ty::populate_implementations_for_trait_if_necessary(tcx, trait_id)
}
}
// We need to do a bunch of special handling for default methods.
// We need to modify the def_id and our substs in order to monomorphize
// the function.
let (is_default, def_id, substs, self_vtables, vtables) =
match ty::provided_source(tcx, def_id) {
None => (false, def_id, substs, None, vtables),
Some(source_id) => {
// There are two relevant substitutions when compiling
// default methods. First, there is the substitution for
// the type parameters of the impl we are using and the
// method we are calling. This substitution is the substs
// argument we already have.
// In order to compile a default method, though, we need
// to consider another substitution: the substitution for
// the type parameters on trait; the impl we are using
// implements the trait at some particular type
// parameters, and we need to substitute for those first.
// So, what we need to do is find this substitution and
// compose it with the one we already have.
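// Illustrative example (an assumption, not taken from the original comment):
// for `trait Tr<A> { fn m<B>(&self) { ... } }` with a default method `m`,
// and `impl Tr<int> for S { }`, a call to `m::<uint>` on an `S` value uses
// the composition of the impl substitution {A => int} with the method
// substitution {B => uint} before monomorphizing the default body.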
let impl_id = ty::method(tcx, def_id).container_id();
let method = ty::method(tcx, source_id);
let trait_ref = ty::impl_trait_ref(tcx, impl_id)
.expect("could not find trait_ref for impl with \
default methods");
// Compute the first substitution
let first_subst = make_substs_for_receiver_types(
tcx, impl_id, trait_ref, method);
// And compose them
let new_substs = first_subst.subst(tcx, &substs);
let (param_vtables, self_vtables) =
resolve_default_method_vtables(bcx, impl_id,
method, &substs, vtables);
debug2!("trans_fn_with_vtables - default method: \
substs = {}, trait_subst = {}, \
first_subst = {}, new_subst = {}, \
vtables = {}, \
self_vtable = {}, param_vtables = {}",
substs.repr(tcx), trait_ref.substs.repr(tcx),
first_subst.repr(tcx), new_substs.repr(tcx),
vtables.repr(tcx),
self_vtables.repr(tcx), param_vtables.repr(tcx));
(true, source_id,
new_substs, Some(self_vtables), Some(param_vtables))
}
};
// Check whether this fn has an inlined copy and, if so, redirect
// def_id to the local id of the inlined copy.
let def_id = {
if def_id.crate != ast::LOCAL_CRATE {
inline::maybe_instantiate_inline(ccx, def_id)
} else {
def_id
}
};
// We must monomorphise if the fn has type parameters, is a rust
// intrinsic, or is a default method. In particular, if we see an
// intrinsic that is inlined from a different crate, we want to reemit the
// intrinsic instead of trying to call it in the other crate.
let must_monomorphise;
if type_params.len() > 0 || is_default {
must_monomorphise = true;
} else if def_id.crate == ast::LOCAL_CRATE {
let map_node = session::expect(
ccx.sess,
ccx.tcx.items.find(&def_id.node),
|| format!("local item should be in ast map"));
match *map_node {
ast_map::node_foreign_item(_, abis, _, _) => {
must_monomorphise = abis.is_intrinsic()
}
_ => {
must_monomorphise = false;
}
}
} else {
must_monomorphise = false;
}
// Create a monomorphic version of generic functions
if must_monomorphise {
// Should be either intra-crate or inlined.
assert_eq!(def_id.crate, ast::LOCAL_CRATE);
let (val, must_cast) =
monomorphize::monomorphic_fn(ccx, def_id, &substs,
vtables, self_vtables,
Some(ref_id));
let mut val = val;
if must_cast && ref_id != 0 {
// Monotype of the REFERENCE to the function (type params
// are subst'd)
let ref_ty = common::node_id_type(bcx, ref_id);
val = PointerCast(
bcx, val, type_of::type_of_fn_from_ty(ccx, ref_ty).ptr_to());
}
return FnData {llfn: val};
}
// Find the actual function pointer.
let mut val = {
if def_id.crate == ast::LOCAL_CRATE {
// Internal reference.
get_item_val(ccx, def_id.node)
} else {
// External reference.
trans_external_path(ccx, def_id, fn_tpt.ty)
}
};
// This is subtle and surprising, but sometimes we have to bitcast
// the resulting fn pointer. The reason has to do with external
// functions. If you have two crates that both bind the same C
// library, they may not use precisely the same types: for
// example, they will probably each declare their own structs,
// which are distinct types from LLVM's point of view (nominal
// types).
//
// Now, if those two crates are linked into an application, and
// they contain inlined code, you can wind up with a situation
// where both of those functions wind up being loaded into this
// application simultaneously. In that case, the same function
// (from LLVM's point of view) requires two types. But of course
// LLVM won't allow one function to have two types.
//
// What we currently do, therefore, is declare the function with
// one of the two types (whichever happens to come first) and then
// bitcast as needed when the function is referenced to make sure
// it has the type we expect.
//
// This can occur on either a crate-local or crate-external
// reference. It also occurs when testing libcore and in some
// other weird situations. Annoying.
let llty = type_of::type_of_fn_from_ty(ccx, fn_tpt.ty);
let llptrty = llty.ptr_to();
if val_ty(val) != llptrty {
val = BitCast(bcx, val, llptrty);
}
return FnData {llfn: val};
}
// ______________________________________________________________________
// Translating calls
pub fn trans_call(in_cx: @mut Block,
call_ex: &ast::Expr,
f: &ast::Expr,
args: CallArgs,
id: ast::NodeId,
dest: expr::Dest)
-> @mut Block {
let _icx = push_ctxt("trans_call");
trans_call_inner(in_cx,
call_ex.info(),
expr_ty(in_cx, f),
node_id_type(in_cx, id),
|cx| trans(cx, f),
args,
Some(dest),
DontAutorefArg).bcx
}
pub fn trans_method_call(in_cx: @mut Block,
call_ex: &ast::Expr,
callee_id: ast::NodeId,
rcvr: &ast::Expr,
args: CallArgs,
dest: expr::Dest)
-> @mut Block {
let _icx = push_ctxt("trans_method_call");
debug2!("trans_method_call(call_ex={}, rcvr={})",
call_ex.repr(in_cx.tcx()),
rcvr.repr(in_cx.tcx()));
trans_call_inner(
in_cx,
call_ex.info(),
node_id_type(in_cx, callee_id),
expr_ty(in_cx, call_ex),
|cx| {
match cx.ccx().maps.method_map.find_copy(&call_ex.id) {
Some(origin) => {
debug2!("origin for {}: {}",
call_ex.repr(in_cx.tcx()),
origin.repr(in_cx.tcx()));
meth::trans_method_callee(cx,
callee_id,
rcvr,
origin)
}
None => {
cx.tcx().sess.span_bug(call_ex.span, "method call expr wasn't in method map")
}
}
},
args,
Some(dest),
DontAutorefArg).bcx
}
pub fn trans_lang_call(bcx: @mut Block,
did: ast::DefId,
args: &[ValueRef],
dest: Option<expr::Dest>)
-> Result {
let fty = if did.crate == ast::LOCAL_CRATE {
ty::node_id_to_type(bcx.ccx().tcx, did.node)
} else {
csearch::get_type(bcx.ccx().tcx, did).ty
};
let rty = ty::ty_fn_ret(fty);
callee::trans_call_inner(bcx,
None,
fty,
rty,
|bcx| {
trans_fn_ref_with_vtables_to_callee(bcx,
did,
0,
[],
None)
},
ArgVals(args),
dest,
DontAutorefArg)
}
pub fn trans_lang_call_with_type_params(bcx: @mut Block,
did: ast::DefId,
args: &[ValueRef],
type_params: &[ty::t],
dest: expr::Dest)
-> @mut Block {
let fty;
if did.crate == ast::LOCAL_CRATE {
fty = ty::node_id_to_type(bcx.tcx(), did.node);
} else {
fty = csearch::get_type(bcx.tcx(), did).ty;
}
let rty = ty::ty_fn_ret(fty);
return callee::trans_call_inner(
bcx, None, fty, rty,
|bcx| {
let callee =
trans_fn_ref_with_vtables_to_callee(bcx, did, 0,
type_params,
None);
let new_llval;
match callee.data {
Fn(fn_data) => {
let substituted = ty::subst_tps(callee.bcx.tcx(),
type_params,
None,
fty);
let llfnty = type_of::type_of(callee.bcx.ccx(),
substituted);
new_llval = PointerCast(callee.bcx, fn_data.llfn, llfnty);
}
_ => fail2!()
}
Callee { bcx: callee.bcx, data: Fn(FnData { llfn: new_llval }) }
},
ArgVals(args), Some(dest), DontAutorefArg).bcx;
}
struct CalleeTranslationVisitor {
flag: bool,
}
impl Visitor<()> for CalleeTranslationVisitor {
fn visit_item(&mut self, _:@ast::item, _:()) { }
fn visit_expr(&mut self, e:@ast::Expr, _:()) {
if !self.flag {
match e.node {
ast::ExprRet(_) => self.flag = true,
_ => visit::walk_expr(self, e, ()),
}
}
}
}
pub fn body_contains_ret(body: &ast::Block) -> bool {
let mut v = CalleeTranslationVisitor{ flag: false };
visit::walk_block(&mut v, body, ());
v.flag
}
pub fn trans_call_inner(in_cx: @mut Block,
call_info: Option<NodeInfo>,
callee_ty: ty::t,
ret_ty: ty::t,
get_callee: &fn(@mut Block) -> Callee,
args: CallArgs,
dest: Option<expr::Dest>,
autoref_arg: AutorefArg)
-> Result {
/*!
* This behemoth of a function translates function calls.
* Unfortunately, in order to generate more efficient LLVM
* output at -O0, it has quite a complex signature (refactoring
* this into two functions seems like a good idea).
*
* In particular, for lang items, it is invoked with a dest of
* None, and
*/
do base::with_scope_result(in_cx, call_info, "call") |cx| {
let callee = get_callee(cx);
let mut bcx = callee.bcx;
let ccx = cx.ccx();
let (llfn, llenv) = unsafe {
match callee.data {
Fn(d) => {
(d.llfn, llvm::LLVMGetUndef(Type::opaque_box(ccx).ptr_to().to_ref()))
}
Method(d) => {
// Weird but true: we pass self in the *environment* slot!
(d.llfn, d.llself)
}
Closure(d) => {
// Closures are represented as (llfn, llclosure) pair:
// load the requisite values out.
let pair = d.to_ref_llval(bcx);
let llfn = GEPi(bcx, pair, [0u, abi::fn_field_code]);
let llfn = Load(bcx, llfn);
let llenv = GEPi(bcx, pair, [0u, abi::fn_field_box]);
let llenv = Load(bcx, llenv);
(llfn, llenv)
}
}
};
let abi = match ty::get(callee_ty).sty {
ty::ty_bare_fn(ref f) => f.abis,
_ => AbiSet::Rust()
};
let is_rust_fn =
abi.is_rust() ||
abi.is_intrinsic();
// Generate a location to store the result. If the user does
// not care about the result, just make a stack slot.
let opt_llretslot = match dest {
None => {
assert!(!type_of::return_uses_outptr(in_cx.ccx(), ret_ty));
None
}
Some(expr::SaveIn(dst)) => Some(dst),
Some(expr::Ignore) => {
if !ty::type_is_voidish(in_cx.tcx(), ret_ty) {
Some(alloc_ty(bcx, ret_ty, "__llret"))
} else {
unsafe {
Some(llvm::LLVMGetUndef(Type::nil().ptr_to().to_ref()))
}
}
}
};
let mut llresult = unsafe {
llvm::LLVMGetUndef(Type::nil().ptr_to().to_ref())
};
// The code below invokes the function, using either the Rust
// conventions (if it is a rust fn) or the native conventions
// (otherwise). The important part is that, when all is said
// and done, either the return value of the function will have been
// written in opt_llretslot (if it is Some) or `llresult` will be
// set appropriately (otherwise).
if is_rust_fn {
let mut llargs = ~[];
// Push the out-pointer if we use an out-pointer for this
// return type, otherwise push "undef".
if type_of::return_uses_outptr(in_cx.ccx(), ret_ty) {
llargs.push(opt_llretslot.unwrap());
}
// Push the environment.
llargs.push(llenv);
// Push the arguments.
bcx = trans_args(bcx, args, callee_ty,
autoref_arg, &mut llargs);
// Now that the arguments have finished evaluating, we
// need to revoke the cleanup for the self argument
match callee.data {
Method(d) => {
for &v in d.temp_cleanup.iter() {
revoke_clean(bcx, v);
}
}
_ => {}
}
// A function pointer is called without the declaration available, so we have to apply
// any attributes with ABI implications directly to the call instruction. Right now, the
// only attribute we need to worry about is `sret`.
let mut attrs = ~[];
if type_of::return_uses_outptr(in_cx.ccx(), ret_ty) {
attrs.push((1, StructRetAttribute));
}
// The `noalias` attribute on the return value is useful to a function ptr caller.
match ty::get(ret_ty).sty {
// `~` pointer return values never alias because ownership is transferred
ty::ty_uniq(*) |
ty::ty_evec(_, ty::vstore_uniq) => {
attrs.push((0, NoAliasAttribute));
}
_ => ()
}
// Invoke the actual rust fn and update bcx/llresult.
let (llret, b) = base::invoke(bcx, llfn, llargs, attrs);
bcx = b;
llresult = llret;
// If the Rust convention for this type is return via
// the return value, copy it into llretslot.
match opt_llretslot {
Some(llretslot) => {
if !type_of::return_uses_outptr(bcx.ccx(), ret_ty) &&
!ty::type_is_voidish(bcx.tcx(), ret_ty)
{
Store(bcx, llret, llretslot);
}
}
None => {}
}
} else {
// Lang items are the only case where dest is None, and
// they are always Rust fns.
assert!(dest.is_some());
let mut llargs = ~[];
bcx = trans_args(bcx, args, callee_ty,
autoref_arg, &mut llargs);
bcx = foreign::trans_native_call(bcx, callee_ty,
llfn, opt_llretslot.unwrap(), llargs);
}
// If the caller doesn't care about the result of this fn call,
// drop the temporary slot we made.
match dest {
None => {
assert!(!type_of::return_uses_outptr(bcx.ccx(), ret_ty));
}
Some(expr::Ignore) => {
// drop the value if it is not being saved.
bcx = glue::drop_ty(bcx, opt_llretslot.unwrap(), ret_ty);
}
Some(expr::SaveIn(_)) => { }
}
if ty::type_is_bot(ret_ty) {
Unreachable(bcx);
}
rslt(bcx, llresult)
}
}
pub enum CallArgs<'self> {
ArgExprs(&'self [@ast::Expr]),
ArgVals(&'self [ValueRef])
}
pub fn trans_args(cx: @mut Block,
args: CallArgs,
fn_ty: ty::t,
autoref_arg: AutorefArg,
llargs: &mut ~[ValueRef]) -> @mut Block
{
let _icx = push_ctxt("trans_args");
let mut temp_cleanups = ~[];
let arg_tys = ty::ty_fn_args(fn_ty);
let mut bcx = cx;
// First we figure out the caller's view of the types of the arguments.
// This will be needed if this is a generic call, because the callee has
// to cast her view of the arguments to the caller's view.
match args {
ArgExprs(arg_exprs) => {
for (i, arg_expr) in arg_exprs.iter().enumerate() {
let arg_val = unpack_result!(bcx, {
trans_arg_expr(bcx,
arg_tys[i],
ty::ByCopy,
*arg_expr,
&mut temp_cleanups,
autoref_arg)
});
llargs.push(arg_val);
}
}
ArgVals(vs) => {
llargs.push_all(vs);
}
}
// now that all arguments have been successfully built, we can revoke any
// temporary cleanups, as they are only needed if argument construction
// should fail (for example, cleanup of copy mode args).
for c in temp_cleanups.iter() {
revoke_clean(bcx, *c)
}
bcx
}
pub enum AutorefArg {
DontAutorefArg,
DoAutorefArg
}
// temp_cleanups: cleanups that should run only if failure occurs before the
// call takes place:
pub fn trans_arg_expr(bcx: @mut Block,
formal_arg_ty: ty::t,
self_mode: ty::SelfMode,
arg_expr: &ast::Expr,
temp_cleanups: &mut ~[ValueRef],
autoref_arg: AutorefArg) -> Result {
let _icx = push_ctxt("trans_arg_expr");
let ccx = bcx.ccx();
debug2!("trans_arg_expr(formal_arg_ty=({}), self_mode={:?}, arg_expr={})",
formal_arg_ty.repr(bcx.tcx()),
self_mode,
arg_expr.repr(bcx.tcx()));
// translate the arg expr to a datum
let arg_datumblock = expr::trans_to_datum(bcx, arg_expr);
let arg_datum = arg_datumblock.datum;
let bcx = arg_datumblock.bcx;
debug2!(" arg datum: {}", arg_datum.to_str(bcx.ccx()));
let mut val;
if ty::type_is_bot(arg_datum.ty) {
// For values of type _|_, we generate an
// "undef" value, as such a value should never
// be inspected. It's important for the value
// to have type lldestty (the callee's expected type).
let llformal_arg_ty = type_of::type_of(ccx, formal_arg_ty);
unsafe {
val = llvm::LLVMGetUndef(llformal_arg_ty.to_ref());
}
} else {
// FIXME(#3548) use the adjustments table
match autoref_arg {
DoAutorefArg => {
val = arg_datum.to_ref_llval(bcx);
}
DontAutorefArg => {
let need_scratch = ty::type_needs_drop(bcx.tcx(), arg_datum.ty) ||
(bcx.expr_is_lval(arg_expr) &&
arg_datum.appropriate_mode(bcx.ccx()).is_by_ref());
let arg_datum = if need_scratch {
let scratch = scratch_datum(bcx, arg_datum.ty, "__self", false);
arg_datum.store_to_datum(bcx, INIT, scratch);
// Technically, ownership of val passes to the callee.
// However, we must cleanup should we fail before the
// callee is actually invoked.
scratch.add_clean(bcx);
temp_cleanups.push(scratch.val);
scratch
} else {
arg_datum
};
val = match self_mode {
ty::ByRef => {
debug2!("by ref arg with type {}", bcx.ty_to_str(arg_datum.ty));
arg_datum.to_ref_llval(bcx)
}
ty::ByCopy => {
debug2!("by copy arg with type {}", bcx.ty_to_str(arg_datum.ty));
arg_datum.to_appropriate_llval(bcx)
}
}
}
}
if formal_arg_ty != arg_datum.ty {
// this could happen due to e.g. subtyping
let llformal_arg_ty = type_of::type_of_explicit_arg(ccx, formal_arg_ty);
debug2!("casting actual type ({}) to match formal ({})",
bcx.val_to_str(val), bcx.llty_str(llformal_arg_ty));
val = PointerCast(bcx, val, llformal_arg_ty);
}
}
debug2!("--- trans_arg_expr passing {}", bcx.val_to_str(val));
return rslt(bcx, val);
}<|fim▁end|>
|
return Callee {bcx: bcx, data: Closure(datum)};
}
_ => {
|
<|file_name|>analyzer_wysinwyx.rs<|end_file_name|><|fim▁begin|>//! This module offers structs and traits for valueset analysis as inctroduced
//! in "Analyzing Memory Access in x86 Executables" by Gogul Balakrishnan and
//! Thomas Reps
//! For a more complete work on this topic, see the dissertation(!) (there is
//! also an article with the same title) of Gogul Balakrishnan:
//! "WYSINWYX: WHAT YOU SEE IS NOT WHAT YOU EXECUTE"<|fim▁hole|>//! an a-loc is an "abstract location" representing roughly a variable in C
//! This implementation is still work in progress.
use std::collections::HashMap;
use std::fmt::Debug;
use std::hash::Hash;
use petgraph::graph::{NodeIndex,EdgeIndex};
use super::{StridedInterval_u,AbstractValue};
use super::mem_structs::{AbstractStore,MemRegion,A_Loc,AbstractAddress};
use super::mem_structs::{MemRegionType};
use frontend::containers::{RModule,RadecoModule,RFunction,RadecoFunction};
use frontend::bindings::{RBindings,RadecoBindings,Binding};
use frontend::source::Source;
use middle::ssa::ssa_traits::NodeData as TNodeData;
use middle::ssa::ssa_traits::{SSA,NodeType,ValueType};
use middle::ssa::ssastorage::{NodeData,SSAStorage};
use middle::ir::{MOpcode,MAddress,MArity};
use middle::ir_writer::{IRWriter};
use r2api::structs::{LRegInfo};
//use esil::parser::{Parse, Parser};
//use esil::lexer::{Token, Tokenizer};
// General Notes:
// for efficient impl of abstract store use "applicative dictionaries"
// (see end of Analyzing Memory Accesses in x86 Executables end of sec 3)
// each instruction (node in our case?) takes an abstract store as input
// and outputs one - this implementation does have to adapt this
// implement those 'transformers'
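// A minimal sketch (an assumption, not part of the original module) of the
// "transformer" idea mentioned above: each instruction is modelled as a
// function from an input abstract store to an output abstract store. The
// real analysis uses `AbstractStore` and strided intervals; here the store
// is simplified to a map from a-loc names to constant values.
fn sketch_transformer(store_in: &HashMap<String, i64>) -> HashMap<String, i64> {
    // transformer for the (hypothetical) instruction `rax := rax + 4`
    let mut store_out = store_in.clone();
    let old = store_out.get("rax").cloned().unwrap_or(0);
    store_out.insert("rax".to_owned(), old + 4);
    store_out
}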
fn perform_op(op: MOpcode, operands: Vec<i64>) -> i64 {
debug!("\t\t\tperform op: {:?}, {:?}", op, operands);
match op {
MOpcode::OpAdd => operands[0] + operands[1],
MOpcode::OpSub => operands[0] - operands[1],
//MOpcode::OpMul => operands[0] * operands[1], //FIXME panics on overflow
MOpcode::OpMul => {
let (res, overflow) = operands[0].overflowing_mul(operands[1]);
// Actually I do not know how to handle this correctly.
// (Even _whether_ this is to be handled.)
if overflow {warn!("Multiplication overflowed!")};
res
},
MOpcode::OpDiv => operands[0] / operands[1],
MOpcode::OpMod => operands[0] % operands[1],
MOpcode::OpAnd => operands[0] & operands[1],
MOpcode::OpOr => operands[0] | operands[1],
MOpcode::OpXor => operands[0] ^ operands[1],
MOpcode::OpNot => !operands[0],
//MOpcode::OpEq => operands[0] == operands[1],
//MOpcode::OpGt => operands[0] - operands[1],
//MOpcode::OpLt => operands[0] - operands[1],
MOpcode::OpLsl => operands[0] << operands[1],
MOpcode::OpLsr => operands[0] >> operands[1],
//MOpcode::OpNarrow(_) => ("narrow", MArity::Unary),
//MOpcode::OpWiden(_) => ("widen", MArity::Unary),
MOpcode::OpConst(c) => c as i64,
_ => 0,
}
}
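// Examples (for illustration only, derived from the match arms above):
// `perform_op(MOpcode::OpAdd, vec![2, 3])` evaluates to 5, and
// `perform_op(MOpcode::OpConst(42), vec![])` evaluates to 42.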
//TODO get information about available registers/architecture/...
//from somewhere (ssa?)
fn is_stack_pointer(comment: &String) -> bool {
match comment.as_ref() {
"rsp" | "esp" => {true}
_ => {false}
}
}
fn is_base_pointer(comment: &String) -> bool {
match comment.as_ref() {
"rbp" | "ebp" => {true}
_ => {false}
}
}
fn is_gen_purpose_reg(comment: &String) -> bool {
match comment.as_ref() {
"rax" | "eax" |
"rbx" | "ebx" |
"rcx" | "ecx" |
"rdx" | "edx" |
"rdi" | "edi" |
"rsi" | "esi" |
"r11"
| "af" | "cf" | "of" | "pf" | "sf" | "tf" | "zf" | "ds"
=> {true}
_ => {false}
}
}
fn is_instruction_pointer(comment: &String) -> bool {
match comment.as_ref() {
"rip" | "eip" => {true}
_ => {false}
}
}
fn is_register(comment: &String) -> bool {
is_stack_pointer(comment)
| is_base_pointer(comment)
| is_gen_purpose_reg(comment)
| is_instruction_pointer(comment)
}
pub struct FnAnalyzer<RFn>
where RFn : RFunction + Clone
{
//rfn: RFn,
ssa: RFn::SSA,
a_store_fn: AbstractStore<<<RFn as RFunction>::SSA as SSA>::ValueRef>,
mem_reg_local: MemRegion,
stack_size: Option<u64>,
}
impl<RFn> FnAnalyzer<RFn>
where RFn: RFunction + Clone,
<RFn as RFunction>::SSA: Clone
{
pub fn from(rfn: RFn) -> FnAnalyzer<RFn> {
FnAnalyzer {
//rfn: rfn,
ssa: (*rfn.ssa_ref()).clone(),
a_store_fn: AbstractStore::new(),
mem_reg_local: MemRegion::new(MemRegionType::Local),
stack_size: None,
}
}
/// Print the SSA node as one expression
/// for example: load((rax + 42) - 23)
fn print_node_as_comp(&self,
node: <<RFn as RFunction>::SSA as SSA>::ValueRef)
-> String {
let op_type = self.ssa.get_node_data(&node).expect("No node data.").nt;
//debug!("print_node: {:?}", op_type);
match op_type {
NodeType::Op(opcode) => {
let ops = self.ssa.get_operands(&node);
match opcode.arity() {
MArity::Zero => {
match opcode {
MOpcode::OpConst(c) => format!("{}", c),
_ => format!("{}", opcode.to_string()),
}
},
MArity::Unary => {
format!("{}{}",
opcode.to_string(),
self.print_node_as_comp(ops[0]))
},
MArity::Binary => {
match opcode {
MOpcode::OpLoad => {
//format!("{}({}, {})",
// opcode.to_string(),
// self.print_node_as_comp(ops[0]),
// self.print_node_as_comp(ops[1]))
//},
format!("{}({})",
opcode.to_string(),
self.print_node_as_comp(ops[1]))
},
MOpcode::OpStore => {
// FIXME probably has wrong arity
//format!("{}({}, {}, {})",
// opcode.to_string(),
// self.print_node_as_comp(ops[0]),
// self.print_node_as_comp(ops[1]),
// self.print_node_as_comp(ops[2]))
format!("{}({}, {})",
opcode.to_string(),
self.print_node_as_comp(ops[1]),
self.print_node_as_comp(ops[2]))
},
_ => format!("({} {} {})",
self.print_node_as_comp(ops[0]),
opcode.to_string(),
self.print_node_as_comp(ops[1])),
}
},
MArity::Ternary => {
format!("{}({}, {}, {})",
opcode.to_string(),
self.print_node_as_comp(ops[0]),
self.print_node_as_comp(ops[1]),
self.print_node_as_comp(ops[2]))
},
}
},
NodeType::Comment(c) => format!("{}", c),
//NodeType::Phi => format!("(Phi)"),
NodeType::Phi => {
let mut ret = format!("Phi(");
let ops = self.ssa.get_operands(&node);
for op in ops {
ret = ret + &format!("{}, ", self.print_node_as_comp(op));
}
ret + &format!(")")
},
NodeType::Undefined => format!("(Undefined optype)"),
}
}
/// When a load operation is given, it tries to load
/// something from a given aloc.
/// This function traverses all store operations,
/// checks whether the given a-loc matches,
/// if so, return value stored to a-loc,
/// else , return uninitialized value
/// Takes as arguments the store node, and the a-loc
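/// Illustrative trace (not from the original doc): when loading from the
/// a-loc `rsp + 8` and the memory state is an OpStore to that same a-loc
/// with value 5, the a-locs match and the stored value 5 is returned; if the
/// chain ends at Comment("mem") without a match, an uninitialized
/// StridedInterval_u is returned instead.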
fn compute_loaded_value(&self,
node: <<RFn as RFunction>::SSA as SSA>::ValueRef,
(a_loc_base, a_loc_offs):
(A_Loc<<<RFn as RFunction>::SSA as SSA>::ValueRef>,
i64))
-> StridedInterval_u
{
debug!("compute_loaded_value({:?})", node);
debug!("\tcalc: {}", self.print_node_as_comp(node));
debug!("\ta-loc base: {}, offs: {}", a_loc_base, a_loc_offs);
let node_data = self.ssa.get_node_data(&node).expect("No node data.");
let op_type = node_data.nt;
let operands = self.ssa.get_operands(&node);
//debug!("\toperands: {:?}", operands);
//debug!("\t\t{:?} - {:?}", op_type, node);
//for op in &operands {
// let nd = self.ssa.get_node_data(&op).expect("No node data.");
// debug!("\t\t\t{:?}", nd.nt);
//}
match op_type {
NodeType::Comment(ref c) if c.eq("mem") => {
//debug!("\t\t\t\tNo matching a-loc found - ret uninitialized");
// We are loading from a memory region of which we don't know the value
StridedInterval_u::new()
},
NodeType::Op(MOpcode::OpStore) => {
let mem_state = operands[0];
let target_node = operands[1];
let value_node = operands[2];
let (a_loc_base_stored, a_loc_offs_stored) =
self.compute_a_loc(target_node)
.expect("No base a-loc to store operation");
debug!("\t\t\t\tcomparing to:\n\t\t\t{}, {}",
a_loc_base_stored, a_loc_offs_stored);
if (a_loc_base.clone(), a_loc_offs) ==
(a_loc_base_stored.clone(), a_loc_offs_stored) {
debug!("\t\t\t\tmatching - found storing location, get stored value");
self.compute_abstract_value(value_node)
} else {
debug!("\t\t\t\tNo matching a-loc - continuing search");
self.compute_loaded_value(mem_state, (a_loc_base, a_loc_offs))
}
},
NodeType::Phi => {
let loaded_val1 =
self.compute_loaded_value(operands[0],
(a_loc_base.clone(), a_loc_offs));
let loaded_val2 =
self.compute_loaded_value(operands[1],
(a_loc_base.clone(), a_loc_offs));
loaded_val1.join(loaded_val2)
},
_ => {
error!("unexpected op_type `{}'", op_type);
panic!()
} // This should never be called on something TODO: handle with .expect()
// that is not an OpStore or Comment("mem")
}
}
/// Computes an abstract value for a given node
/// uninitialized value for Comment("<reg>")
/// uninitialized value for Comment("mem")
/// c for Constant(c)
/// value at a-loc (+ offset) for Load from a-loc (+ offset)
/// stored value for Store
/// result of (arithmetic) operation
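/// Example (illustrative, not from the original doc): for
/// `OpAdd(OpConst(2), OpConst(3))` this returns the strided interval for the
/// constant 5, while a bare register comment such as `Comment("rax")` yields
/// `StridedInterval_u::Undefined`.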
fn compute_abstract_value(&self,
node: <<RFn as RFunction>::SSA as SSA>::ValueRef)
-> StridedInterval_u
{
let node_data = self.ssa.get_node_data(&node).expect("No node data.");
let op_type = node_data.nt;
let operands = self.ssa.get_operands(&node);
debug!("compute_concrete_value({:?})", node);
debug!("\tcalc: {:?}", self.print_node_as_comp(node));
debug!("\toperands: {:?}", operands);
match op_type {
NodeType::Comment(ref c) if is_register(c) => {
StridedInterval_u::Undefined
},
NodeType::Op(MOpcode::OpConst(c)) => {
StridedInterval_u::from_const(c as i64)
},
//NodeType::Comment(ref c) if is_base_pointer(c) => {},
//NodeType::Comment(ref c) if is_gen_purpose_reg(c) => {},
NodeType::Op(MOpcode::OpStore) => {
self.compute_abstract_value(operands[2])
},
NodeType::Op(MOpcode::OpLoad) => {
let (a_loc_base, a_loc_offs) =
self.compute_a_loc(operands[1])
.expect("No base a-loc found");
self.compute_loaded_value(operands[0], (a_loc_base, a_loc_offs))
},
//TODO use process_op()
NodeType::Op(MOpcode::OpAdd) => {
self.compute_abstract_value(operands[0]) +
self.compute_abstract_value(operands[1])
},
NodeType::Op(MOpcode::OpSub) => {
self.compute_abstract_value(operands[0]) -
self.compute_abstract_value(operands[1])
},
NodeType::Op(MOpcode::OpMul) => {
self.compute_abstract_value(operands[0]) *
self.compute_abstract_value(operands[1])
},
NodeType::Op(MOpcode::OpDiv) => {
self.compute_abstract_value(operands[0]) /
self.compute_abstract_value(operands[1])
},
//NodeType::Op(opcode) => {
// match opcode {}
//},
_ => {
warn!("Fallthrough");
StridedInterval_u::Undefined
}, // FIXME
}
}
/// Computes either underlying register or memory region plus offset
/// takes a node as argument
// TODO seems conceptually broken, a-loc already includes offset
// second ret value same as compute_abstract_value?
// currently returns stuff with ambigous meaning:
// on OpStore returns the content of the a-loc
// instead of a-loc offset (which is already included in the a-loc)
fn compute_a_loc(&self, node: <<RFn as RFunction>::SSA as SSA>::ValueRef)
-> Option<(A_Loc<<<RFn as RFunction>::SSA as SSA>::ValueRef>, i64)>
{
debug!("compute_a_loc({:?})", node);
debug!("\tcalc: {:?}", self.print_node_as_comp(node));
let node_data = self.ssa.get_node_data(&node).expect("No node data.");
let ValueType::Integer {width} = node_data.vt;
let op_type = node_data.nt;
let operands = self.ssa.get_operands(&node);
match op_type {
//NodeType::Comment(ref c) if is_stack_pointer(c) => {
// just found a stack pointer - nothing special
// TODO store information about stack in FnAnalyzer
// TODO what about use of basepointer?
// let vt = node_data.vt;
// debug!("Found Local a-loc - rsp (compute_a_loc), offset: 0");
// Some((A_Loc {
// addr: AbstractAddress::MemAddr {
// region: self.mem_reg_local.clone(),
// offset: 0, //TODO other initial value?
// },
// size: Some(width as i64),
// }, 0))
//},
//NodeType::Comment(ref c) if is_base_pointer(c) => {
NodeType::Comment(ref c) if is_register(c) => {
Some((A_Loc {
addr: AbstractAddress::Reg {
reg_name: c.clone(),
},
size: Some(width as i64),
}, 0))
},
NodeType::Comment(ref c) if c.eq("mem") => {
// TODO Probably not what we want, but easier to detect in the end
debug!("Found Global a-loc (compute_a_loc), offs: 0");
Some((A_Loc {
addr: AbstractAddress::MemAddr {
// TODO Have *one* global memregion
region: MemRegion::new(MemRegionType::Global),
// TODO 0 is probably not the right offset
offset: 0,
},
size: Some(width as i64),
}, 0))
},
NodeType::Comment(c) => None, // TODO
NodeType::Op(MOpcode::OpStore) => { //TODO
// should return set of all a-loc that have been written to
// up to 'now'
// value that will be stored in the a-loc
let mem_state = operands[0];
let target_node = operands[1];
let value_node = operands[2];
let value = self.compute_abstract_value(value_node).as_const();
// TODO we want to get value of a-loc, too
if let Some((a_loc_base, a_loc_offs)) =
self.compute_a_loc(target_node) {
if let A_Loc{
addr: AbstractAddress::Reg{ reg_name: reg_name},
..} = a_loc_base {
let mem_reg = if reg_name.eq("rip") { // what about "mem"?
MemRegion{region_type: MemRegionType::Global}
} else {
self.mem_reg_local.clone()
};
Some ((A_Loc {
addr: AbstractAddress::MemAddr {
region: mem_reg,
offset: a_loc_offs,
},
size: Some(width as i64),
}, value))
} else {None}
} else {None}
},
NodeType::Op(MOpcode::OpLoad) => {None}, // TODO
NodeType::Op(m_opcode) => {
if operands.len() >= 2 {
let update = self.compute_abstract_value(operands[1]).as_const();
if let Some((a_loc_base, a_loc_offs)) =
self.compute_a_loc(self.ssa.lhs(&node)) {
Some((a_loc_base,
perform_op(m_opcode, vec![a_loc_offs, update]))) //TODO
} else {None}
} else if operands.len() >= 1 {
let update = self.compute_abstract_value(operands[0]).as_const();
if let Some((a_loc_base, a_loc_offs)) =
self.compute_a_loc(self.ssa.lhs(&node)) {
Some((a_loc_base,
perform_op(m_opcode, vec![a_loc_offs, update]))) //TODO
} else {None}
} else {None}
},
NodeType::Phi => {
// TODO will this code ever be executed?
// For the case both a-locs are the same, simply return
let a_loc_off_a = self.compute_a_loc(operands[0]);
let a_loc_off_b = self.compute_a_loc(operands[1]);
if a_loc_off_a == a_loc_off_b {
a_loc_off_a
} else {
warn!("don't know which a-loc to return");
None // TODO what to do otherwise?
}
},
NodeType::Undefined => None,
}
}
/// Analyze a single function.
//TODO: rename -> analyze ?
pub fn analyze_rfn(mut self)
-> AbstractStore<<<RFn as RFunction>::SSA as SSA>::ValueRef>
{
// mem region for function
info!("analyzing function");
for node in self.ssa.nodes() {
debug!("analyzing node: {:?}", node);
if let Ok(node_data) = self.ssa.get_node_data(&node) {
debug!("\t\tnode data: {:?}", node_data);
debug!("\t\tcalc: {}", self.print_node_as_comp (node));
debug!("\t\tvalue: {}", self.compute_abstract_value (node));
}
if self.ssa.is_expr (&node) {
debug!("\t\tis expr: {:?}", node);
//debug!("\t\toperands: {:?}", self.ssa.get_operands(&node));
//debug!("\t\tnode data: {:?}", self.ssa.get_node_data(&node));
debug!("\t\tcalc: {}", self.print_node_as_comp (node));
debug!("\t\tvalue: {}", self.compute_abstract_value (node));
//debug!("\t\t#operands ({}):", self.ssa.get_operands(&node).len());
//for operand in self.ssa.get_operands (&node) {
// debug!("\t\t\t{:?}", self.ssa.get_node_data(&operand));
// //debug!("\t\t\tinvolved regs:");
// //for reg in involved_registers (self.ssa, operand) {
// // debug!("\t\t\t\t{:?}", reg);
// //}
//}
{ //compute a-loc for SSA node
let a_loc = A_Loc {
addr: AbstractAddress::new_ssa_node(node),
size: None, // FIXME
};
let content = self.compute_abstract_value(node);
debug!("Computed abstract value: {}", content);
self.a_store_fn.store.insert(a_loc.clone(), content);
}
debug!("Computed a-loc for SSA-node");
//compute a-loc/check for existing a-loc
if let Some((a_loc_base, a_loc_offs)) =
self.compute_a_loc(node) {
debug!("Computed a-loc");
let op_type = self.ssa.get_node_data(&node)
.expect("No node data.").nt;
//debug!("{:?}", op_type);
debug!("calc: {}", self.print_node_as_comp (node));
//compute value-set
let content = self.compute_abstract_value(node);
debug!("a-loc: {}", a_loc_base);
debug!("\tcontent: {}", content);
self.a_store_fn.update (a_loc_base.clone(), content.clone());
//update a-loc -> value-set
}
}
};
self.a_store_fn
}
}
/// A Value Set Analyzer (VSAnalyzer)
/// This analyzes access patterns in memory
// make generic over architecture
pub trait ValueSetAnalyzer {
type N: Hash + Eq + Clone;
fn analyze_value_sets_ssa (&self) -> AbstractStore<Self::N>;
//fn analyze_value_sets_esil (&'a mut self) -> AbstractStore<N>;
}
impl<'a, F: RFunction + Clone> ValueSetAnalyzer for RadecoModule<'a, F>
where <F as RFunction>::SSA: Clone
{
type N = <<F as RFunction>::SSA as SSA>::ValueRef;
fn analyze_value_sets_ssa (&self) -> AbstractStore<Self::N> {
let mut a_store = AbstractStore::new();
let mem_reg_global: MemRegion = MemRegion::new(MemRegionType::Global);
let fkns = self.functions.iter();
for (ref addr, rfn) in fkns {
if (!rfn.fn_name().eq("sym.main")) & (!rfn.fn_name().eq("main")) {
continue;
}
let fn_analyzer = FnAnalyzer::from((*rfn).clone());
let mut a_store_fn = fn_analyzer.analyze_rfn();
a_store.merge (&mut a_store_fn);
}
println!("Returning Abstract Store:");
for (a_loc, strid_interv) in &a_store.store {
if let A_Loc{addr: AbstractAddress::Node{node: node}, ..} = *a_loc {
continue;
};
println!("{:?}", a_loc);
println!("Strided Interval: {}", strid_interv);
}
a_store
}
//fn analyze_value_sets_esil (&'a mut self) -> AbstractStore {
//}
}
#[cfg(test)]
mod vsa {
use super::*;
use frontend::containers::RadecoModule;
use frontend::source::FileSource;
#[test]
#[ignore]
// Disable it temporarily.
fn exist_ssa() {
let mut fsource = FileSource::open(Some("./test_files/ct1_sccp_ex/ct1_sccp_ex"));
let rmod = RadecoModule::from(&mut fsource);
let a_store = rmod.analyze_value_sets_ssa ();
}
}<|fim▁end|>
|
//! It offers data structures specific to memory access
//! VSA (value-set analysis) analyzes access patterns on memory
|
<|file_name|>globalscope.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::cell::{DomRefCell, RefMut};
use crate::dom::bindings::codegen::Bindings::BroadcastChannelBinding::BroadcastChannelMethods;
use crate::dom::bindings::codegen::Bindings::EventSourceBinding::EventSourceBinding::EventSourceMethods;
use crate::dom::bindings::codegen::Bindings::ImageBitmapBinding::{
ImageBitmapOptions, ImageBitmapSource,
};
use crate::dom::bindings::codegen::Bindings::PermissionStatusBinding::PermissionState;
use crate::dom::bindings::codegen::Bindings::VoidFunctionBinding::VoidFunction;
use crate::dom::bindings::codegen::Bindings::WindowBinding::WindowMethods;
use crate::dom::bindings::codegen::Bindings::WorkerGlobalScopeBinding::WorkerGlobalScopeMethods;
use crate::dom::bindings::conversions::{root_from_object, root_from_object_static};
use crate::dom::bindings::error::{report_pending_exception, Error, ErrorInfo};
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::refcounted::{Trusted, TrustedPromise};
use crate::dom::bindings::reflector::DomObject;
use crate::dom::bindings::root::{Dom, DomRoot, MutNullableDom};
use crate::dom::bindings::settings_stack::{entry_global, incumbent_global, AutoEntryScript};
use crate::dom::bindings::str::DOMString;
use crate::dom::bindings::structuredclone;
use crate::dom::bindings::utils::to_frozen_array;
use crate::dom::bindings::weakref::{DOMTracker, WeakRef};
use crate::dom::blob::Blob;
use crate::dom::broadcastchannel::BroadcastChannel;
use crate::dom::crypto::Crypto;
use crate::dom::dedicatedworkerglobalscope::{
DedicatedWorkerControlMsg, DedicatedWorkerGlobalScope,
};
use crate::dom::errorevent::ErrorEvent;
use crate::dom::event::{Event, EventBubbles, EventCancelable, EventStatus};
use crate::dom::eventsource::EventSource;
use crate::dom::eventtarget::EventTarget;
use crate::dom::file::File;
use crate::dom::gpudevice::GPUDevice;
use crate::dom::htmlscriptelement::{ScriptId, SourceCode};
use crate::dom::identityhub::Identities;
use crate::dom::imagebitmap::ImageBitmap;
use crate::dom::messageevent::MessageEvent;
use crate::dom::messageport::MessagePort;
use crate::dom::paintworkletglobalscope::PaintWorkletGlobalScope;
use crate::dom::performance::Performance;
use crate::dom::performanceobserver::VALID_ENTRY_TYPES;
use crate::dom::promise::Promise;
use crate::dom::readablestream::{ExternalUnderlyingSource, ReadableStream};
use crate::dom::serviceworker::ServiceWorker;
use crate::dom::serviceworkerregistration::ServiceWorkerRegistration;
use crate::dom::window::Window;
use crate::dom::workerglobalscope::WorkerGlobalScope;
use crate::dom::workletglobalscope::WorkletGlobalScope;
use crate::microtask::{Microtask, MicrotaskQueue, UserMicrotask};
use crate::realms::{enter_realm, AlreadyInRealm, InRealm};
use crate::script_module::{DynamicModuleList, ModuleTree};
use crate::script_module::{ModuleScript, ScriptFetchOptions};
use crate::script_runtime::{
CommonScriptMsg, ContextForRequestInterrupt, JSContext as SafeJSContext, ScriptChan, ScriptPort,
};
use crate::script_thread::{MainThreadScriptChan, ScriptThread};
use crate::task::TaskCanceller;
use crate::task_source::dom_manipulation::DOMManipulationTaskSource;
use crate::task_source::file_reading::FileReadingTaskSource;
use crate::task_source::networking::NetworkingTaskSource;
use crate::task_source::performance_timeline::PerformanceTimelineTaskSource;
use crate::task_source::port_message::PortMessageQueue;
use crate::task_source::remote_event::RemoteEventTaskSource;
use crate::task_source::timer::TimerTaskSource;
use crate::task_source::websocket::WebsocketTaskSource;
use crate::task_source::TaskSource;
use crate::task_source::TaskSourceName;
use crate::timers::{IsInterval, OneshotTimerCallback, OneshotTimerHandle};
use crate::timers::{OneshotTimers, TimerCallback};
use content_security_policy::CspList;
use crossbeam_channel::Sender;
use devtools_traits::{PageError, ScriptToDevtoolsControlMsg};
use dom_struct::dom_struct;
use embedder_traits::EmbedderMsg;
use ipc_channel::ipc::{self, IpcSender};
use ipc_channel::router::ROUTER;
use js::glue::{IsWrapper, UnwrapObjectDynamic};
use js::jsapi::Compile1;
use js::jsapi::SetScriptPrivate;
use js::jsapi::{CurrentGlobalOrNull, GetNonCCWObjectGlobal};
use js::jsapi::{HandleObject, Heap};
use js::jsapi::{JSContext, JSObject, JSScript};
use js::jsval::PrivateValue;
use js::jsval::{JSVal, UndefinedValue};
use js::panic::maybe_resume_unwind;
use js::rust::transform_str_to_source_text;
use js::rust::wrappers::{JS_ExecuteScript, JS_GetScriptPrivate};
use js::rust::{get_object_class, CompileOptionsWrapper, ParentRuntime, Runtime};
use js::rust::{HandleValue, MutableHandleValue};
use js::{JSCLASS_IS_DOMJSCLASS, JSCLASS_IS_GLOBAL};
use msg::constellation_msg::{
BlobId, BroadcastChannelRouterId, MessagePortId, MessagePortRouterId, PipelineId,
ServiceWorkerId, ServiceWorkerRegistrationId,
};
use net_traits::blob_url_store::{get_blob_origin, BlobBuf};
use net_traits::filemanager_thread::{
FileManagerResult, FileManagerThreadMsg, ReadFileProgress, RelativePos,
};
use net_traits::image_cache::ImageCache;
use net_traits::request::Referrer;
use net_traits::response::HttpsState;
use net_traits::{CoreResourceMsg, CoreResourceThread, IpcSend, ResourceThreads};
use parking_lot::Mutex;
use profile_traits::{ipc as profile_ipc, mem as profile_mem, time as profile_time};
use script_traits::serializable::{BlobData, BlobImpl, FileBlob};
use script_traits::transferable::MessagePortImpl;
use script_traits::{
BroadcastMsg, MessagePortMsg, MsDuration, PortMessageTask, ScriptMsg,
ScriptToConstellationChan, TimerEvent,
};
use script_traits::{TimerEventId, TimerSchedulerMsg, TimerSource};
use servo_url::{ImmutableOrigin, MutableOrigin, ServoUrl};
use std::borrow::Cow;
use std::cell::Cell;
use std::collections::hash_map::Entry;
use std::collections::{HashMap, VecDeque};
use std::mem;
use std::ops::Index;
use std::rc::Rc;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread::JoinHandle;
use time::{get_time, Timespec};
use uuid::Uuid;
use webgpu::{identity::WebGPUOpResult, ErrorScopeId, WebGPUDevice};
#[derive(JSTraceable)]
pub struct AutoCloseWorker {
/// https://html.spec.whatwg.org/multipage/#dom-workerglobalscope-closing
closing: Arc<AtomicBool>,
/// A handle to join on the worker thread.
join_handle: Option<JoinHandle<()>>,
/// A sender of control messages,
/// currently only used to signal shutdown.
control_sender: Sender<DedicatedWorkerControlMsg>,
/// The context to request an interrupt on the worker thread.
context: ContextForRequestInterrupt,
}
impl Drop for AutoCloseWorker {
/// <https://html.spec.whatwg.org/multipage/#terminate-a-worker>
fn drop(&mut self) {
// Step 1.
self.closing.store(true, Ordering::SeqCst);
if self
.control_sender
.send(DedicatedWorkerControlMsg::Exit)
.is_err()
{
warn!("Couldn't send an exit message to a dedicated worker.");
}
self.context.request_interrupt();
// TODO: step 2 and 3.
// Step 4 is unnecessary since we don't use actual ports for dedicated workers.
if self
.join_handle
.take()
.expect("No handle to join on worker.")
.join()
.is_err()
{
warn!("Failed to join on dedicated worker thread.");
}
}
}
#[dom_struct]
pub struct GlobalScope {
eventtarget: EventTarget,
crypto: MutNullableDom<Crypto>,
/// The message-port router id for this global, if it is managing ports.
message_port_state: DomRefCell<MessagePortState>,
/// The broadcast channels state this global, if it is managing any.
broadcast_channel_state: DomRefCell<BroadcastChannelState>,
/// The blobs managed by this global, if any.
blob_state: DomRefCell<BlobState>,
/// <https://w3c.github.io/ServiceWorker/#environment-settings-object-service-worker-registration-object-map>
registration_map:
DomRefCell<HashMap<ServiceWorkerRegistrationId, Dom<ServiceWorkerRegistration>>>,
/// <https://w3c.github.io/ServiceWorker/#environment-settings-object-service-worker-object-map>
worker_map: DomRefCell<HashMap<ServiceWorkerId, Dom<ServiceWorker>>>,
/// Pipeline id associated with this global.
pipeline_id: PipelineId,
/// A flag to indicate whether the developer tools has requested
/// live updates from the worker.
devtools_wants_updates: Cell<bool>,
/// Timers used by the Console API.
console_timers: DomRefCell<HashMap<DOMString, u64>>,
/// module map is used when importing JavaScript modules
/// https://html.spec.whatwg.org/multipage/#concept-settings-object-module-map
#[ignore_malloc_size_of = "mozjs"]
module_map: DomRefCell<HashMap<ServoUrl, Rc<ModuleTree>>>,
#[ignore_malloc_size_of = "mozjs"]
inline_module_map: DomRefCell<HashMap<ScriptId, Rc<ModuleTree>>>,
/// For providing instructions to an optional devtools server.
#[ignore_malloc_size_of = "channels are hard"]
devtools_chan: Option<IpcSender<ScriptToDevtoolsControlMsg>>,
/// For sending messages to the memory profiler.
#[ignore_malloc_size_of = "channels are hard"]
mem_profiler_chan: profile_mem::ProfilerChan,
/// For sending messages to the time profiler.
#[ignore_malloc_size_of = "channels are hard"]
time_profiler_chan: profile_time::ProfilerChan,
/// A handle for communicating messages to the constellation thread.
#[ignore_malloc_size_of = "channels are hard"]
script_to_constellation_chan: ScriptToConstellationChan,
#[ignore_malloc_size_of = "channels are hard"]
scheduler_chan: IpcSender<TimerSchedulerMsg>,
/// <https://html.spec.whatwg.org/multipage/#in-error-reporting-mode>
in_error_reporting_mode: Cell<bool>,
/// Associated resource threads for use by DOM objects like XMLHttpRequest,
/// including resource_thread, filemanager_thread and storage_thread
resource_threads: ResourceThreads,
/// The mechanism by which time-outs and intervals are scheduled.
/// <https://html.spec.whatwg.org/multipage/#timers>
timers: OneshotTimers,
/// Have timers been initialized?
init_timers: Cell<bool>,
/// The origin of the globalscope
origin: MutableOrigin,
/// https://html.spec.whatwg.org/multipage/#concept-environment-creation-url
creation_url: Option<ServoUrl>,
/// A map for storing the previous permission state read results.
permission_state_invocation_results: DomRefCell<HashMap<String, PermissionState>>,
/// The microtask queue associated with this global.
///
/// It is refcounted because windows in the same script thread share the
/// same microtask queue.
///
/// <https://html.spec.whatwg.org/multipage/#microtask-queue>
#[ignore_malloc_size_of = "Rc<T> is hard"]
microtask_queue: Rc<MicrotaskQueue>,
/// Vector storing closing references of all workers
#[ignore_malloc_size_of = "Arc"]
list_auto_close_worker: DomRefCell<Vec<AutoCloseWorker>>,
/// Vector storing references of all eventsources.
event_source_tracker: DOMTracker<EventSource>,
/// Storage for watching rejected promises waiting for some client to
/// consume their rejection.
/// Promises in this list have been rejected in the last turn of the
/// event loop without the rejection being handled.
/// Note that this can contain nullptrs in place of promises that were removed
/// because they were consumed before the rejection could be reported.
///
/// <https://html.spec.whatwg.org/multipage/#about-to-be-notified-rejected-promises-list>
#[ignore_malloc_size_of = "mozjs"]
uncaught_rejections: DomRefCell<Vec<Box<Heap<*mut JSObject>>>>,
/// Promises in this list have previously been reported as rejected
/// (because they were in the above list), but the rejection was handled
/// in the last turn of the event loop.
///
/// <https://html.spec.whatwg.org/multipage/#outstanding-rejected-promises-weak-set>
#[ignore_malloc_size_of = "mozjs"]
consumed_rejections: DomRefCell<Vec<Box<Heap<*mut JSObject>>>>,
/// True if running in headless mode.
is_headless: bool,
/// An optional string allowing the user agent to be set for testing.
user_agent: Cow<'static, str>,
/// Identity Manager for WebGPU resources
#[ignore_malloc_size_of = "defined in wgpu"]
gpu_id_hub: Arc<Mutex<Identities>>,
/// WebGPU devices
gpu_devices: DomRefCell<HashMap<WebGPUDevice, Dom<GPUDevice>>>,
// https://w3c.github.io/performance-timeline/#supportedentrytypes-attribute
#[ignore_malloc_size_of = "mozjs"]
frozen_supported_performance_entry_types: DomRefCell<Option<Heap<JSVal>>>,
/// Current HTTPS state (from the previous request).
https_state: Cell<HttpsState>,
/// The stack of active group labels for the Console APIs.
console_group_stack: DomRefCell<Vec<DOMString>>,
/// List of ongoing dynamic module imports.
dynamic_modules: DomRefCell<DynamicModuleList>,
/// Whether this global is considered to be in a secure context.
inherited_secure_context: Option<bool>,
}
/// A wrapper for glue-code between the ipc router and the event-loop.
struct MessageListener {
canceller: TaskCanceller,
task_source: PortMessageQueue,
context: Trusted<GlobalScope>,
}
/// A wrapper for broadcasts coming in over IPC, and the event-loop.
struct BroadcastListener {
canceller: TaskCanceller,
task_source: DOMManipulationTaskSource,
context: Trusted<GlobalScope>,
}
/// A wrapper between timer events coming in over IPC, and the event-loop.
struct TimerListener {
canceller: TaskCanceller,
task_source: TimerTaskSource,
context: Trusted<GlobalScope>,
}
/// A wrapper for the handling of file data received by the ipc router
struct FileListener {
/// State should progress as either of:
/// - Some(Empty) => Some(Receiving) => None
/// - Some(Empty) => None
state: Option<FileListenerState>,
task_source: FileReadingTaskSource,
task_canceller: TaskCanceller,
}
enum FileListenerCallback {
Promise(Box<dyn Fn(Rc<Promise>, Result<Vec<u8>, Error>) + Send>),
Stream,
}
enum FileListenerTarget {
Promise(TrustedPromise),
Stream(Trusted<ReadableStream>),
}
enum FileListenerState {
Empty(FileListenerCallback, FileListenerTarget),
Receiving(Vec<u8>, FileListenerCallback, FileListenerTarget),
}
#[derive(JSTraceable, MallocSizeOf)]
/// A holder of a weak reference for a DOM blob or file.
pub enum BlobTracker {
/// A weak ref to a DOM file.
File(WeakRef<File>),
/// A weak ref to a DOM blob.
Blob(WeakRef<Blob>),
}
#[derive(JSTraceable, MallocSizeOf)]
/// The info pertaining to a blob managed by this global.
pub struct BlobInfo {
/// The weak ref to the corresponding DOM object.
tracker: BlobTracker,
/// The data and logic backing the DOM object.
blob_impl: BlobImpl,
/// Whether this blob has an outstanding URL,
/// <https://w3c.github.io/FileAPI/#url>.
has_url: bool,
}
/// State representing whether this global is currently managing blobs.
#[derive(JSTraceable, MallocSizeOf)]
pub enum BlobState {
/// A map of managed blobs.
Managed(HashMap<BlobId, BlobInfo>),
/// This global is not managing any blobs at this time.
UnManaged,
}
/// The result of looking-up the data for a Blob,
/// containing either the in-memory bytes,
/// or the file-id.
enum BlobResult {
Bytes(Vec<u8>),
File(Uuid, usize),
}
/// Data representing a message-port managed by this global.
#[derive(JSTraceable, MallocSizeOf)]
#[unrooted_must_root_lint::must_root]
pub struct ManagedMessagePort {
/// The DOM port.
dom_port: Dom<MessagePort>,
/// The logic and data backing the DOM port.
/// The option is needed to take out the port-impl
/// as part of its transferring steps,
/// without having to worry about rooting the dom-port.
port_impl: Option<MessagePortImpl>,
/// We keep ports pending when they are first transfer-received,
/// and only add them, and ask the constellation to complete the transfer,
/// in a subsequent task if the port hasn't been re-transferred.
pending: bool,
/// Has the port been closed? If closed, it can be dropped and later GC'ed.
closed: bool,
}
/// State representing whether this global is currently managing broadcast channels.
#[derive(JSTraceable, MallocSizeOf)]
#[unrooted_must_root_lint::must_root]
pub enum BroadcastChannelState {
/// The broadcast-channel router id for this global, and a queue of managed channels.
/// Step 9, "sort destinations"
/// of https://html.spec.whatwg.org/multipage/#dom-broadcastchannel-postmessage
/// requires keeping track of creation order, hence the queue.
Managed(
BroadcastChannelRouterId,
/// The map of channel-name to queue of channels, in order of creation.
HashMap<DOMString, VecDeque<Dom<BroadcastChannel>>>,
),
/// This global is not managing any broadcast channels at this time.
UnManaged,
}
/// State representing whether this global is currently managing messageports.
#[derive(JSTraceable, MallocSizeOf)]
#[unrooted_must_root_lint::must_root]
pub enum MessagePortState {
/// The message-port router id for this global, and a map of managed ports.
Managed(
MessagePortRouterId,
HashMap<MessagePortId, ManagedMessagePort>,
),
/// This global is not managing any ports at this time.
UnManaged,
}
impl BroadcastListener {
/// Handle a broadcast coming in over IPC,
/// by queueing the appropriate task on the relevant event-loop.
fn handle(&self, event: BroadcastMsg) {
let context = self.context.clone();
// Note: strictly speaking we should just queue the message event tasks,
// not queue a task that then queues more tasks.
// This however seems to be hard to avoid in the light of the IPC.
// One can imagine queueing tasks directly,
// for channels that would be in the same script-thread.
let _ = self.task_source.queue_with_canceller(
task!(broadcast_message_event: move || {
let global = context.root();
// Step 10 of https://html.spec.whatwg.org/multipage/#dom-broadcastchannel-postmessage,
// For each BroadcastChannel object destination in destinations, queue a task.
global.broadcast_message_event(event, None);
}),
&self.canceller,
);
}
}
impl TimerListener {
/// Handle a timer-event coming-in over IPC,
/// by queuing the appropriate task on the relevant event-loop.
fn handle(&self, event: TimerEvent) {
let context = self.context.clone();
// Step 18, queue a task,
// https://html.spec.whatwg.org/multipage/#timer-initialisation-steps
let _ = self.task_source.queue_with_canceller(
task!(timer_event: move || {
let global = context.root();
let TimerEvent(source, id) = event;
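// Sanity check: a timer from a worker source must have been delivered to a worker global,
// and a timer from a window source to the window with the matching pipeline.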
match source {
TimerSource::FromWorker => {
global.downcast::<WorkerGlobalScope>().expect("Window timer delivered to worker");
},
TimerSource::FromWindow(pipeline) => {
assert_eq!(pipeline, global.pipeline_id());
global.downcast::<Window>().expect("Worker timer delivered to window");
},
};
// Step 7, substeps run in a task.
global.fire_timer(id);
}),
&self.canceller,
);
}
}
impl MessageListener {
/// A new message came in, handle it via a task enqueued on the event-loop.
/// A task is required, since we are using a trusted globalscope,
/// and we can only access the root from the event-loop.
fn notify(&self, msg: MessagePortMsg) {
match msg {
MessagePortMsg::CompleteTransfer(ports) => {
let context = self.context.clone();
let _ = self.task_source.queue_with_canceller(
task!(process_complete_transfer: move || {
let global = context.root();
let router_id = match global.port_router_id() {
Some(router_id) => router_id,
None => {
// If not managing any ports, no transfer can succeed,
// so just send back everything.
let _ = global.script_to_constellation_chan().send(
ScriptMsg::MessagePortTransferResult(None, vec![], ports),
);
return;
}
};
let mut succeeded = vec![];
let mut failed = HashMap::new();
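// Complete the transfer for ports this global manages;
// everything else is reported back to the constellation as failed.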
for (id, buffer) in ports.into_iter() {
if global.is_managing_port(&id) {
succeeded.push(id.clone());
global.complete_port_transfer(id, buffer);
} else {
failed.insert(id, buffer);
}
}
let _ = global.script_to_constellation_chan().send(
ScriptMsg::MessagePortTransferResult(Some(router_id), succeeded, failed),
);
}),
&self.canceller,
);
},
MessagePortMsg::CompletePendingTransfer(port_id, buffer) => {
let context = self.context.clone();
let _ = self.task_source.queue_with_canceller(
task!(complete_pending: move || {
let global = context.root();
global.complete_port_transfer(port_id, buffer);
}),
&self.canceller,
);
},
MessagePortMsg::NewTask(port_id, task) => {
let context = self.context.clone();
let _ = self.task_source.queue_with_canceller(
task!(process_new_task: move || {
let global = context.root();
global.route_task_to_port(port_id, task);
}),
&self.canceller,
);
},
MessagePortMsg::RemoveMessagePort(port_id) => {
let context = self.context.clone();
let _ = self.task_source.queue_with_canceller(
task!(process_remove_message_port: move || {
let global = context.root();
global.note_entangled_port_removed(&port_id);
}),
&self.canceller,
);
},
}
}
}
/// Callback used to enqueue file chunks to streams as part of FileListener.
fn stream_handle_incoming(stream: &ReadableStream, bytes: Result<Vec<u8>, Error>) {
match bytes {
Ok(b) => {
stream.enqueue_native(b);
},
Err(e) => {
stream.error_native(e);
},
}
}
/// Callback used to close streams as part of FileListener.
fn stream_handle_eof(stream: &ReadableStream) {
stream.close_native();
}
impl FileListener {
fn handle(&mut self, msg: FileManagerResult<ReadFileProgress>) {
match msg {
Ok(ReadFileProgress::Meta(blob_buf)) => match self.state.take() {
Some(FileListenerState::Empty(callback, target)) => {
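// For a stream target, forward the initial chunk to the stream straight away;
// for a promise target, start buffering the bytes until EOF.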
let bytes = if let FileListenerTarget::Stream(ref trusted_stream) = target {
let trusted = trusted_stream.clone();
let task = task!(enqueue_stream_chunk: move || {
let stream = trusted.root();
stream_handle_incoming(&*stream, Ok(blob_buf.bytes));
});
let _ = self
.task_source
.queue_with_canceller(task, &self.task_canceller);
Vec::with_capacity(0)
} else {
blob_buf.bytes
};
self.state = Some(FileListenerState::Receiving(bytes, callback, target));
},
_ => panic!(
"Unexpected FileListenerState when receiving ReadFileProgress::Meta msg."
),
},
Ok(ReadFileProgress::Partial(mut bytes_in)) => match self.state.take() {
Some(FileListenerState::Receiving(mut bytes, callback, target)) => {
if let FileListenerTarget::Stream(ref trusted_stream) = target {
let trusted = trusted_stream.clone();
let task = task!(enqueue_stream_chunk: move || {
let stream = trusted.root();
stream_handle_incoming(&*stream, Ok(bytes_in));
});
let _ = self
.task_source
.queue_with_canceller(task, &self.task_canceller);
} else {
bytes.append(&mut bytes_in);
};
self.state = Some(FileListenerState::Receiving(bytes, callback, target));
},
_ => panic!(
"Unexpected FileListenerState when receiving ReadFileProgress::Partial msg."
),
},
Ok(ReadFileProgress::EOF) => match self.state.take() {
Some(FileListenerState::Receiving(bytes, callback, target)) => match target {
FileListenerTarget::Promise(trusted_promise) => {
let callback = match callback {
FileListenerCallback::Promise(callback) => callback,
_ => panic!("Expected promise callback."),
};
let task = task!(resolve_promise: move || {
let promise = trusted_promise.root();
let _ac = enter_realm(&*promise.global());
callback(promise, Ok(bytes));
});
let _ = self
.task_source
.queue_with_canceller(task, &self.task_canceller);
},
FileListenerTarget::Stream(trusted_stream) => {
let trusted = trusted_stream.clone();
let task = task!(enqueue_stream_chunk: move || {
let stream = trusted.root();
stream_handle_eof(&*stream);
});
let _ = self
.task_source
.queue_with_canceller(task, &self.task_canceller);
},
},
_ => {
panic!("Unexpected FileListenerState when receiving ReadFileProgress::EOF msg.")
},
},
Err(_) => match self.state.take() {
Some(FileListenerState::Receiving(_, callback, target)) |
Some(FileListenerState::Empty(callback, target)) => {
let error = Err(Error::Network);
match target {
FileListenerTarget::Promise(trusted_promise) => {
let callback = match callback {
FileListenerCallback::Promise(callback) => callback,
_ => panic!("Expected promise callback."),
};
let _ = self.task_source.queue_with_canceller(
task!(reject_promise: move || {
let promise = trusted_promise.root();
let _ac = enter_realm(&*promise.global());
callback(promise, error);
}),
&self.task_canceller,
);
},
FileListenerTarget::Stream(trusted_stream) => {
let _ = self.task_source.queue_with_canceller(
task!(error_stream: move || {
let stream = trusted_stream.root();
stream_handle_incoming(&*stream, error);
}),
&self.task_canceller,
);
},
}
},
_ => panic!("Unexpected FileListenerState when receiving Err msg."),
},
}
}
}
impl GlobalScope {
pub fn new_inherited(
pipeline_id: PipelineId,
devtools_chan: Option<IpcSender<ScriptToDevtoolsControlMsg>>,
mem_profiler_chan: profile_mem::ProfilerChan,
time_profiler_chan: profile_time::ProfilerChan,
script_to_constellation_chan: ScriptToConstellationChan,
scheduler_chan: IpcSender<TimerSchedulerMsg>,
resource_threads: ResourceThreads,
origin: MutableOrigin,
creation_url: Option<ServoUrl>,
microtask_queue: Rc<MicrotaskQueue>,
is_headless: bool,
user_agent: Cow<'static, str>,
gpu_id_hub: Arc<Mutex<Identities>>,
inherited_secure_context: Option<bool>,
) -> Self {
Self {
message_port_state: DomRefCell::new(MessagePortState::UnManaged),
broadcast_channel_state: DomRefCell::new(BroadcastChannelState::UnManaged),
blob_state: DomRefCell::new(BlobState::UnManaged),
eventtarget: EventTarget::new_inherited(),
crypto: Default::default(),
registration_map: DomRefCell::new(HashMap::new()),
worker_map: DomRefCell::new(HashMap::new()),
pipeline_id,
devtools_wants_updates: Default::default(),
console_timers: DomRefCell::new(Default::default()),
module_map: DomRefCell::new(Default::default()),
inline_module_map: DomRefCell::new(Default::default()),
devtools_chan,
mem_profiler_chan,
time_profiler_chan,
script_to_constellation_chan,
scheduler_chan: scheduler_chan.clone(),
in_error_reporting_mode: Default::default(),
resource_threads,
timers: OneshotTimers::new(scheduler_chan),
init_timers: Default::default(),
origin,
creation_url,
permission_state_invocation_results: Default::default(),
microtask_queue,
list_auto_close_worker: Default::default(),
event_source_tracker: DOMTracker::new(),
uncaught_rejections: Default::default(),
consumed_rejections: Default::default(),
is_headless,
user_agent,
gpu_id_hub,
gpu_devices: DomRefCell::new(HashMap::new()),
frozen_supported_performance_entry_types: DomRefCell::new(Default::default()),
https_state: Cell::new(HttpsState::None),
console_group_stack: DomRefCell::new(Vec::new()),
dynamic_modules: DomRefCell::new(DynamicModuleList::new()),
inherited_secure_context,
}
}
/// The message-port router id of this global, if any.
fn port_router_id(&self) -> Option<MessagePortRouterId> {
if let MessagePortState::Managed(id, _message_ports) = &*self.message_port_state.borrow() {
Some(id.clone())
} else {
None
}
}
/// Is this global managing a given port?
fn is_managing_port(&self, port_id: &MessagePortId) -> bool {
if let MessagePortState::Managed(_router_id, message_ports) =
&*self.message_port_state.borrow()
{
return message_ports.contains_key(port_id);
}
false
}
/// Set up the IPC-to-event-loop glue for timers to schedule themselves.
fn setup_timers(&self) {
if self.init_timers.get() {
return;
}
self.init_timers.set(true);
let (timer_ipc_chan, timer_ipc_port) = ipc::channel().unwrap();
self.timers.setup_scheduling(timer_ipc_chan);
// Set up a route from IPC to the task-queue for the timer-task-source.
let context = Trusted::new(&*self);
let (task_source, canceller) = (
self.timer_task_source(),
self.task_canceller(TaskSourceName::Timer),
);
let timer_listener = TimerListener {
context,
task_source,
canceller,
};
ROUTER.add_route(
timer_ipc_port.to_opaque(),
Box::new(move |message| {
let event = message.to().unwrap();
timer_listener.handle(event);
}),
);
}
/// <https://w3c.github.io/ServiceWorker/#get-the-service-worker-registration-object>
pub fn get_serviceworker_registration(
&self,
script_url: &ServoUrl,
scope: &ServoUrl,
registration_id: ServiceWorkerRegistrationId,
installing_worker: Option<ServiceWorkerId>,
_waiting_worker: Option<ServiceWorkerId>,
_active_worker: Option<ServiceWorkerId>,
) -> DomRoot<ServiceWorkerRegistration> {
// Step 1
let mut registrations = self.registration_map.borrow_mut();
if let Some(registration) = registrations.get(®istration_id) {
// Step 3
return DomRoot::from_ref(&**registration);
}
// Step 2.1 -> 2.5
let new_registration =
ServiceWorkerRegistration::new(self, scope.clone(), registration_id.clone());
// Step 2.6
if let Some(worker_id) = installing_worker {
let worker = self.get_serviceworker(script_url, scope, worker_id);
new_registration.set_installing(&*worker);
}
// TODO: 2.7 (waiting worker)
// TODO: 2.8 (active worker)
// Step 2.9
registrations.insert(registration_id, Dom::from_ref(&*new_registration));
// Step 3
new_registration
}
/// <https://w3c.github.io/ServiceWorker/#get-the-service-worker-object>
pub fn get_serviceworker(
&self,
script_url: &ServoUrl,
scope: &ServoUrl,
worker_id: ServiceWorkerId,
) -> DomRoot<ServiceWorker> {
// Step 1
let mut workers = self.worker_map.borrow_mut();
if let Some(worker) = workers.get(&worker_id) {
// Step 3
DomRoot::from_ref(&**worker)
} else {
// Step 2.1
// TODO: step 2.2, worker state.
let new_worker =
ServiceWorker::new(self, script_url.clone(), scope.clone(), worker_id.clone());
// Step 2.3
workers.insert(worker_id, Dom::from_ref(&*new_worker));
// Step 3
new_worker
}
}
/// Complete the transfer of a message-port.
fn complete_port_transfer(&self, port_id: MessagePortId, tasks: VecDeque<PortMessageTask>) {
let should_start = if let MessagePortState::Managed(_id, message_ports) =
&mut *self.message_port_state.borrow_mut()
{
match message_ports.get_mut(&port_id) {
None => {
panic!("complete_port_transfer called for an unknown port.");
},
Some(managed_port) => {
if managed_port.pending {
panic!("CompleteTransfer msg received for a pending port.");
}
if let Some(port_impl) = managed_port.port_impl.as_mut() {
port_impl.complete_transfer(tasks);
port_impl.enabled()
} else {
panic!("managed-port has no port-impl.");
}
},
}
} else {
panic!("complete_port_transfer called for an unknown port.");
};
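// Start the port outside of the above borrow of `message_port_state`,
// since `start_message_port` borrows it again.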
if should_start {
self.start_message_port(&port_id);
}
}
/// Clean up DOM-related resources.
pub fn perform_a_dom_garbage_collection_checkpoint(&self) {
self.perform_a_message_port_garbage_collection_checkpoint();
self.perform_a_blob_garbage_collection_checkpoint();
self.perform_a_broadcast_channel_garbage_collection_checkpoint();
}
/// Remove the routers for ports and broadcast-channels.
/// Drain the list of workers.
pub fn remove_web_messaging_and_dedicated_workers_infra(&self) {
self.remove_message_ports_router();
self.remove_broadcast_channel_router();
// Drop each ref to a worker explicitly now,
// which will send a shutdown signal,
// and join on the worker thread.
self.list_auto_close_worker
.borrow_mut()
.drain(0..)
.for_each(|worker| drop(worker));
}
/// Update our state to un-managed,
/// and tell the constellation to drop the sender to our message-port router.
fn remove_message_ports_router(&self) {
if let MessagePortState::Managed(router_id, _message_ports) =
&*self.message_port_state.borrow()
{
let _ = self
.script_to_constellation_chan()
.send(ScriptMsg::RemoveMessagePortRouter(router_id.clone()));
}
*self.message_port_state.borrow_mut() = MessagePortState::UnManaged;
}
/// Update our state to un-managed,
/// and tell the constellation to drop the sender to our broadcast router.
fn remove_broadcast_channel_router(&self) {
if let BroadcastChannelState::Managed(router_id, _channels) =
&*self.broadcast_channel_state.borrow()
{
let _ =
self.script_to_constellation_chan()
.send(ScriptMsg::RemoveBroadcastChannelRouter(
router_id.clone(),
self.origin().immutable().clone(),
));
}
*self.broadcast_channel_state.borrow_mut() = BroadcastChannelState::UnManaged;
}
/// <https://html.spec.whatwg.org/multipage/#entangle>
pub fn entangle_ports(&self, port1: MessagePortId, port2: MessagePortId) {
if let MessagePortState::Managed(_id, message_ports) =
&mut *self.message_port_state.borrow_mut()
{
for (port_id, entangled_id) in &[(port1, port2), (port2, port1)] {
match message_ports.get_mut(&port_id) {
None => {
return warn!("entangled_ports called on a global not managing the port.");
},
Some(managed_port) => {
if let Some(port_impl) = managed_port.port_impl.as_mut() {
managed_port.dom_port.entangle(entangled_id.clone());
port_impl.entangle(entangled_id.clone());
} else {
panic!("managed-port has no port-impl.");
}
},
}
}
} else {
panic!("entangled_ports called on a global not managing any ports.");
}
let _ = self
.script_to_constellation_chan()
.send(ScriptMsg::EntanglePorts(port1, port2));
}
/// Note that the entangled port of `port_id` has been removed in another global.
pub fn note_entangled_port_removed(&self, port_id: &MessagePortId) {
// Note: currently this is a no-op,
// as we only use the `close` method to manage the local lifecycle of a port.
// This could be used as part of lifecycle management to determine whether a port can be GC'ed.
// See https://github.com/servo/servo/issues/25772
warn!(
"Entangled port of {:?} has been removed in another global",
port_id
);
}
/// Handle the transfer of a port in the current task.
pub fn mark_port_as_transferred(&self, port_id: &MessagePortId) -> MessagePortImpl {
if let MessagePortState::Managed(_id, message_ports) =
&mut *self.message_port_state.borrow_mut()
{
let mut port_impl = message_ports
.remove(&port_id)
.map(|ref mut managed_port| {
managed_port
.port_impl
.take()
.expect("Managed port doesn't have a port-impl.")
})
.expect("mark_port_as_transferred called on a global not managing the port.");
port_impl.set_has_been_shipped();
let _ = self
.script_to_constellation_chan()
.send(ScriptMsg::MessagePortShipped(port_id.clone()));
port_impl
} else {
panic!("mark_port_as_transferred called on a global not managing any ports.");
}
}
/// <https://html.spec.whatwg.org/multipage/#dom-messageport-start>
pub fn start_message_port(&self, port_id: &MessagePortId) {
if let MessagePortState::Managed(_id, message_ports) =
&mut *self.message_port_state.borrow_mut()
{
let message_buffer = match message_ports.get_mut(&port_id) {
None => panic!("start_message_port called on a unknown port."),
Some(managed_port) => {
if let Some(port_impl) = managed_port.port_impl.as_mut() {
port_impl.start()
} else {
panic!("managed-port has no port-impl.");
}
},
};
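// Dispatch any messages that were buffered while the port was not yet enabled,
// each as a task on the port-message-queue.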
if let Some(message_buffer) = message_buffer {
for task in message_buffer {
let port_id = port_id.clone();
let this = Trusted::new(&*self);
let _ = self.port_message_queue().queue(
task!(process_pending_port_messages: move || {
let target_global = this.root();
target_global.route_task_to_port(port_id, task);
}),
&self,
);
}
}
} else {
return warn!("start_message_port called on a global not managing any ports.");
}
}
/// <https://html.spec.whatwg.org/multipage/#dom-messageport-close>
pub fn close_message_port(&self, port_id: &MessagePortId) {
if let MessagePortState::Managed(_id, message_ports) =
&mut *self.message_port_state.borrow_mut()
{
match message_ports.get_mut(&port_id) {
None => panic!("close_message_port called on an unknown port."),
Some(managed_port) => {
if let Some(port_impl) = managed_port.port_impl.as_mut() {
port_impl.close();
managed_port.closed = true;
} else {
panic!("managed-port has no port-impl.");
}
},
};
} else {
return warn!("close_message_port called on a global not managing any ports.");
}
}
/// <https://html.spec.whatwg.org/multipage/#message-port-post-message-steps>
// Steps 6 and 7
pub fn post_messageport_msg(&self, port_id: MessagePortId, task: PortMessageTask) {
if let MessagePortState::Managed(_id, message_ports) =
&mut *self.message_port_state.borrow_mut()
{
let entangled_port = match message_ports.get_mut(&port_id) {
None => panic!("post_messageport_msg called on an unknown port."),
Some(managed_port) => {
if let Some(port_impl) = managed_port.port_impl.as_mut() {
port_impl.entangled_port_id()
} else {
panic!("managed-port has no port-impl.");
}
},
};
if let Some(entangled_id) = entangled_port {
// Step 7
let this = Trusted::new(&*self);
let _ = self.port_message_queue().queue(
task!(post_message: move || {
let global = this.root();
// Note: we do this in a task, as this will ensure the global and constellation
// are aware of any transfer that might still take place in the current task.
global.route_task_to_port(entangled_id, task);
}),
self,
);
}
} else {
return warn!("post_messageport_msg called on a global not managing any ports.");
}
}
/// If we don't know about the port,
/// send the message to the constellation for routing.
fn re_route_port_task(&self, port_id: MessagePortId, task: PortMessageTask) {
let _ = self
.script_to_constellation_chan()
.send(ScriptMsg::RerouteMessagePort(port_id, task));
}
/// <https://html.spec.whatwg.org/multipage/#dom-broadcastchannel-postmessage>
/// Step 7 and following steps.
pub fn schedule_broadcast(&self, msg: BroadcastMsg, channel_id: &Uuid) {
// First, broadcast locally.
self.broadcast_message_event(msg.clone(), Some(channel_id));
if let BroadcastChannelState::Managed(router_id, _) =
&*self.broadcast_channel_state.borrow()
{
// Second, broadcast to other globals via the constellation.
//
// Note: for globals in the same script-thread,
// we could skip the hop to the constellation.
let _ = self
.script_to_constellation_chan()
.send(ScriptMsg::ScheduleBroadcast(router_id.clone(), msg));
} else {
panic!("Attemps to broadcast a message via global not managing any channels.");
}
}
/// <https://html.spec.whatwg.org/multipage/#dom-broadcastchannel-postmessage>
/// Step 7 and following steps.
pub fn broadcast_message_event(&self, event: BroadcastMsg, channel_id: Option<&Uuid>) {
if let BroadcastChannelState::Managed(_, channels) = &*self.broadcast_channel_state.borrow()
{
let BroadcastMsg {
data,
origin,
channel_name,
} = event;
// Step 7, a few preliminary steps.
// - Check the worker is not closing.
if let Some(worker) = self.downcast::<WorkerGlobalScope>() {
if worker.is_closing() {
return;
}
}
// - Check the associated document is fully-active.
if let Some(window) = self.downcast::<Window>() {
if !window.Document().is_fully_active() {
return;
}
}
// - Check for a case-sensitive match for the name of the channel.
let channel_name = DOMString::from_string(channel_name);
if let Some(channels) = channels.get(&channel_name) {
channels
.iter()
.filter(|ref channel| {
// Step 8.
// Filter out the sender.
if let Some(id) = channel_id {
channel.id() != id
} else {
true
}
})
.map(|channel| DomRoot::from_ref(&**channel))
// Step 9, sort by creation order,
// done by using a queue to store channels in creation order.
.for_each(|channel| {
let data = data.clone_for_broadcast();
let origin = origin.clone();
// Step 10: Queue a task on the DOM manipulation task-source,
// to fire the message event
let channel = Trusted::new(&*channel);
let global = Trusted::new(&*self);
let _ = self.dom_manipulation_task_source().queue(
task!(process_pending_port_messages: move || {
let destination = channel.root();
let global = global.root();
// 10.1 Check for closed flag.
if destination.closed() {
return;
}
rooted!(in(*global.get_cx()) let mut message = UndefinedValue());
// Step 10.3 StructuredDeserialize(serialized, targetRealm).
if let Ok(ports) = structuredclone::read(&global, data, message.handle_mut()) {
// Step 10.4, Fire an event named message at destination.
MessageEvent::dispatch_jsval(
&*destination.upcast(),
&global,
message.handle(),
Some(&origin.ascii_serialization()),
None,
ports,
);
} else {
// Step 10.3, fire an event named messageerror at destination.
MessageEvent::dispatch_error(&*destination.upcast(), &global);
}
}),
&self,
);
});
}
}
}
/// Route the task to be handled by the relevant port.
pub fn route_task_to_port(&self, port_id: MessagePortId, task: PortMessageTask) {
let should_dispatch = if let MessagePortState::Managed(_id, message_ports) =
&mut *self.message_port_state.borrow_mut()
{
if !message_ports.contains_key(&port_id) {
self.re_route_port_task(port_id, task);
return;
}
match message_ports.get_mut(&port_id) {
None => panic!("route_task_to_port called for an unknown port."),
Some(managed_port) => {
// If the port is not enabled yet, or if it is awaiting the completion of its transfer,
// the task will be buffered and dispatched upon enablement or completion of the transfer.
if let Some(port_impl) = managed_port.port_impl.as_mut() {
port_impl.handle_incoming(task).and_then(|to_dispatch| {
Some((DomRoot::from_ref(&*managed_port.dom_port), to_dispatch))
})
} else {
panic!("managed-port has no port-impl.");
}
},
}
} else {
self.re_route_port_task(port_id, task);
return;
};
if let Some((dom_port, PortMessageTask { origin, data })) = should_dispatch {
// Substep 3-4
rooted!(in(*self.get_cx()) let mut message_clone = UndefinedValue());
if let Ok(ports) = structuredclone::read(self, data, message_clone.handle_mut()) {
// Substep 6
// Dispatch the event, using the dom message-port.
MessageEvent::dispatch_jsval(
&dom_port.upcast(),
self,
message_clone.handle(),
Some(&origin.ascii_serialization()),
None,
ports,
);
} else {
// Step 4, fire messageerror event.
MessageEvent::dispatch_error(&dom_port.upcast(), self);
}
}
}
/// Check all ports that have been transfer-received in the previous task,
/// and complete their transfer if they haven't been re-transferred.
pub fn maybe_add_pending_ports(&self) {
if let MessagePortState::Managed(router_id, message_ports) =
&mut *self.message_port_state.borrow_mut()
{
let to_be_added: Vec<MessagePortId> = message_ports
.iter()
.filter_map(|(id, managed_port)| {
if managed_port.pending {
Some(id.clone())
} else {
None
}
})
.collect();
for id in to_be_added.iter() {
let managed_port = message_ports
.get_mut(&id)
.expect("Collected port-id to match an entry");
if !managed_port.pending {
panic!("Only pending ports should be found in to_be_added")
}
managed_port.pending = false;
}
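// Ask the constellation to complete the transfer of the ports that were still pending.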
let _ =
self.script_to_constellation_chan()
.send(ScriptMsg::CompleteMessagePortTransfer(
router_id.clone(),
to_be_added,
));
} else {
warn!("maybe_add_pending_ports called on a global not managing any ports.");
}
}
/// https://html.spec.whatwg.org/multipage/#ports-and-garbage-collection
pub fn perform_a_message_port_garbage_collection_checkpoint(&self) {
let is_empty = if let MessagePortState::Managed(_id, message_ports) =
&mut *self.message_port_state.borrow_mut()
{
let to_be_removed: Vec<MessagePortId> = message_ports
.iter()
.filter_map(|(id, ref managed_port)| {
if managed_port.closed {
// Let the constellation know to drop this port and the one it is entangled with,
// and to forward this message to the script-process where the entangled is found.
let _ = self
.script_to_constellation_chan()
.send(ScriptMsg::RemoveMessagePort(id.clone()));
Some(id.clone())
} else {
None
}
})
.collect();
for id in to_be_removed {
message_ports.remove(&id);
}
message_ports.is_empty()
} else {
false
};
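// If no ports are left to manage, also remove the message-port router.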
if is_empty {
self.remove_message_ports_router();
}
}
/// Remove broadcast-channels that are closed.
/// TODO: Also remove them if they do not have an event-listener.
/// see https://github.com/servo/servo/issues/25772
pub fn perform_a_broadcast_channel_garbage_collection_checkpoint(&self) {
let is_empty = if let BroadcastChannelState::Managed(router_id, ref mut channels) =
&mut *self.broadcast_channel_state.borrow_mut()
{
channels.retain(|name, ref mut channels| {
channels.retain(|ref chan| !chan.closed());
if channels.is_empty() {
let _ = self.script_to_constellation_chan().send(
ScriptMsg::RemoveBroadcastChannelNameInRouter(
router_id.clone(),
name.to_string(),
self.origin().immutable().clone(),
),
);
false
} else {
true
}
});
channels.is_empty()
} else {
false
};
if is_empty {
self.remove_broadcast_channel_router();
}
}
/// Start tracking a broadcast-channel.
pub fn track_broadcast_channel(&self, dom_channel: &BroadcastChannel) {
let mut current_state = self.broadcast_channel_state.borrow_mut();
if let BroadcastChannelState::UnManaged = &*current_state {
// Set up a route for IPC, for broadcasts from the constellation to our channels.
let (broadcast_control_sender, broadcast_control_receiver) =
ipc::channel().expect("ipc channel failure");
let context = Trusted::new(self);
let (task_source, canceller) = (
self.dom_manipulation_task_source(),
self.task_canceller(TaskSourceName::DOMManipulation),
);
let listener = BroadcastListener {
canceller,
task_source,
context,
};
ROUTER.add_route(
broadcast_control_receiver.to_opaque(),
Box::new(move |message| {
let msg = message.to();
match msg {
Ok(msg) => listener.handle(msg),
Err(err) => warn!("Error receiving a BroadcastMsg: {:?}", err),
}
}),
);
let router_id = BroadcastChannelRouterId::new();
*current_state = BroadcastChannelState::Managed(router_id.clone(), HashMap::new());
let _ = self
.script_to_constellation_chan()
.send(ScriptMsg::NewBroadcastChannelRouter(
router_id,
broadcast_control_sender,
self.origin().immutable().clone(),
));
}
if let BroadcastChannelState::Managed(router_id, channels) = &mut *current_state {
let entry = channels.entry(dom_channel.Name()).or_insert_with(|| {
let _ = self.script_to_constellation_chan().send(
ScriptMsg::NewBroadcastChannelNameInRouter(
router_id.clone(),
dom_channel.Name().to_string(),
self.origin().immutable().clone(),
),
);
VecDeque::new()
});
entry.push_back(Dom::from_ref(dom_channel));
} else {
panic!("track_broadcast_channel should have first switched the state to managed.");
}
}
/// Start tracking a message-port
pub fn track_message_port(&self, dom_port: &MessagePort, port_impl: Option<MessagePortImpl>) {
let mut current_state = self.message_port_state.borrow_mut();
if let MessagePortState::UnManaged = &*current_state {
// Set up a route for IPC, for messages from the constellation to our ports.
let (port_control_sender, port_control_receiver) =
ipc::channel().expect("ipc channel failure");
let context = Trusted::new(self);
let (task_source, canceller) = (
self.port_message_queue(),
self.task_canceller(TaskSourceName::PortMessage),
);
let listener = MessageListener {
canceller,
task_source,
context,
};
ROUTER.add_route(
port_control_receiver.to_opaque(),
Box::new(move |message| {
let msg = message.to();
match msg {
Ok(msg) => listener.notify(msg),
Err(err) => warn!("Error receiving a MessagePortMsg: {:?}", err),
}
}),
);
let router_id = MessagePortRouterId::new();
*current_state = MessagePortState::Managed(router_id.clone(), HashMap::new());
let _ = self
.script_to_constellation_chan()
.send(ScriptMsg::NewMessagePortRouter(
router_id,
port_control_sender,
));
}
if let MessagePortState::Managed(router_id, message_ports) = &mut *current_state {
if let Some(port_impl) = port_impl {
// We keep transfer-received ports as "pending",
// and only ask the constellation to complete the transfer
// if they're not re-shipped in the current task.
message_ports.insert(
dom_port.message_port_id().clone(),
ManagedMessagePort {
port_impl: Some(port_impl),
dom_port: Dom::from_ref(dom_port),
pending: true,
closed: false,
},
);
// Queue a task to complete the transfer,
// unless the port is re-transferred in the current task.
let this = Trusted::new(&*self);
let _ = self.port_message_queue().queue(
task!(process_pending_port_messages: move || {
let target_global = this.root();
target_global.maybe_add_pending_ports();
}),
&self,
);
} else {
// If this is a newly-created port, let the constellation immediately know.
let port_impl = MessagePortImpl::new(dom_port.message_port_id().clone());
message_ports.insert(
dom_port.message_port_id().clone(),
ManagedMessagePort {
port_impl: Some(port_impl),
dom_port: Dom::from_ref(dom_port),
pending: false,
closed: false,
},
);
let _ = self
.script_to_constellation_chan()
.send(ScriptMsg::NewMessagePort(
router_id.clone(),
dom_port.message_port_id().clone(),
));
};
} else {
panic!("track_message_port should have first switched the state to managed.");
}
}
/// <https://html.spec.whatwg.org/multipage/#serialization-steps>
/// defined at <https://w3c.github.io/FileAPI/#blob-section>.
/// Get the snapshot state and underlying bytes of the blob.
pub fn serialize_blob(&self, blob_id: &BlobId) -> BlobImpl {
// Note: we combine the snapshot state and underlying bytes into one call,
// which seems spec compliant.
// See https://w3c.github.io/FileAPI/#snapshot-state
let bytes = self
.get_blob_bytes(blob_id)
.expect("Could not read bytes from blob as part of serialization steps.");
let type_string = self.get_blob_type_string(blob_id);
// Note: the new BlobImpl is a clone, but with its own BlobId.
BlobImpl::new_from_bytes(bytes, type_string)
}
fn track_blob_info(&self, blob_info: BlobInfo, blob_id: BlobId) {
let mut blob_state = self.blob_state.borrow_mut();
match &mut *blob_state {
BlobState::UnManaged => {
let mut blobs_map = HashMap::new();
blobs_map.insert(blob_id, blob_info);
*blob_state = BlobState::Managed(blobs_map);
},
BlobState::Managed(blobs_map) => {
blobs_map.insert(blob_id, blob_info);
},
}
}
/// Start tracking a blob
pub fn track_blob(&self, dom_blob: &Blob, blob_impl: BlobImpl) {
let blob_id = blob_impl.blob_id();
let blob_info = BlobInfo {
blob_impl,
tracker: BlobTracker::Blob(WeakRef::new(dom_blob)),
has_url: false,
};
self.track_blob_info(blob_info, blob_id);
}
/// Start tracking a file
pub fn track_file(&self, file: &File, blob_impl: BlobImpl) {
let blob_id = blob_impl.blob_id();
let blob_info = BlobInfo {
blob_impl,
tracker: BlobTracker::File(WeakRef::new(file)),
has_url: false,
};
self.track_blob_info(blob_info, blob_id);
}
/// Clean up any file or blob that is unreachable from script,
/// unless it has an outstanding blob URL.
/// <https://w3c.github.io/FileAPI/#lifeTime>
fn perform_a_blob_garbage_collection_checkpoint(&self) {
let mut blob_state = self.blob_state.borrow_mut();
if let BlobState::Managed(blobs_map) = &mut *blob_state {
blobs_map.retain(|_id, blob_info| {
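// Keep a blob if its DOM object is still reachable or if it has an outstanding blob URL;
// otherwise drop it, decrementing the file manager refcount for file-backed blobs.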
let garbage_collected = match &blob_info.tracker {
BlobTracker::File(weak) => weak.root().is_none(),
BlobTracker::Blob(weak) => weak.root().is_none(),
};
if garbage_collected && !blob_info.has_url {
if let BlobData::File(ref f) = blob_info.blob_impl.blob_data() {
self.decrement_file_ref(f.get_id());
}
false
} else {
true
}
});
if blobs_map.is_empty() {
*blob_state = BlobState::UnManaged;
}
}
}
/// Clean up all file-related resources on document unload.
/// <https://w3c.github.io/FileAPI/#lifeTime>
pub fn clean_up_all_file_resources(&self) {
let mut blob_state = self.blob_state.borrow_mut();
if let BlobState::Managed(blobs_map) = &mut *blob_state {
blobs_map.drain().for_each(|(_id, blob_info)| {
if let BlobData::File(ref f) = blob_info.blob_impl.blob_data() {
self.decrement_file_ref(f.get_id());
}
});
}
*blob_state = BlobState::UnManaged;
}
fn decrement_file_ref(&self, id: Uuid) {
let origin = get_blob_origin(&self.get_url());
let (tx, rx) = profile_ipc::channel(self.time_profiler_chan().clone()).unwrap();
let msg = FileManagerThreadMsg::DecRef(id, origin, tx);
self.send_to_file_manager(msg);
let _ = rx.recv();
}
/// Get a slice to the inner data of a Blob.
/// In the case of a File-backed blob, this might incur a synchronous read and caching.
pub fn get_blob_bytes(&self, blob_id: &BlobId) -> Result<Vec<u8>, ()> {
let parent = {
let blob_state = self.blob_state.borrow();
if let BlobState::Managed(blobs_map) = &*blob_state {
let blob_info = blobs_map
.get(blob_id)
.expect("get_blob_bytes for an unknown blob.");
match blob_info.blob_impl.blob_data() {
BlobData::Sliced(ref parent, ref rel_pos) => {
Some((parent.clone(), rel_pos.clone()))
},
_ => None,
}
} else {
panic!("get_blob_bytes called on a global not managing any blobs.");
}
};
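// For a sliced blob, read the parent's bytes and return only the slice's range;
// otherwise read the blob's own bytes.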
match parent {
Some((parent_id, rel_pos)) => self.get_blob_bytes_non_sliced(&parent_id).map(|v| {
let range = rel_pos.to_abs_range(v.len());
v.index(range).to_vec()
}),
None => self.get_blob_bytes_non_sliced(blob_id),
}
}
/// Get bytes from a non-sliced blob
fn get_blob_bytes_non_sliced(&self, blob_id: &BlobId) -> Result<Vec<u8>, ()> {
let blob_state = self.blob_state.borrow();
if let BlobState::Managed(blobs_map) = &*blob_state {
let blob_info = blobs_map
.get(blob_id)
.expect("get_blob_bytes_non_sliced called for a unknown blob.");
match blob_info.blob_impl.blob_data() {
BlobData::File(ref f) => {
let (buffer, is_new_buffer) = match f.get_cache() {
Some(bytes) => (bytes, false),
None => {
let bytes = self.read_file(f.get_id())?;
(bytes, true)
},
};
// Cache
if is_new_buffer {
f.cache_bytes(buffer.clone());
}
Ok(buffer)
},
BlobData::Memory(ref s) => Ok(s.clone()),
BlobData::Sliced(_, _) => panic!("This blob doesn't have a parent."),
}
} else {
panic!("get_blob_bytes_non_sliced called on a global not managing any blobs.");
}
}
/// Get a slice to the inner data of a Blob,
/// if it's a memory blob, or its file-id and file-size otherwise.
///
/// Note: this is almost a duplicate of `get_blob_bytes`,
/// tweaked for integration with streams.
/// TODO: merge with `get_blob_bytes` by way of broader integration with blob streams.
fn get_blob_bytes_or_file_id(&self, blob_id: &BlobId) -> BlobResult {
let parent = {
let blob_state = self.blob_state.borrow();
if let BlobState::Managed(blobs_map) = &*blob_state {
let blob_info = blobs_map
.get(blob_id)
.expect("get_blob_bytes_or_file_id for an unknown blob.");
match blob_info.blob_impl.blob_data() {
BlobData::Sliced(ref parent, ref rel_pos) => {
Some((parent.clone(), rel_pos.clone()))
},
_ => None,
}
} else {
panic!("get_blob_bytes_or_file_id called on a global not managing any blobs.");
}
};
match parent {
Some((parent_id, rel_pos)) => {
match self.get_blob_bytes_non_sliced_or_file_id(&parent_id) {
BlobResult::Bytes(bytes) => {
let range = rel_pos.to_abs_range(bytes.len());
BlobResult::Bytes(bytes.index(range).to_vec())
},
res => res,
}
},
None => self.get_blob_bytes_non_sliced_or_file_id(blob_id),
}
}
/// Get bytes from a non-sliced blob if in memory, or its file-id and file-size.
///
/// Note: this is almost a duplicate of `get_blob_bytes_non_sliced`,
/// tweaked for integration with streams.
/// TODO: merge with `get_blob_bytes` by way of broader integration with blob streams.
fn get_blob_bytes_non_sliced_or_file_id(&self, blob_id: &BlobId) -> BlobResult {
let blob_state = self.blob_state.borrow();
if let BlobState::Managed(blobs_map) = &*blob_state {
let blob_info = blobs_map
.get(blob_id)
.expect("get_blob_bytes_non_sliced_or_file_id called for a unknown blob.");
match blob_info.blob_impl.blob_data() {
BlobData::File(ref f) => match f.get_cache() {
Some(bytes) => BlobResult::Bytes(bytes.clone()),
None => BlobResult::File(f.get_id(), f.get_size() as usize),
},
BlobData::Memory(ref s) => BlobResult::Bytes(s.clone()),
BlobData::Sliced(_, _) => panic!("This blob doesn't have a parent."),
}
} else {
panic!(
"get_blob_bytes_non_sliced_or_file_id called on a global not managing any blobs."
);
}
}
/// Get a copy of the type_string of a blob.
pub fn get_blob_type_string(&self, blob_id: &BlobId) -> String {
let blob_state = self.blob_state.borrow();
if let BlobState::Managed(blobs_map) = &*blob_state {
let blob_info = blobs_map
.get(blob_id)
.expect("get_blob_type_string called for a unknown blob.");
blob_info.blob_impl.type_string()
} else {
panic!("get_blob_type_string called on a global not managing any blobs.");
}
}
/// https://w3c.github.io/FileAPI/#dfn-size
pub fn get_blob_size(&self, blob_id: &BlobId) -> u64 {
let blob_state = self.blob_state.borrow();
if let BlobState::Managed(blobs_map) = &*blob_state {
let parent = {
let blob_info = blobs_map
.get(blob_id)
.expect("get_blob_size called for a unknown blob.");
match blob_info.blob_impl.blob_data() {
BlobData::Sliced(ref parent, ref rel_pos) => {
Some((parent.clone(), rel_pos.clone()))
},
_ => None,
}
};
match parent {
Some((parent_id, rel_pos)) => {
let parent_info = blobs_map
.get(&parent_id)
.expect("Parent of blob whose size is unknown.");
let parent_size = match parent_info.blob_impl.blob_data() {
BlobData::File(ref f) => f.get_size(),
BlobData::Memory(ref v) => v.len() as u64,
BlobData::Sliced(_, _) => panic!("Blob ancestry should be only one level."),
};
rel_pos.to_abs_range(parent_size as usize).len() as u64
},
None => {
let blob_info = blobs_map.get(blob_id).expect("Blob whose size is unknown.");
match blob_info.blob_impl.blob_data() {
BlobData::File(ref f) => f.get_size(),
BlobData::Memory(ref v) => v.len() as u64,
BlobData::Sliced(_, _) => panic!(
"It was previously checked that this blob does not have a parent."
),
}
},
}
} else {
panic!("get_blob_size called on a global not managing any blobs.");
}
}
pub fn get_blob_url_id(&self, blob_id: &BlobId) -> Uuid {
let mut blob_state = self.blob_state.borrow_mut();
if let BlobState::Managed(blobs_map) = &mut *blob_state {
let parent = {
let blob_info = blobs_map
.get_mut(blob_id)
.expect("get_blob_url_id called for a unknown blob.");
// Keep track of blobs with outstanding URLs.
blob_info.has_url = true;
match blob_info.blob_impl.blob_data() {
BlobData::Sliced(ref parent, ref rel_pos) => {
Some((parent.clone(), rel_pos.clone()))
},
_ => None,
}
};
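// For a sliced blob, promote the parent first (without validating its URL)
// and register a sliced URL entry relative to it; otherwise promote this blob directly.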
match parent {
Some((parent_id, rel_pos)) => {
let parent_file_id = {
let parent_info = blobs_map
.get_mut(&parent_id)
.expect("Parent of blob whose url is requested is unknown.");
self.promote(parent_info, /* set_valid is */ false)
};
let parent_size = self.get_blob_size(&parent_id);
let blob_info = blobs_map
.get_mut(blob_id)
.expect("Blob whose url is requested is unknown.");
self.create_sliced_url_id(blob_info, &parent_file_id, &rel_pos, parent_size)
},
None => {
let blob_info = blobs_map
.get_mut(blob_id)
.expect("Blob whose url is requested is unknown.");
self.promote(blob_info, /* set_valid is */ true)
},
}
} else {
panic!("get_blob_url_id called on a global not managing any blobs.");
}
}
/// Get a FileID representing sliced parent-blob content
fn create_sliced_url_id(
&self,
blob_info: &mut BlobInfo,
parent_file_id: &Uuid,
rel_pos: &RelativePos,
parent_len: u64,
) -> Uuid {
let origin = get_blob_origin(&self.get_url());
let (tx, rx) = profile_ipc::channel(self.time_profiler_chan().clone()).unwrap();
let msg = FileManagerThreadMsg::AddSlicedURLEntry(
parent_file_id.clone(),
rel_pos.clone(),
tx,
origin.clone(),
);
self.send_to_file_manager(msg);
match rx.recv().expect("File manager thread is down.") {
Ok(new_id) => {
*blob_info.blob_impl.blob_data_mut() = BlobData::File(FileBlob::new(
new_id.clone(),
None,
None,
rel_pos.to_abs_range(parent_len as usize).len() as u64,
));
// Return the indirect id reference
new_id
},
Err(_) => {
// Return dummy id
Uuid::new_v4()
},
}
}
/// Promote a non-sliced blob:
/// 1. Memory-based: the bytes in the data slice will be transferred to the file manager thread.
/// 2. File-based: if `set_valid`, activate the FileID so it can serve as a URL.
/// Depending on `set_valid`, the returned FileID can be part of
/// a valid or an invalid Blob URL.
pub fn promote(&self, blob_info: &mut BlobInfo, set_valid: bool) -> Uuid {
let mut bytes = vec![];
let global_url = self.get_url();
match blob_info.blob_impl.blob_data_mut() {
BlobData::Sliced(_, _) => {
panic!("Sliced blobs should use create_sliced_url_id instead of promote.");
},
BlobData::File(ref f) => {
if set_valid {
let origin = get_blob_origin(&global_url);
let (tx, rx) = profile_ipc::channel(self.time_profiler_chan().clone()).unwrap();
let msg = FileManagerThreadMsg::ActivateBlobURL(f.get_id(), tx, origin.clone());
self.send_to_file_manager(msg);
match rx.recv().unwrap() {
Ok(_) => return f.get_id(),
// Return a dummy id on error
Err(_) => return Uuid::new_v4(),
}
} else {
// no need to activate
return f.get_id();
}
},
BlobData::Memory(ref mut bytes_in) => mem::swap(bytes_in, &mut bytes),
};
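// At this point the blob was memory-backed: its bytes were swapped out above,
// so send them to the file manager and switch the blob to a file-backed representation.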
let origin = get_blob_origin(&global_url);
let blob_buf = BlobBuf {
filename: None,
type_string: blob_info.blob_impl.type_string(),
size: bytes.len() as u64,
bytes: bytes.to_vec(),
};
let id = Uuid::new_v4();
let msg = FileManagerThreadMsg::PromoteMemory(id, blob_buf, set_valid, origin.clone());
self.send_to_file_manager(msg);
*blob_info.blob_impl.blob_data_mut() = BlobData::File(FileBlob::new(
id.clone(),
None,
Some(bytes.to_vec()),
bytes.len() as u64,
));
id
}
fn send_to_file_manager(&self, msg: FileManagerThreadMsg) {
let resource_threads = self.resource_threads();
let _ = resource_threads.send(CoreResourceMsg::ToFileManager(msg));
}
fn read_file(&self, id: Uuid) -> Result<Vec<u8>, ()> {
let recv = self.send_msg(id);
GlobalScope::read_msg(recv)
}
/// <https://w3c.github.io/FileAPI/#blob-get-stream>
pub fn get_blob_stream(&self, blob_id: &BlobId) -> DomRoot<ReadableStream> {
let (file_id, size) = match self.get_blob_bytes_or_file_id(blob_id) {
BlobResult::Bytes(bytes) => {
// If we have all the bytes in memory, queue them and close the stream.
let stream = ReadableStream::new_from_bytes(self, bytes);
return stream;
},
BlobResult::File(id, size) => (id, size),
};
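// The blob is file-backed: create a stream fed with chunks read from the file manager,
// routed over IPC via a FileListener.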
let stream = ReadableStream::new_with_external_underlying_source(
self,
ExternalUnderlyingSource::Blob(size as usize),
);
let recv = self.send_msg(file_id);
let trusted_stream = Trusted::new(&*stream.clone());
let task_canceller = self.task_canceller(TaskSourceName::FileReading);
let task_source = self.file_reading_task_source();
let mut file_listener = FileListener {
state: Some(FileListenerState::Empty(
FileListenerCallback::Stream,
FileListenerTarget::Stream(trusted_stream),
)),
task_source,
task_canceller,
};
ROUTER.add_route(
recv.to_opaque(),
Box::new(move |msg| {
file_listener.handle(
msg.to()
.expect("Deserialization of file listener msg failed."),
);
}),
);
stream
}
pub fn read_file_async(
&self,
id: Uuid,
promise: Rc<Promise>,
callback: Box<dyn Fn(Rc<Promise>, Result<Vec<u8>, Error>) + Send>,
) {
let recv = self.send_msg(id);
let trusted_promise = TrustedPromise::new(promise);
let task_canceller = self.task_canceller(TaskSourceName::FileReading);
let task_source = self.file_reading_task_source();
let mut file_listener = FileListener {
state: Some(FileListenerState::Empty(
FileListenerCallback::Promise(callback),
FileListenerTarget::Promise(trusted_promise),
)),
task_source,
task_canceller,
};
ROUTER.add_route(
recv.to_opaque(),
Box::new(move |msg| {
file_listener.handle(
msg.to()
.expect("Deserialization of file listener msg failed."),
);
}),
);
}
fn send_msg(&self, id: Uuid) -> profile_ipc::IpcReceiver<FileManagerResult<ReadFileProgress>> {
let resource_threads = self.resource_threads();
let (chan, recv) = profile_ipc::channel(self.time_profiler_chan().clone()).unwrap();
let origin = get_blob_origin(&self.get_url());
let msg = FileManagerThreadMsg::ReadFile(chan, id, origin);
let _ = resource_threads.send(CoreResourceMsg::ToFileManager(msg));
recv
}
fn read_msg(
receiver: profile_ipc::IpcReceiver<FileManagerResult<ReadFileProgress>>,
) -> Result<Vec<u8>, ()> {
let mut bytes = vec![];
loop {
match receiver.recv().unwrap() {
Ok(ReadFileProgress::Meta(mut blob_buf)) => {
bytes.append(&mut blob_buf.bytes);
},
Ok(ReadFileProgress::Partial(mut bytes_in)) => {
bytes.append(&mut bytes_in);
},
Ok(ReadFileProgress::EOF) => {
return Ok(bytes);
},
Err(_) => return Err(()),
}
}
}
pub fn permission_state_invocation_results(
&self,
) -> &DomRefCell<HashMap<String, PermissionState>> {
&self.permission_state_invocation_results
}
pub fn track_worker(
&self,
closing: Arc<AtomicBool>,
join_handle: JoinHandle<()>,
control_sender: Sender<DedicatedWorkerControlMsg>,
context: ContextForRequestInterrupt,
) {
self.list_auto_close_worker
.borrow_mut()
.push(AutoCloseWorker {
closing,
join_handle: Some(join_handle),
control_sender: control_sender,
context,
});
}
pub fn track_event_source(&self, event_source: &EventSource) {
self.event_source_tracker.track(event_source);
}
pub fn close_event_sources(&self) -> bool {
let mut canceled_any_fetch = false;
self.event_source_tracker
.for_each(
|event_source: DomRoot<EventSource>| match event_source.ReadyState() {
2 => {},
_ => {
event_source.cancel();
canceled_any_fetch = true;
},
},
);
canceled_any_fetch
}
/// Returns the global scope of the realm that the given DOM object's reflector
/// was created in.
#[allow(unsafe_code)]
pub fn from_reflector<T: DomObject>(reflector: &T) -> DomRoot<Self> {
unsafe { GlobalScope::from_object(*reflector.reflector().get_jsobject()) }
}
/// Returns the global scope of the realm that the given JS object was created in.
#[allow(unsafe_code)]
pub unsafe fn from_object(obj: *mut JSObject) -> DomRoot<Self> {
assert!(!obj.is_null());
let global = GetNonCCWObjectGlobal(obj);
global_scope_from_global_static(global)
}
/// Returns the global scope for the given JSContext
#[allow(unsafe_code)]
pub unsafe fn from_context(cx: *mut JSContext, _realm: InRealm) -> DomRoot<Self> {
let global = CurrentGlobalOrNull(cx);
assert!(!global.is_null());
global_scope_from_global(global, cx)
}
/// Returns the global scope for the given SafeJSContext
#[allow(unsafe_code)]
pub fn from_safe_context(cx: SafeJSContext, realm: InRealm) -> DomRoot<Self> {
unsafe { Self::from_context(*cx, realm) }
}
/// Returns the global object of the realm that the given JS object
/// was created in, after unwrapping any wrappers.
#[allow(unsafe_code)]
pub unsafe fn from_object_maybe_wrapped(
mut obj: *mut JSObject,
cx: *mut JSContext,
) -> DomRoot<Self> {
if IsWrapper(obj) {
obj = UnwrapObjectDynamic(obj, cx, /* stopAtWindowProxy = */ 0);
assert!(!obj.is_null());
}
GlobalScope::from_object(obj)
}
pub fn add_uncaught_rejection(&self, rejection: HandleObject) {
self.uncaught_rejections
.borrow_mut()
.push(Heap::boxed(rejection.get()));
}
pub fn remove_uncaught_rejection(&self, rejection: HandleObject) {
let mut uncaught_rejections = self.uncaught_rejections.borrow_mut();
if let Some(index) = uncaught_rejections
.iter()
.position(|promise| *promise == Heap::boxed(rejection.get()))
{
uncaught_rejections.remove(index);
}
}
pub fn get_uncaught_rejections(&self) -> &DomRefCell<Vec<Box<Heap<*mut JSObject>>>> {
&self.uncaught_rejections
}
pub fn add_consumed_rejection(&self, rejection: HandleObject) {
self.consumed_rejections
.borrow_mut()
.push(Heap::boxed(rejection.get()));
}
pub fn remove_consumed_rejection(&self, rejection: HandleObject) {
let mut consumed_rejections = self.consumed_rejections.borrow_mut();
if let Some(index) = consumed_rejections
.iter()
.position(|promise| *promise == Heap::boxed(rejection.get()))
{
consumed_rejections.remove(index);
}
}
pub fn get_consumed_rejections(&self) -> &DomRefCell<Vec<Box<Heap<*mut JSObject>>>> {
&self.consumed_rejections
}
pub fn set_module_map(&self, url: ServoUrl, module: ModuleTree) {
self.module_map.borrow_mut().insert(url, Rc::new(module));
}
pub fn get_module_map(&self) -> &DomRefCell<HashMap<ServoUrl, Rc<ModuleTree>>> {
&self.module_map
}
pub fn set_inline_module_map(&self, script_id: ScriptId, module: ModuleTree) {
self.inline_module_map
.borrow_mut()
.insert(script_id, Rc::new(module));
}
pub fn get_inline_module_map(&self) -> &DomRefCell<HashMap<ScriptId, Rc<ModuleTree>>> {
&self.inline_module_map
}
#[allow(unsafe_code)]
pub fn get_cx(&self) -> SafeJSContext {
unsafe { SafeJSContext::from_ptr(Runtime::get()) }
}
pub fn crypto(&self) -> DomRoot<Crypto> {
self.crypto.or_init(|| Crypto::new(self))
}
pub fn live_devtools_updates(&self) -> bool {
self.devtools_wants_updates.get()
}
pub fn set_devtools_wants_updates(&self, value: bool) {
self.devtools_wants_updates.set(value);
}
pub fn time(&self, label: DOMString) -> Result<(), ()> {
let mut timers = self.console_timers.borrow_mut();
if timers.len() >= 10000 {
return Err(());
}
match timers.entry(label) {
Entry::Vacant(entry) => {
entry.insert(timestamp_in_ms(get_time()));
Ok(())
},
Entry::Occupied(_) => Err(()),
}
}
pub fn time_end(&self, label: &str) -> Result<u64, ()> {
self.console_timers
.borrow_mut()
.remove(label)
.ok_or(())
.map(|start| timestamp_in_ms(get_time()) - start)
}
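// Illustrative note (not in the original source): these two methods back the
// console.time()/console.timeEnd() bookkeeping. A label can only be started
// once (a second time() with the same label returns Err(())), and time_end()
// for an unknown label also returns Err(()). A minimal sketch of the expected
// pairing, assuming `global` is a DomRoot<GlobalScope>:
//
//     global.time(DOMString::from("fetch")).unwrap();
//     // ... timed work ...
//     let elapsed_ms = global.time_end("fetch").unwrap();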
/// Get an `&IpcSender<ScriptToDevtoolsControlMsg>` to send messages
/// to the devtools thread when available.
pub fn devtools_chan(&self) -> Option<&IpcSender<ScriptToDevtoolsControlMsg>> {
self.devtools_chan.as_ref()
}
pub fn issue_page_warning(&self, warning: &str) {
if let Some(ref chan) = self.devtools_chan {
let _ = chan.send(ScriptToDevtoolsControlMsg::ReportPageError(
self.pipeline_id.clone(),
PageError {
type_: "PageError".to_string(),
errorMessage: warning.to_string(),
sourceName: self.get_url().to_string(),
lineText: "".to_string(),
lineNumber: 0,
columnNumber: 0,
category: "script".to_string(),
timeStamp: 0, //TODO
error: false,
warning: true,
exception: true,
strict: false,
private: false,
},
));
}
}
/// Get a sender to the memory profiler thread.
pub fn mem_profiler_chan(&self) -> &profile_mem::ProfilerChan {
&self.mem_profiler_chan
}
/// Get a sender to the time profiler thread.
pub fn time_profiler_chan(&self) -> &profile_time::ProfilerChan {
&self.time_profiler_chan
}
/// Get a sender to the constellation thread.
pub fn script_to_constellation_chan(&self) -> &ScriptToConstellationChan {
&self.script_to_constellation_chan
}
pub fn send_to_embedder(&self, msg: EmbedderMsg) {
self.send_to_constellation(ScriptMsg::ForwardToEmbedder(msg));
}
pub fn send_to_constellation(&self, msg: ScriptMsg) {
self.script_to_constellation_chan().send(msg).unwrap();
}
pub fn scheduler_chan(&self) -> &IpcSender<TimerSchedulerMsg> {
&self.scheduler_chan
}
/// Get the `PipelineId` for this global scope.
pub fn pipeline_id(&self) -> PipelineId {
self.pipeline_id
}
/// Get the origin for this global scope
pub fn origin(&self) -> &MutableOrigin {
&self.origin
}
/// Get the creation_url for this global scope
pub fn creation_url(&self) -> &Option<ServoUrl> {
&self.creation_url
}
pub fn image_cache(&self) -> Arc<dyn ImageCache> {
if let Some(window) = self.downcast::<Window>() {
return window.image_cache();
}
if let Some(worker) = self.downcast::<DedicatedWorkerGlobalScope>() {
return worker.image_cache();
}
if let Some(worker) = self.downcast::<PaintWorkletGlobalScope>() {
return worker.image_cache();
}
unreachable!();
}
/// Get the [base url](https://html.spec.whatwg.org/multipage/#api-base-url)
/// for this global scope.
pub fn api_base_url(&self) -> ServoUrl {
if let Some(window) = self.downcast::<Window>() {
// https://html.spec.whatwg.org/multipage/#script-settings-for-browsing-contexts:api-base-url
return window.Document().base_url();
}
if let Some(worker) = self.downcast::<WorkerGlobalScope>() {
// https://html.spec.whatwg.org/multipage/#script-settings-for-workers:api-base-url
return worker.get_url().clone();
}
if let Some(worklet) = self.downcast::<WorkletGlobalScope>() {
// https://drafts.css-houdini.org/worklets/#script-settings-for-worklets
return worklet.base_url();
}
unreachable!();
}
/// Get the URL for this global scope.
pub fn get_url(&self) -> ServoUrl {
if let Some(window) = self.downcast::<Window>() {
return window.get_url();
}
if let Some(worker) = self.downcast::<WorkerGlobalScope>() {
return worker.get_url().clone();
}
if let Some(worklet) = self.downcast::<WorkletGlobalScope>() {
// TODO: is this the right URL to return?
return worklet.base_url();
}
unreachable!();
}
/// Determine the Referrer for a request whose Referrer is "client"
pub fn get_referrer(&self) -> Referrer {
// Step 3 of https://w3c.github.io/webappsec-referrer-policy/#determine-requests-referrer
if let Some(window) = self.downcast::<Window>() {
// Substep 3.1
// Substep 3.1.1
let mut document = window.Document();
// Substep 3.1.2
if let ImmutableOrigin::Opaque(_) = document.origin().immutable() {
return Referrer::NoReferrer;
}
let mut url = document.url();
// Substep 3.1.3
while url.as_str() == "about:srcdoc" {
document = document
.browsing_context()
.expect("iframe should have browsing context")
.parent()
.expect("iframes browsing_context should have parent")
.document()
.expect("iframes parent should have document");
url = document.url();
}
// Substep 3.1.4
Referrer::Client(url)
} else {
// Substep 3.2
Referrer::Client(self.get_url())
}
}
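// Illustrative example (not in the original source) of the srcdoc loop above:
// for an <iframe srcdoc="..."> embedded in a page at https://example.org/a,
// the iframe document's URL is "about:srcdoc", so the loop walks up to the
// parent browsing context's document and the referrer becomes
// Referrer::Client of https://example.org/a. Documents with an opaque origin
// short-circuit to Referrer::NoReferrer before the loop runs.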
/// Extract a `Window`, panic if the global object is not a `Window`.
pub fn as_window(&self) -> &Window {
self.downcast::<Window>().expect("expected a Window scope")
}
/// <https://html.spec.whatwg.org/multipage/#report-the-error>
pub fn report_an_error(&self, error_info: ErrorInfo, value: HandleValue) {
// Step 1.
if self.in_error_reporting_mode.get() {
return;
}
// Step 2.
self.in_error_reporting_mode.set(true);
// Steps 3-6.
// FIXME(#13195): muted errors.
let event = ErrorEvent::new(
self,
atom!("error"),
EventBubbles::DoesNotBubble,
EventCancelable::Cancelable,
error_info.message.as_str().into(),
error_info.filename.as_str().into(),
error_info.lineno,
error_info.column,
value,
);
// Step 7.
let event_status = event.upcast::<Event>().fire(self.upcast::<EventTarget>());
// Step 8.
self.in_error_reporting_mode.set(false);
// Step 9.
if event_status == EventStatus::NotCanceled {
// https://html.spec.whatwg.org/multipage/#runtime-script-errors-2
if let Some(dedicated) = self.downcast::<DedicatedWorkerGlobalScope>() {
dedicated.forward_error_to_worker_object(error_info);
} else if self.is::<Window>() {
if let Some(ref chan) = self.devtools_chan {
let _ = chan.send(ScriptToDevtoolsControlMsg::ReportPageError(
self.pipeline_id.clone(),
PageError {
type_: "PageError".to_string(),
errorMessage: error_info.message.clone(),
sourceName: error_info.filename.clone(),
lineText: "".to_string(), //TODO
lineNumber: error_info.lineno,
columnNumber: error_info.column,
category: "script".to_string(),
timeStamp: 0, //TODO
error: true,
warning: false,
exception: true,
strict: false,
private: false,
},
));
}
}
}
}
/// Get the `&ResourceThreads` for this global scope.
pub fn resource_threads(&self) -> &ResourceThreads {
&self.resource_threads
}
/// Get the `CoreResourceThread` for this global scope.
pub fn core_resource_thread(&self) -> CoreResourceThread {
self.resource_threads().sender()
}
/// `ScriptChan` to send messages to the event loop of this global scope.
pub fn script_chan(&self) -> Box<dyn ScriptChan + Send> {
if let Some(window) = self.downcast::<Window>() {
return MainThreadScriptChan(window.main_thread_script_chan().clone()).clone();
}
if let Some(worker) = self.downcast::<WorkerGlobalScope>() {
return worker.script_chan();
}
unreachable!();
}
/// `TaskSource` to send messages to the networking task source of
/// this global scope.
pub fn networking_task_source(&self) -> NetworkingTaskSource {
if let Some(window) = self.downcast::<Window>() {
return window.task_manager().networking_task_source();
}
if let Some(worker) = self.downcast::<WorkerGlobalScope>() {
return worker.networking_task_source();
}
unreachable!();
}
/// `TaskSource` to send messages to the port message queue of
/// this global scope.
pub fn port_message_queue(&self) -> PortMessageQueue {
if let Some(window) = self.downcast::<Window>() {
return window.task_manager().port_message_queue();
}
if let Some(worker) = self.downcast::<WorkerGlobalScope>() {
return worker.port_message_queue();
}
unreachable!();
}
/// `TaskSource` to send messages to the timer queue of
/// this global scope.
pub fn timer_task_source(&self) -> TimerTaskSource {
if let Some(window) = self.downcast::<Window>() {
return window.task_manager().timer_task_source();
}
if let Some(worker) = self.downcast::<WorkerGlobalScope>() {
return worker.timer_task_source();
}
unreachable!();
}
/// `TaskSource` to send messages to the remote-event task source of
/// this global scope.
pub fn remote_event_task_source(&self) -> RemoteEventTaskSource {
if let Some(window) = self.downcast::<Window>() {
return window.task_manager().remote_event_task_source();
}
if let Some(worker) = self.downcast::<WorkerGlobalScope>() {
return worker.remote_event_task_source();
}
unreachable!();
}
/// `TaskSource` to send messages to the websocket task source of
/// this global scope.
pub fn websocket_task_source(&self) -> WebsocketTaskSource {
if let Some(window) = self.downcast::<Window>() {
return window.task_manager().websocket_task_source();
}
if let Some(worker) = self.downcast::<WorkerGlobalScope>() {
return worker.websocket_task_source();
}
unreachable!();
}
/// Evaluate JS code on this global scope.
pub fn evaluate_js_on_global_with_result(
&self,
code: &str,
rval: MutableHandleValue,
fetch_options: ScriptFetchOptions,
script_base_url: ServoUrl,
) -> bool {
let source_code = SourceCode::Text(Rc::new(DOMString::from_string((*code).to_string())));
self.evaluate_script_on_global_with_result(
&source_code,
"",
rval,
1,
fetch_options,
script_base_url,
)
}
/// Evaluate a JS script on this global scope.
#[allow(unsafe_code)]
pub fn evaluate_script_on_global_with_result(
&self,
code: &SourceCode,
filename: &str,
rval: MutableHandleValue,
line_number: u32,
fetch_options: ScriptFetchOptions,
script_base_url: ServoUrl,
) -> bool {
let metadata = profile_time::TimerMetadata {
url: if filename.is_empty() {
self.get_url().as_str().into()
} else {
filename.into()
},
iframe: profile_time::TimerMetadataFrameType::RootWindow,
incremental: profile_time::TimerMetadataReflowType::FirstReflow,
};
profile_time::profile(
profile_time::ProfilerCategory::ScriptEvaluate,
Some(metadata),
self.time_profiler_chan().clone(),
|| {
let cx = self.get_cx();
let ar = enter_realm(&*self);
let _aes = AutoEntryScript::new(self);
unsafe {
rooted!(in(*cx) let mut compiled_script = std::ptr::null_mut::<JSScript>());
match code {
SourceCode::Text(text_code) => {
let options = CompileOptionsWrapper::new(*cx, filename, line_number);
debug!("compiling dom string");
compiled_script.set(Compile1(
*cx,
options.ptr,
&mut transform_str_to_source_text(text_code),
));
if compiled_script.is_null() {
debug!("error compiling Dom string");
report_pending_exception(*cx, true, InRealm::Entered(&ar));
return false;
}
},
SourceCode::Compiled(pre_compiled_script) => {
compiled_script.set(pre_compiled_script.source_code.get());
},
};
assert!(!compiled_script.is_null());
rooted!(in(*cx) let mut script_private = UndefinedValue());
JS_GetScriptPrivate(*compiled_script, script_private.handle_mut());
// When `ScriptPrivate` for the compiled script is undefined,
// we need to set it so that it can be used in dynamic import context.
if script_private.is_undefined() {
debug!("Set script private for {}", script_base_url);
let module_script_data = Rc::new(ModuleScript::new(
script_base_url,
fetch_options,
// We can't initialize a module owner here because
// the executing context of the script might be different
// from the dynamic import script's executing context.
None,
));
SetScriptPrivate(
*compiled_script,
&PrivateValue(Rc::into_raw(module_script_data) as *const _),
);
}
let result = JS_ExecuteScript(*cx, compiled_script.handle(), rval);
if !result {
debug!("error evaluating Dom string");
report_pending_exception(*cx, true, InRealm::Entered(&ar));
}
maybe_resume_unwind();
result
}
},
)
}
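// Illustrative sketch (not in the original source) of driving the evaluation
// helpers above. The way `fetch_options` is obtained is assumed here and not
// shown in this file, so treat that part as hypothetical:
//
//     rooted!(in(*global.get_cx()) let mut rval = UndefinedValue());
//     let ok = global.evaluate_js_on_global_with_result(
//         "6 * 7",
//         rval.handle_mut(),
//         fetch_options,
//         global.api_base_url(),
//     );
//     // On success `ok` is true and `rval` holds the JS number 42.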
/// <https://html.spec.whatwg.org/multipage/#timer-initialisation-steps>
pub fn schedule_callback(
&self,
callback: OneshotTimerCallback,
duration: MsDuration,
) -> OneshotTimerHandle {
self.setup_timers();
self.timers
.schedule_callback(callback, duration, self.timer_source())
}
pub fn unschedule_callback(&self, handle: OneshotTimerHandle) {
self.timers.unschedule_callback(handle);
}
/// <https://html.spec.whatwg.org/multipage/#timer-initialisation-steps>
pub fn set_timeout_or_interval(
&self,
callback: TimerCallback,
arguments: Vec<HandleValue>,
timeout: i32,
is_interval: IsInterval,
) -> i32 {
self.setup_timers();
self.timers.set_timeout_or_interval(
self,
callback,
arguments,
timeout,
is_interval,
self.timer_source(),
)
}
pub fn clear_timeout_or_interval(&self, handle: i32) {
self.timers.clear_timeout_or_interval(self, handle);
}
pub fn queue_function_as_microtask(&self, callback: Rc<VoidFunction>) {
self.enqueue_microtask(Microtask::User(UserMicrotask {
callback: callback,
pipeline: self.pipeline_id(),
}))
}
pub fn create_image_bitmap(
&self,
image: ImageBitmapSource,
options: &ImageBitmapOptions,
) -> Rc<Promise> {
let in_realm_proof = AlreadyInRealm::assert(&self);
let p = Promise::new_in_current_realm(&self, InRealm::Already(&in_realm_proof));
if options.resizeWidth.map_or(false, |w| w == 0) {
p.reject_error(Error::InvalidState);
return p;
}
if options.resizeHeight.map_or(false, |w| w == 0) {
p.reject_error(Error::InvalidState);
return p;
}
let promise = match image {
ImageBitmapSource::HTMLCanvasElement(ref canvas) => {
// https://html.spec.whatwg.org/multipage/#check-the-usability-of-the-image-argument
if !canvas.is_valid() {
p.reject_error(Error::InvalidState);
return p;
}
if let Some((data, size)) = canvas.fetch_all_data() {
let data = data
.map(|data| data.to_vec())
.unwrap_or_else(|| vec![0; size.area() as usize * 4]);
let image_bitmap = ImageBitmap::new(&self, size.width, size.height).unwrap();
image_bitmap.set_bitmap_data(data);
image_bitmap.set_origin_clean(canvas.origin_is_clean());
p.resolve_native(&(image_bitmap));
}
p
},
ImageBitmapSource::OffscreenCanvas(ref canvas) => {
// https://html.spec.whatwg.org/multipage/#check-the-usability-of-the-image-argument
if !canvas.is_valid() {
p.reject_error(Error::InvalidState);
return p;
}
if let Some((data, size)) = canvas.fetch_all_data() {
let data = data
.map(|data| data.to_vec())
.unwrap_or_else(|| vec![0; size.area() as usize * 4]);
let image_bitmap = ImageBitmap::new(&self, size.width, size.height).unwrap();
image_bitmap.set_bitmap_data(data);
image_bitmap.set_origin_clean(canvas.origin_is_clean());
p.resolve_native(&(image_bitmap));
}
p
},
_ => {
p.reject_error(Error::NotSupported);
return p;
},
};
promise
}
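// Illustrative note (not in the original source): per the checks above, a
// zero resizeWidth or resizeHeight rejects the promise with InvalidState,
// only HTMLCanvasElement and OffscreenCanvas sources are handled (any other
// ImageBitmapSource rejects with NotSupported), and a canvas whose pixel data
// cannot be fetched leaves the promise pending.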
pub fn fire_timer(&self, handle: TimerEventId) {
self.timers.fire_timer(handle, self);
}
pub fn resume(&self) {
self.timers.resume();
}
pub fn suspend(&self) {
self.timers.suspend();
}
pub fn slow_down_timers(&self) {
self.timers.slow_down();
}
pub fn speed_up_timers(&self) {
self.timers.speed_up();
}
fn timer_source(&self) -> TimerSource {
if self.is::<Window>() {
return TimerSource::FromWindow(self.pipeline_id());
}
if self.is::<WorkerGlobalScope>() {
return TimerSource::FromWorker;
}
unreachable!();
}
/// Returns a boolean indicating whether the event loop
/// on which this global is running can continue running JS.
pub fn can_continue_running(&self) -> bool {
if self.downcast::<Window>().is_some() {
return ScriptThread::can_continue_running();
}
if let Some(worker) = self.downcast::<WorkerGlobalScope>() {
return !worker.is_closing();
}
// TODO: plug worklets into this.
true
}
/// Returns the task canceller of this global to ensure that everything is
/// properly cancelled when the global scope is destroyed.
pub fn task_canceller(&self, name: TaskSourceName) -> TaskCanceller {
if let Some(window) = self.downcast::<Window>() {
return window.task_manager().task_canceller(name);
}
if let Some(worker) = self.downcast::<WorkerGlobalScope>() {
// Note: the "name" is not passed to the worker,
// because 'closing' it only requires one task canceller for all task sources.
// https://html.spec.whatwg.org/multipage/#dom-workerglobalscope-closing
return worker.task_canceller();
}
unreachable!();
}
/// Perform a microtask checkpoint.
pub fn perform_a_microtask_checkpoint(&self) {
// Only perform the checkpoint if we're not shutting down.
if self.can_continue_running() {
self.microtask_queue.checkpoint(
self.get_cx(),
|_| Some(DomRoot::from_ref(self)),
vec![DomRoot::from_ref(self)],
);
}
}
/// Enqueue a microtask for subsequent execution.
pub fn enqueue_microtask(&self, job: Microtask) {
self.microtask_queue.enqueue(job, self.get_cx());
}
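// Illustrative note (not in the original source): queue_function_as_microtask
// above feeds this same queue; a callback enqueued as Microtask::User runs at
// the next perform_a_microtask_checkpoint(), and that checkpoint is skipped
// entirely once can_continue_running() reports the event loop is shutting
// down.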
/// Create a new sender/receiver pair that can be used to implement an on-demand
/// event loop. Used for implementing web APIs that require blocking semantics
/// without resorting to nested event loops.
pub fn new_script_pair(&self) -> (Box<dyn ScriptChan + Send>, Box<dyn ScriptPort + Send>) {
if let Some(window) = self.downcast::<Window>() {
return window.new_script_pair();
}
if let Some(worker) = self.downcast::<WorkerGlobalScope>() {
return worker.new_script_pair();
}
unreachable!();
}
/// Returns the microtask queue of this global.
pub fn microtask_queue(&self) -> &Rc<MicrotaskQueue> {
&self.microtask_queue
}
/// Process a single event as if it were the next event
/// in the queue of the event loop on which this global scope is running.
/// Returns a boolean indicating whether further events should be processed.
pub fn process_event(&self, msg: CommonScriptMsg) -> bool {
if self.is::<Window>() {
return ScriptThread::process_event(msg);
}
if let Some(worker) = self.downcast::<WorkerGlobalScope>() {
return worker.process_event(msg);
}
unreachable!();
}
pub fn dom_manipulation_task_source(&self) -> DOMManipulationTaskSource {
if let Some(window) = self.downcast::<Window>() {
return window.task_manager().dom_manipulation_task_source();
}
if let Some(worker) = self.downcast::<WorkerGlobalScope>() {
return worker.dom_manipulation_task_source();
}
unreachable!();
}
/// Channel to send messages to the file reading task source of
/// this global scope.
pub fn file_reading_task_source(&self) -> FileReadingTaskSource {
if let Some(window) = self.downcast::<Window>() {
return window.task_manager().file_reading_task_source();
}
if let Some(worker) = self.downcast::<WorkerGlobalScope>() {
return worker.file_reading_task_source();
}
unreachable!();
}
pub fn runtime_handle(&self) -> ParentRuntime {
if self.is::<Window>() {
ScriptThread::runtime_handle()
} else if let Some(worker) = self.downcast::<WorkerGlobalScope>() {
worker.runtime_handle()
} else {
unreachable!()
}
}
/// Returns the ["current"] global object.
///
/// ["current"]: https://html.spec.whatwg.org/multipage/#current
#[allow(unsafe_code)]
pub fn current() -> Option<DomRoot<Self>> {
unsafe {
let cx = Runtime::get();
assert!(!cx.is_null());
let global = CurrentGlobalOrNull(cx);
if global.is_null() {
None
} else {
Some(global_scope_from_global(global, cx))
}
}
}
/// Returns the ["entry"] global object.
///
/// ["entry"]: https://html.spec.whatwg.org/multipage/#entry
pub fn entry() -> DomRoot<Self> {
entry_global()
}
/// Returns the ["incumbent"] global object.
///
/// ["incumbent"]: https://html.spec.whatwg.org/multipage/#incumbent
pub fn incumbent() -> Option<DomRoot<Self>> {
incumbent_global()
}
pub fn performance(&self) -> DomRoot<Performance> {
if let Some(window) = self.downcast::<Window>() {
return window.Performance();
}
if let Some(worker) = self.downcast::<WorkerGlobalScope>() {
return worker.Performance();
}
unreachable!();
}
/// Channel to send messages to the performance timeline task source
/// of this global scope.
pub fn performance_timeline_task_source(&self) -> PerformanceTimelineTaskSource {
if let Some(window) = self.downcast::<Window>() {
return window.task_manager().performance_timeline_task_source();
}
if let Some(worker) = self.downcast::<WorkerGlobalScope>() {
return worker.performance_timeline_task_source();
}
unreachable!();
}
// https://w3c.github.io/performance-timeline/#supportedentrytypes-attribute
pub fn supported_performance_entry_types(&self, cx: SafeJSContext) -> JSVal {
if let Some(types) = &*self.frozen_supported_performance_entry_types.borrow() {
return types.get();
}
let types: Vec<DOMString> = VALID_ENTRY_TYPES
.iter()
.map(|t| DOMString::from(t.to_string()))
.collect();
let frozen_types = to_frozen_array(types.as_slice(), cx);
// Safety: need to create the Heap value in its final memory location before setting it.
*self.frozen_supported_performance_entry_types.borrow_mut() = Some(Heap::default());
self.frozen_supported_performance_entry_types
.borrow()
.as_ref()
.unwrap()
.set(frozen_types);
frozen_types
}
pub fn is_headless(&self) -> bool {
self.is_headless
}
pub fn get_user_agent(&self) -> Cow<'static, str> {
self.user_agent.clone()
}
pub fn get_https_state(&self) -> HttpsState {
self.https_state.get()
}
pub fn set_https_state(&self, https_state: HttpsState) {
self.https_state.set(https_state);
}
pub fn is_secure_context(&self) -> bool {
if Some(false) == self.inherited_secure_context {
return false;
}
if let Some(creation_url) = self.creation_url() {
if creation_url.scheme() == "blob" && Some(true) == self.inherited_secure_context {
return true;
}
return creation_url.is_potentially_trustworthy();
}
false
}
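// Illustrative examples (not in the original source) of the rules above: an
// inherited_secure_context of Some(false) forces the result to false; a
// creation URL such as https://example.org is potentially trustworthy, so the
// context is secure; and a blob: creation URL is secure only when the context
// it was inherited from was itself secure.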
/// <https://www.w3.org/TR/CSP/#get-csp-of-object>
pub fn get_csp_list(&self) -> Option<CspList> {
if let Some(window) = self.downcast::<Window>() {
return window.Document().get_csp_list().map(|c| c.clone());
}
// TODO: Worker and Worklet global scopes.
None
}
pub fn wgpu_id_hub(&self) -> Arc<Mutex<Identities>> {
self.gpu_id_hub.clone()
}
pub fn add_gpu_device(&self, device: &GPUDevice) {
self.gpu_devices
.borrow_mut()
.insert(device.id(), Dom::from_ref(device));
}
pub fn remove_gpu_device(&self, device: WebGPUDevice) {
let _ = self.gpu_devices.borrow_mut().remove(&device);
}
pub fn handle_wgpu_msg(
&self,
device: WebGPUDevice,
scope: Option<ErrorScopeId>,
result: WebGPUOpResult,
) {
self.gpu_devices
.borrow()
.get(&device)
.expect("GPUDevice not found")
.handle_server_msg(scope, result);
}
pub(crate) fn current_group_label(&self) -> Option<DOMString> {
self.console_group_stack
.borrow()
.last()
.map(|label| DOMString::from(format!("[{}]", label)))
}
pub(crate) fn push_console_group(&self, group: DOMString) {
self.console_group_stack.borrow_mut().push(group);
}
pub(crate) fn pop_console_group(&self) {
let _ = self.console_group_stack.borrow_mut().pop();
}
pub(crate) fn dynamic_module_list(&self) -> RefMut<DynamicModuleList> {
self.dynamic_modules.borrow_mut()
}
}
fn timestamp_in_ms(time: Timespec) -> u64 {
(time.sec * 1000 + (time.nsec / 1000000) as i64) as u64
}
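// Worked example (not in the original source): for a Timespec of
// { sec: 2, nsec: 500_000_000 }, this returns
// 2 * 1000 + 500_000_000 / 1_000_000 = 2500 milliseconds.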
/// Returns the Rust global scope from a JS global object.
#[allow(unsafe_code)]
unsafe fn global_scope_from_global(
global: *mut JSObject,
cx: *mut JSContext,
) -> DomRoot<GlobalScope> {
assert!(!global.is_null());
let clasp = get_object_class(global);
assert_ne!(
((*clasp).flags & (JSCLASS_IS_DOMJSCLASS | JSCLASS_IS_GLOBAL)),
0
);
root_from_object(global, cx).unwrap()
}
/// Returns the Rust global scope from a JS global object.
#[allow(unsafe_code)]
unsafe fn global_scope_from_global_static(global: *mut JSObject) -> DomRoot<GlobalScope> {
assert!(!global.is_null());
let clasp = get_object_class(global);
assert_ne!(
((*clasp).flags & (JSCLASS_IS_DOMJSCLASS | JSCLASS_IS_GLOBAL)),
0
);
root_from_object_static(global).unwrap()
}
| |
<|file_name|>unloggedin.py<|end_file_name|>
"""
Commands that are available from the connect screen.
"""
import re
import traceback
from django.conf import settings
from src.players.models import PlayerDB
from src.objects.models import ObjectDB
from src.server.models import ServerConfig
from src.comms.models import Channel
from src.utils import create, logger, utils, ansi
from src.commands.default.muxcommand import MuxCommand
from src.commands.cmdhandler import CMD_LOGINSTART
# limit symbol import for API
__all__ = ("CmdUnconnectedConnect", "CmdUnconnectedCreate", "CmdUnconnectedQuit", "CmdUnconnectedLook", "CmdUnconnectedHelp", "Magic")
CONNECTION_SCREEN_MODULE = settings.CONNECTION_SCREEN_MODULE
CONNECTION_SCREEN = ""
try:
CONNECTION_SCREEN = ansi.parse_ansi(utils.string_from_module(CONNECTION_SCREEN_MODULE))
except Exception:
pass
if not CONNECTION_SCREEN:
CONNECTION_SCREEN = "\nEvennia: Error in CONNECTION_SCREEN MODULE (randomly picked connection screen variable is not a string). \nEnter 'help' for aid."
class Magic(MuxCommand):
"""
Hidden command for the web client's magic cookie authenticator.
"""
key = "magic"
def func(self):
session = self.caller
player = PlayerDB.objects.player_search(self.lhs)
if len(player) != 1:
player = None
else:
player = player[0]
if player.name.lower() != self.lhs.lower():
player=None
pswd = None
if player:
pswd = self.rhs == player.db.magic_cookie
if not (player and pswd):
# No playername or password match
session.msg("Could not verify Magic Cookie. Please email the server administrator for assistance.")
return
# Check IP and/or name bans
bans = ServerConfig.objects.conf("server_bans")
if bans and (any(tup[0]==player.name for tup in bans)
or
any(tup[2].match(session.address[0]) for tup in bans if tup[2])):
# this is a banned IP or name!
string = "{rYou have been banned and cannot continue from here."
string += "\nIf you feel this ban is in error, please email an admin.{x"
session.msg(string)
session.execute_cmd("quit")
return
session.sessionhandler.login(session, player)
class Connect(MuxCommand):
"""
Connect to the game.
Usage (at login screen):
connect playername password
connect "player name" "pass word"
Use the create command to first create an account before logging in.
If you have spaces in your name, enclose it in quotes.
"""
key = "connect"
aliases = ["conn", "con", "co"]
locks = "cmd:all()" # not really needed
def func(self):
"""
Uses the Django admin api. Note that unlogged-in commands
have a unique position in that their func() receives
a session object instead of a source_object like all
other types of logged-in commands (this is because
there is no object yet before the player has logged in)
"""
session = self.caller
args = self.args
# extract quoted parts
parts = [part.strip() for part in re.split(r"\"|\'", args) if part.strip()]
if len(parts) == 1:
# this was (hopefully) due to no quotes being found
parts = parts[0].split(None, 1)
if len(parts) != 2:
session.msg("\n\r Usage (without <>): connect <name> <password>")
return
playername, password = parts
# Match account name and check password
player = PlayerDB.objects.player_search(playername)
if len(player) != 1:
player = None
else:
player = player[0]
if player.name.lower() != playername.lower():
player=None
pswd = None
if player:
pswd = player.check_password(password)
if not (player and pswd):
# No playername or password match
string = "Wrong login information given.\nIf you have spaces in your name or "
string += "password, don't forget to enclose it in quotes. Also capitalization matters."
string += "\nIf you are new you should first create a new account "
string += "using the 'create' command."
session.msg(string)
return
# Check IP and/or name bans
bans = ServerConfig.objects.conf("server_bans")
if bans and (any(tup[0]==player.name for tup in bans)
or
any(tup[2].match(session.address[0]) for tup in bans if tup[2])):
# this is a banned IP or name!
string = "{rYou have been banned and cannot continue from here."
string += "\nIf you feel this ban is in error, please email an admin.{x"
session.msg(string)
session.execute_cmd("quit")
return
# actually do the login. This will call all other hooks:
# session.at_init()
# if character:
# at_first_login() # only once
# at_pre_login()
# player.at_post_login() - calls look if no character is set
# character.at_post_login() - this calls look command by default
session.sessionhandler.login(session, player)
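# Illustrative note (not part of the original module): the quote handling in
# func() accepts both forms documented above, e.g.
#
#   connect Anna c67jHL8p                     -> ("Anna", "c67jHL8p")
#   connect "Anna the Barbarian" "pass word"  -> ("Anna the Barbarian", "pass word")
#
# re.split(r"\"|\'", ...) yields the quoted chunks, and the single-element
# fallback covers the unquoted form via split(None, 1).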
class Create(MuxCommand):
"""
Create a new account.
Usage (at login screen):
create <playername> <password>
create "player name" "pass word"
This creates a new player account.
If you have spaces in your name, enclose it in quotes.
"""
key = "create"
aliases = ["cre", "cr"]
locks = "cmd:all()"
def func(self):
"Do checks and create account"
session = self.caller
args = self.args.strip()
# extract quoted parts
parts = [part.strip() for part in re.split(r"\"|\'", args) if part.strip()]
if len(parts) == 1:
# this was (hopefully) due to no quotes being found
parts = parts[0].split(None, 1)
if len(parts) != 2:
string = "\n Usage (without <>): create <name> <password>"
string += "\nIf <name> or <password> contains spaces, enclose it in quotes."
session.msg(string)
return
playername, password = parts
print "playername '%s', password: '%s'" % (playername, password)
# sanity checks
if not re.findall('^[\w. @+-]+$', playername) or not (0 < len(playername) <= 30):
# this echoes the restrictions made by django's auth module (except not
# allowing spaces, for convenience of logging in).
string = "\n\r Playername must be 30 characters or fewer. Letters, spaces, digits and @/./+/-/_ only."
session.msg(string)
return
# strip excessive spaces in playername
playername = re.sub(r"\s+", " ", playername).strip()
if PlayerDB.objects.filter(user__username__iexact=playername) or PlayerDB.objects.filter(username__iexact=playername):
# player already exists (we also ignore capitalization here)
session.msg("Sorry, there is already a player with the name '%s'." % playername)
return
if not re.findall('^[\w. @+-]+$', password) or not (3 < len(password)):
string = "\n\r Password should be longer than 3 characters. Letters, spaces, digits and @/./+/-/_ only."
string += "\nFor best security, make it longer than 8 characters. You can also use a phrase of"
string += "\nmany words if you enclose the password in quotes."
session.msg(string)
return
# everything's ok. Create the new player account.
try:
default_home = ObjectDB.objects.get_id(settings.CHARACTER_DEFAULT_HOME)
typeclass = settings.BASE_CHARACTER_TYPECLASS
permissions = settings.PERMISSION_PLAYER_DEFAULT
try:
new_character = create.create_player(playername, None, password,
permissions=permissions,
character_typeclass=typeclass,
character_location=default_home,
character_home=default_home)
except Exception as e:
session.msg("There was an error creating the default Character/Player:\n%s\n If this problem persists, contact an admin." % e)
return
new_player = new_character.player
# This needs to be called so the engine knows this player is logging in for the first time.
# (so it knows to call the right hooks during login later)
utils.init_new_player(new_player)
# join the new player to the public channel
pchanneldef = settings.CHANNEL_PUBLIC
if pchanneldef:
pchannel = Channel.objects.get_channel(pchanneldef[0])
if not pchannel.connect_to(new_player):
string = "New player '%s' could not connect to public channel!" % new_player.key
logger.log_errmsg(string)
# allow only the character itself and the player to puppet this character (and Immortals).
new_character.locks.add("puppet:id(%i) or pid(%i) or perm(Immortals) or pperm(Immortals)" %
(new_character.id, new_player.id))
# If no description is set, set a default description
if not new_character.db.desc:
new_character.db.desc = "This is a Player."
# tell the caller everything went well.
string = "A new account '%s' was created. Welcome!"
if " " in playername:
string += "\n\nYou can now log in with the command 'connect \"%s\" <your password>'."
else:
string += "\n\nYou can now log in with the command 'connect %s <your password>'."
session.msg(string % (playername, playername))
except Exception:
# We are in the middle between logged in and -not, so we have to handle tracebacks
# ourselves at this point. If we don't, we won't see any errors at all.
string = "%s\nThis is a bug. Please e-mail an admin if the problem persists."
session.msg(string % (traceback.format_exc()))
logger.log_errmsg(traceback.format_exc())
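# Illustrative note (not part of the original module): the sanity checks in
# Create.func() accept names such as "Anna", "Anna the Barbarian" or
# "r2.d2@example", reject names over 30 characters or containing other
# punctuation, and require passwords of at least 4 characters; quotes allow a
# multi-word passphrase, e.g. create "Anna" "correct horse battery staple".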
class CmdUnconnectedQuit(MuxCommand):
"""
We maintain a different version of the quit command
here for unconnected players for the sake of simplicity. The logged in
version is a bit more complicated.
"""
key = "quit"
aliases = ["q", "qu"]
locks = "cmd:all()"
def func(self):
"Simply close the connection."
session = self.caller
session.msg("Good bye! Disconnecting ...")
session.session_disconnect()
class CmdUnconnectedLook(MuxCommand):
"""
This is an unconnected version of the look command for simplicity.
This is called by the server and kicks everything in gear.
All it does is display the connect screen.
"""
key = CMD_LOGINSTART
aliases = ["look", "l"]
locks = "cmd:all()"
def func(self):
"Show the connect screen."
self.caller.msg(CONNECTION_SCREEN)
class CmdUnconnectedHelp(MuxCommand):
"""
This is an unconnected version of the help command,
for simplicity. It shows a pane of info.
"""
key = "help"
aliases = ["h", "?"]
locks = "cmd:all()"
def func(self):
"Shows help"
string = \
"""
You are not yet logged into the game. Commands available at this point:
{wcreate, connect, look, help, quit{n
To login to the system, you need to do one of the following:
{w1){n If you have no previous account, you need to use the 'create'
command.
{wcreate Anna c67jHL8p{n
Note that if you use spaces in your name, you have to enclose in quotes.
{wcreate "Anna the Barbarian" c67jHL8p{n
It's always a good idea (not only here, but everywhere on the net)
to not use a regular word for your password. Make it longer than
6 characters or write a passphrase.
{w2){n If you have an account already, either because you just created
one in {w1){n above or you are returning, use the 'connect' command:
{wconnect Anna c67jHL8p{n
(Again, if there are spaces in the name you have to enclose it in quotes).
This should log you in. Run {whelp{n again once you're logged in
to get more aid. Hope you enjoy your stay!
You can use the {wlook{n command if you want to see the connect screen again.
"""
self.caller.msg(string)
|
|
<|file_name|>validation_pool.py<|end_file_name|>
# Copyright (c) 2011-2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module that handles interactions with a Validation Pool.
The validation pool is the set of commits that are ready to be validated i.e.
ready for the commit queue to try.
"""
import ConfigParser
import contextlib
import cPickle
import functools
import httplib
import logging
import os
import sys
import time
import urllib
from xml.dom import minidom
from chromite.cbuildbot import cbuildbot_config
from chromite.cbuildbot import failures_lib
from chromite.cbuildbot import constants
from chromite.cbuildbot import portage_utilities
from chromite.cbuildbot import lkgm_manager
from chromite.cbuildbot import manifest_version
from chromite.cbuildbot import tree_status
from chromite.lib import cros_build_lib
from chromite.lib import gerrit
from chromite.lib import git
from chromite.lib import gob_util
from chromite.lib import gs
from chromite.lib import parallel
from chromite.lib import patch as cros_patch
from chromite.lib import timeout_util
# Third-party libraries bundled with chromite need to be listed after the
# first chromite import.
import digraph
# We import mox so that w/in ApplyPoolIntoRepo, if a mox exception is
# thrown, we don't cover it up.
try:
import mox
except ImportError:
mox = None
PRE_CQ = constants.PRE_CQ
CQ = constants.CQ
# The gerrit-on-borg team tells us that delays up to 2 minutes can be
# normal. Setting timeout to 3 minutes to be safe-ish.
SUBMITTED_WAIT_TIMEOUT = 3 * 60 # Time in seconds.
class TreeIsClosedException(Exception):
"""Raised when the tree is closed and we wanted to submit changes."""
def __init__(self, closed_or_throttled=False):
"""Initialization.
Args:
closed_or_throttled: True if the exception is being thrown on a
possibly 'throttled' tree. False if only
thrown on a 'closed' tree. Default: False
"""
if closed_or_throttled:
status_text = 'closed or throttled'
opposite_status_text = 'open'
else:
status_text = 'closed'
opposite_status_text = 'throttled or open'
super(TreeIsClosedException, self).__init__(
'Tree is %s. Please set tree status to %s to '
'proceed.' % (status_text, opposite_status_text))
class FailedToSubmitAllChangesException(failures_lib.StepFailure):
"""Raised if we fail to submit any change."""
def __init__(self, changes):
super(FailedToSubmitAllChangesException, self).__init__(
'FAILED TO SUBMIT ALL CHANGES: Could not verify that changes %s were '
'submitted' % ' '.join(str(c) for c in changes))
class FailedToSubmitAllChangesNonFatalException(
FailedToSubmitAllChangesException):
"""Raised if we fail to submit any change due to non-fatal errors."""
class InternalCQError(cros_patch.PatchException):
"""Exception thrown when CQ has an unexpected/unhandled error."""
def __init__(self, patch, message):
cros_patch.PatchException.__init__(self, patch, message=message)
def ShortExplanation(self):
return 'failed to apply due to a CQ issue: %s' % (self.message,)
class NoMatchingChangeFoundException(Exception):
"""Raised if we try to apply a non-existent change."""
class ChangeNotInManifestException(Exception):
"""Raised if we try to apply a not-in-manifest change."""
class PatchNotCommitReady(cros_patch.PatchException):
"""Raised if a patch is not marked as commit ready."""
def ShortExplanation(self):
return 'isn\'t marked as Commit-Ready anymore.'
class PatchModified(cros_patch.PatchException):
"""Raised if a patch is modified while the CQ is running."""
def ShortExplanation(self):
return 'was modified while the CQ was in the middle of testing it.'
class PatchRejected(cros_patch.PatchException):
"""Raised if a patch was rejected by the CQ because the CQ failed."""
def ShortExplanation(self):
return 'was rejected by the CQ.'
class PatchFailedToSubmit(cros_patch.PatchException):
"""Raised if we fail to submit a change."""
def ShortExplanation(self):
error = 'could not be submitted by the CQ.'
if self.message:
error += ' The error message from Gerrit was: %s' % (self.message,)
else:
error += ' The Gerrit server might be having trouble.'
return error
class PatchConflict(cros_patch.PatchException):
"""Raised if a patch needs to be rebased."""
def ShortExplanation(self):
return ('could not be submitted because Gerrit reported a conflict. Did '
'you modify your patch during the CQ run? Or do you just need to '
'rebase?')
class PatchSubmittedWithoutDeps(cros_patch.DependencyError):
"""Exception thrown when a patch was submitted incorrectly."""
def ShortExplanation(self):
dep_error = cros_patch.DependencyError.ShortExplanation(self)
return ('was submitted, even though it %s\n'
'\n'
'You may want to revert your patch, and investigate why its '
'dependencies failed to submit.\n'
'\n'
'This error only occurs when we have a dependency cycle, and we '
'submit one change before realizing that a later change cannot '
'be submitted.' % (dep_error,))
class PatchSeriesTooLong(cros_patch.PatchException):
"""Exception thrown when a required dep isn't satisfied."""
def __init__(self, patch, max_length):
cros_patch.PatchException.__init__(self, patch)
self.max_length = max_length
def ShortExplanation(self):
return ("The Pre-CQ cannot handle a patch series longer than %s patches. "
"Please wait for some patches to be submitted before marking more "
"patches as ready. " % (self.max_length,))
def __str__(self):
return self.ShortExplanation()
def _RunCommand(cmd, dryrun):
"""Runs the specified shell cmd if dryrun=False.
Errors are ignored, but logged.
"""
if dryrun:
logging.info('Would have run: %s', ' '.join(cmd))
return
try:
cros_build_lib.RunCommand(cmd)
except cros_build_lib.RunCommandError:
cros_build_lib.Error('Command failed', exc_info=True)
def GetStagesToIgnoreFromConfigFile(config_path):
"""Get a list of stage name prefixes to ignore from |config_path|.
This function reads the specified config file and returns the list
of stage name prefixes to ignore in the CQ. See GetStagesToIgnoreForChange
for more details.
Args:
config_path: The path to the config file to read.
"""
ignored_stages = []
parser = ConfigParser.SafeConfigParser()
try:
parser.read(config_path)
if parser.has_option('GENERAL', 'ignored-stages'):
ignored_stages = parser.get('GENERAL', 'ignored-stages').split()
except ConfigParser.Error:
cros_build_lib.Error('Error parsing %r', config_path, exc_info=True)
return ignored_stages
def GetStagesToIgnoreForChange(build_root, change):
"""Get a list of stages that the CQ should ignore for a given |change|.
The list of stage name prefixes to ignore for each project is specified in a
config file inside the project, named COMMIT-QUEUE.ini. The file would look
like this:
[GENERAL]
ignored-stages: HWTest VMTest
The CQ will submit changes to the given project even if the listed stages
failed. These strings are stage name prefixes, meaning that "HWTest" would
match any HWTest stage (e.g. "HWTest [bvt]" or "HWTest [foo]")
Args:
build_root: The root of the checkout.
change: Change to examine.
Returns:
A list of stages to ignore for the given |change|.
"""
manifest = git.ManifestCheckout.Cached(build_root)
checkout = change.GetCheckout(manifest)
if checkout:
dirname = checkout.GetPath(absolute=True)
path = os.path.join(dirname, 'COMMIT-QUEUE.ini')
return GetStagesToIgnoreFromConfigFile(path)
return []
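# Illustrative sketch (not part of the original module): given a project
# checkout containing a COMMIT-QUEUE.ini such as
#
#   [GENERAL]
#   ignored-stages: HWTest VMTest
#
# GetStagesToIgnoreFromConfigFile() returns ['HWTest', 'VMTest'], and
# GetStagesToIgnoreForChange() locates that file via the change's checkout in
# the manifest before delegating to it. The path below is hypothetical:
#
#   prefixes = GetStagesToIgnoreFromConfigFile(
#       '/path/to/project/COMMIT-QUEUE.ini')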
class GerritHelperNotAvailable(gerrit.GerritException):
"""Exception thrown when a specific helper is requested but unavailable."""
def __init__(self, remote=constants.EXTERNAL_REMOTE):
gerrit.GerritException.__init__(self)
# Stringify the pool so that serialization doesn't try serializing
# the actual HelperPool.
self.remote = remote
self.args = (remote,)
def __str__(self):
return (
"Needed a remote=%s gerrit_helper, but one isn't allowed by this "
"HelperPool instance.") % (self.remote,)
class HelperPool(object):
"""Pool of allowed GerritHelpers to be used by CQ/PatchSeries."""
def __init__(self, cros_internal=None, cros=None):
"""Initialize this instance with the given handlers.
Most likely you want the classmethod SimpleCreate which takes boolean
options.
If a given handler is None, then it's disabled; else the passed in
object is used.
"""
self.pool = {
constants.EXTERNAL_REMOTE : cros,
constants.INTERNAL_REMOTE : cros_internal
}
@classmethod
def SimpleCreate(cls, cros_internal=True, cros=True):
"""Classmethod helper for creating a HelperPool from boolean options.
Args:
cros_internal: If True, allow access to a GerritHelper for internal.
cros: If True, allow access to a GerritHelper for external.
Returns:
An appropriately configured HelperPool instance.
"""
if cros:
cros = gerrit.GetGerritHelper(constants.EXTERNAL_REMOTE)
else:
cros = None
if cros_internal:
cros_internal = gerrit.GetGerritHelper(constants.INTERNAL_REMOTE)
else:
cros_internal = None
return cls(cros_internal=cros_internal, cros=cros)
def ForChange(self, change):
"""Return the helper to use for a particular change.
If no helper is configured, an Exception is raised.
"""
return self.GetHelper(change.remote)
def GetHelper(self, remote):
"""Return the helper to use for a given remote.
If no helper is configured, an Exception is raised.
"""
helper = self.pool.get(remote)
if not helper:
raise GerritHelperNotAvailable(remote)
return helper
def __iter__(self):
for helper in self.pool.itervalues():
if helper:
yield helper
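# Illustrative sketch (not part of the original module): an external-only
# pool, for builders without internal Gerrit access.
#
#   pool = HelperPool.SimpleCreate(cros_internal=False, cros=True)
#   pool.GetHelper(constants.EXTERNAL_REMOTE)  # returns a GerritHelper
#   pool.GetHelper(constants.INTERNAL_REMOTE)  # raises GerritHelperNotAvailable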
def _PatchWrapException(functor):
"""Decorator to intercept patch exceptions and wrap them.
Specifically, for known/handled exceptions, it intercepts them and
converts them into a DependencyError, preserving the original cause
while casting them into an easier-to-use form (one that can also be
chained).
"""
def f(self, parent, *args, **kwargs):
try:
return functor(self, parent, *args, **kwargs)
except gerrit.GerritException as e:
if isinstance(e, gerrit.QueryNotSpecific):
e = ("%s\nSuggest you use gerrit numbers instead (prefixed with a * "
"if it's an internal change)." % e)
new_exc = cros_patch.PatchException(parent, e)
raise new_exc.__class__, new_exc, sys.exc_info()[2]
except cros_patch.PatchException as e:
if e.patch.id == parent.id:
raise
new_exc = cros_patch.DependencyError(parent, e)
raise new_exc.__class__, new_exc, sys.exc_info()[2]
f.__name__ = functor.__name__
return f
class PatchSeries(object):
"""Class representing a set of patches applied to a single git repository."""
def __init__(self, path, helper_pool=None, forced_manifest=None,
deps_filter_fn=None, is_submitting=False):
"""Constructor.
Args:
path: Path to the buildroot.
helper_pool: Pool of allowed GerritHelpers to be used for fetching
patches. Defaults to allowing both internal and external fetches.
forced_manifest: A manifest object to use for mapping projects to
repositories. Defaults to the buildroot.
deps_filter_fn: A function which specifies what patches you would
like to accept. It is passed a patch and is expected to return
True or False.
is_submitting: Whether we are currently submitting patchsets. This is
used to print better error messages.
"""
self.manifest = forced_manifest
if helper_pool is None:
helper_pool = HelperPool.SimpleCreate(cros_internal=True, cros=True)
self._helper_pool = helper_pool
self._path = path
if deps_filter_fn is None:
deps_filter_fn = lambda x: True
self.deps_filter_fn = deps_filter_fn
self._is_submitting = is_submitting
self.applied = []
self.failed = []
self.failed_tot = {}
# A mapping of ChangeId to exceptions if the patch failed against
# ToT. Primarily used to keep the resolution/applying from going
# down known bad paths.
self._committed_cache = cros_patch.PatchCache()
self._lookup_cache = cros_patch.PatchCache()
self._change_deps_cache = {}
def _ManifestDecorator(functor):
"""Method decorator that sets self.manifest automatically.
This function automatically initializes the manifest, and allows callers to
override the manifest if needed.
"""
# pylint: disable=E0213,W0212,E1101,E1102
def f(self, *args, **kwargs):
manifest = kwargs.pop('manifest', None)
# Wipe is used to track if we need to reset manifest to None, and
# to identify if we already had a forced_manifest via __init__.
wipe = self.manifest is None
if manifest:
if not wipe:
raise ValueError("manifest can't be specified when one is forced "
"via __init__")
elif wipe:
manifest = git.ManifestCheckout.Cached(self._path)
else:
manifest = self.manifest
try:
self.manifest = manifest
return functor(self, *args, **kwargs)
finally:
if wipe:
self.manifest = None
f.__name__ = functor.__name__
f.__doc__ = functor.__doc__
return f
@_ManifestDecorator
def GetGitRepoForChange(self, change, strict=False):
"""Get the project path associated with the specified change.
Args:
change: The change to operate on.
strict: If True, throw ChangeNotInManifestException rather than returning
None. Default: False.
Returns:
The project path if found in the manifest. Otherwise returns
None (if strict=False).
"""
project_dir = None
if self.manifest:
checkout = change.GetCheckout(self.manifest, strict=strict)
if checkout is not None:
project_dir = checkout.GetPath(absolute=True)
return project_dir
@_ManifestDecorator
def ApplyChange(self, change):
# Always enable content merging.
return change.ApplyAgainstManifest(self.manifest, trivial=False)
def _LookupHelper(self, patch):
"""Returns the helper for the given cros_patch.PatchQuery object."""
return self._helper_pool.GetHelper(patch.remote)
def _GetGerritPatch(self, query):
"""Query the configured helpers looking for a given change.
Args:
query: A cros_patch.PatchQuery object identifying the change to look up.
Returns:
A GerritPatch object.
"""
helper = self._LookupHelper(query)
query_text = query.ToGerritQueryText()
change = helper.QuerySingleRecord(
query_text, must_match=not git.IsSHA1(query_text))
if not change:
return
# If the query was a gerrit number based query, check the projects/change-id
# to see if we already have it locally, but couldn't map it since we didn't
# know the gerrit number at the time of the initial injection.
existing = self._lookup_cache[change]
if cros_patch.ParseGerritNumber(query_text) and existing is not None:
keys = change.LookupAliases()
self._lookup_cache.InjectCustomKeys(keys, existing)
return existing
self.InjectLookupCache([change])
if change.IsAlreadyMerged():
self.InjectCommittedPatches([change])
return change
def _LookupUncommittedChanges(self, deps, limit_to=None):
"""Given a set of deps (changes), return unsatisfied dependencies.
Args:
deps: A list of cros_patch.PatchQuery objects representing
sequence of dependencies for the leaf that we need to identify
as either merged, or needing resolving.
limit_to: If non-None, then this must be a mapping (preferably a
cros_patch.PatchCache for translation reasons) of which non-committed
changes are allowed to be used for a transaction.
Returns:
A sequence of cros_patch.GitRepoPatch instances (or derivatives) that
need to be resolved for this change to be mergable.
"""
unsatisfied = []
for dep in deps:
if dep in self._committed_cache:
continue
try:
self._LookupHelper(dep)
except GerritHelperNotAvailable:
# Internal dependencies are irrelevant to external builders.
logging.info("Skipping internal dependency: %s", dep)
continue
dep_change = self._lookup_cache[dep]
if dep_change is None:
dep_change = self._GetGerritPatch(dep)
if dep_change is None:
continue
if getattr(dep_change, 'IsAlreadyMerged', lambda: False)():
continue
elif limit_to is not None and dep_change not in limit_to:
if self._is_submitting:
raise PatchRejected(dep_change)
else:
raise PatchNotCommitReady(dep_change)
unsatisfied.append(dep_change)
# Perform last minute custom filtering.
return [x for x in unsatisfied if self.deps_filter_fn(x)]
def CreateTransaction(self, change, limit_to=None):
"""Given a change, resolve it into a transaction.
In this case, a transaction is defined as a group of commits that
must land for the given change to be merged- specifically its
parent deps, and its CQ-DEPEND.
Args:
change: A cros_patch.GitRepoPatch instance to generate a transaction
for.
limit_to: If non-None, limit the allowed uncommitted patches to
what's in that container/mapping.
Returns:
A sequence of the necessary cros_patch.GitRepoPatch objects for
this transaction.
"""
plan = []
gerrit_deps_seen = cros_patch.PatchCache()
cq_deps_seen = cros_patch.PatchCache()
self._AddChangeToPlanWithDeps(change, plan, gerrit_deps_seen,
cq_deps_seen, limit_to=limit_to)
return plan
def CreateTransactions(self, changes, limit_to=None):
"""Create a list of transactions from a list of changes.
Args:
changes: A list of cros_patch.GitRepoPatch instances to generate
transactions for.
limit_to: See CreateTransaction docs.
Returns:
A list of (change, plan, e) tuples for the given list of changes. The
plan represents the necessary GitRepoPatch objects for a given change. If
an exception occurs while creating the transaction, e will contain the
exception. (Otherwise, e will be None.)
"""
for change in changes:
try:
plan = self.CreateTransaction(change, limit_to=limit_to)
except cros_patch.PatchException as e:
yield (change, (), e)
else:
yield (change, plan, None)
def CreateDisjointTransactions(self, changes, max_txn_length=None):
"""Create a list of disjoint transactions from a list of changes.
Args:
changes: A list of cros_patch.GitRepoPatch instances to generate
transactions for.
max_txn_length: The maximum length of any given transaction. Optional.
By default, do not limit the length of transactions.
Returns:
A list of disjoint transactions and a list of exceptions. Each transaction
can be tried independently, without involving patches from other
transactions. Each change in the pool will be included in exactly one of the
transactions, unless the patch does not apply for some reason.
"""
# Gather the dependency graph for the specified changes.
deps, edges, failed = {}, {}, []
for change, plan, ex in self.CreateTransactions(changes, limit_to=changes):
if ex is not None:
logging.info('Failed creating transaction for %s: %s', change, ex)
failed.append(ex)
else:
# Save off the ordered dependencies of this change.
deps[change] = plan
# Mark every change in the transaction as bidirectionally connected.
for change_dep in plan:
edges.setdefault(change_dep, set()).update(plan)
# Calculate an unordered group of strongly connected components.
unordered_plans = digraph.StronglyConnectedComponents(list(edges), edges)
# Sort the groups according to our ordered dependency graph.
ordered_plans = []
for unordered_plan in unordered_plans:
ordered_plan, seen = [], set()
for change in unordered_plan:
# Iterate over the required CLs, adding them to our plan in order.
new_changes = list(dep_change for dep_change in deps[change]
if dep_change not in seen)
new_plan_size = len(ordered_plan) + len(new_changes)
if not max_txn_length or new_plan_size <= max_txn_length:
seen.update(new_changes)
ordered_plan.extend(new_changes)
if ordered_plan:
# We found a transaction that is <= max_txn_length. Process the
# transaction. Ignore the remaining patches for now; they will be
# processed later (once the current transaction has been pushed).
ordered_plans.append(ordered_plan)
else:
# We couldn't find any transactions that were <= max_txn_length.
# This should only happen if circular dependencies prevent us from
# truncating a long list of patches. Reject the whole set of patches
# and complain.
for change in unordered_plan:
failed.append(PatchSeriesTooLong(change, max_txn_length))
return ordered_plans, failed
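# Illustrative example (not part of the original module): if change A depends
# on change B (so A's plan is [B, A]) and change C is independent, the
# bidirectional edges place A and B in one strongly connected component and C
# in another, so CreateDisjointTransactions([A, B, C]) returns
# ([[B, A], [C]], []) and each plan can be applied and submitted on its own.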
@_PatchWrapException
def _AddChangeToPlanWithDeps(self, change, plan, gerrit_deps_seen,
cq_deps_seen, limit_to=None,
include_cq_deps=True):
"""Add a change and its dependencies into a |plan|.
Args:
change: The change to add to the plan.
plan: The list of changes to apply, in order. This function will append
|change| and any necessary dependencies to |plan|.
gerrit_deps_seen: The changes whose Gerrit dependencies have already been
processed.
cq_deps_seen: The changes whose CQ-DEPEND and Gerrit dependencies have
already been processed.
limit_to: If non-None, limit the allowed uncommitted patches to
what's in that container/mapping.
include_cq_deps: If True, include CQ dependencies in the list
of dependencies. Defaults to True.
Raises:
DependencyError: If we could not resolve a dependency.
GerritException or GOBError: If there is a failure in querying gerrit.
"""
if change in self._committed_cache:
return
# Get a list of the changes that haven't been committed.
# These are returned as cros_patch.PatchQuery objects.
gerrit_deps, cq_deps = self.GetDepsForChange(change)
# Only process the Gerrit dependencies for each change once. We prioritize
# Gerrit dependencies over CQ dependencies, since Gerrit dependencies might
# be required in order for the change to apply.
old_plan_len = len(plan)
if change not in gerrit_deps_seen:
gerrit_deps = self._LookupUncommittedChanges(
gerrit_deps, limit_to=limit_to)
gerrit_deps_seen.Inject(change)
for dep in gerrit_deps:
self._AddChangeToPlanWithDeps(dep, plan, gerrit_deps_seen, cq_deps_seen,
limit_to=limit_to, include_cq_deps=False)
# If there are cyclic dependencies, we might have already applied this
# patch as part of dependency resolution. If not, apply this patch.
if change not in plan:
plan.append(change)
# Process CQ deps last, so as to avoid circular dependencies between
# Gerrit dependencies and CQ dependencies.
if include_cq_deps and change not in cq_deps_seen:
cq_deps = self._LookupUncommittedChanges(
cq_deps, limit_to=limit_to)
cq_deps_seen.Inject(change)
for dep in plan[old_plan_len:] + cq_deps:
# Add the requested change (plus deps) to our plan, if it we aren't
# already in the process of doing that.
if dep not in cq_deps_seen:
self._AddChangeToPlanWithDeps(dep, plan, gerrit_deps_seen,
cq_deps_seen, limit_to=limit_to)
@_PatchWrapException
def GetDepChangesForChange(self, change):
"""Look up the Gerrit/CQ dependency changes for |change|.
Returns:
(gerrit_deps, cq_deps): The change's Gerrit dependencies and CQ
dependencies, as lists of GerritPatch objects.
Raises:
DependencyError: If we could not resolve a dependency.
GerritException or GOBError: If there is a failure in querying gerrit.
"""
gerrit_deps, cq_deps = self.GetDepsForChange(change)
def _DepsToChanges(deps):
dep_changes = []
unprocessed_deps = []
for dep in deps:
dep_change = self._committed_cache[dep]
if dep_change:
dep_changes.append(dep_change)
else:
unprocessed_deps.append(dep)
if unprocessed_deps:
dep_changes.extend(self._LookupUncommittedChanges(unprocessed_deps))
return dep_changes
return _DepsToChanges(gerrit_deps), _DepsToChanges(cq_deps)
@_PatchWrapException
def GetDepsForChange(self, change):
"""Look up the Gerrit/CQ deps for |change|.
Returns:
A tuple of PatchQuery objects representing change's Gerrit
dependencies, and CQ dependencies.
Raises:
DependencyError: If we could not resolve a dependency.
GerritException or GOBError: If there is a failure in querying gerrit.
"""
val = self._change_deps_cache.get(change)
if val is None:
git_repo = self.GetGitRepoForChange(change)
val = self._change_deps_cache[change] = (
change.GerritDependencies(),
change.PaladinDependencies(git_repo))
return val
def InjectCommittedPatches(self, changes):
"""Record that the given patches are already committed.
This is primarily useful for external code to notify this object
that changes were applied to the tree outside its purview- specifically
useful for dependency resolution.
"""
self._committed_cache.Inject(*changes)
def InjectLookupCache(self, changes):
"""Inject into the internal lookup cache the given changes, using them
(rather than asking gerrit for them) as needed for dependencies.
"""
self._lookup_cache.Inject(*changes)
def FetchChanges(self, changes):
"""Fetch the specified changes, if needed.
If we're an external builder, internal changes are filtered out.
Returns:
An iterator over a list of the filtered changes.
"""
for change in changes:
try:
self._helper_pool.ForChange(change)
except GerritHelperNotAvailable:
# Internal patches are irrelevant to external builders.
logging.info("Skipping internal patch: %s", change)
continue
change.Fetch(self.GetGitRepoForChange(change, strict=True))
yield change
@_ManifestDecorator
def Apply(self, changes, frozen=True, honor_ordering=False,
changes_filter=None):
"""Applies changes from pool into the build root specified by the manifest.
This method resolves each given change down into a set of transactions
(the change and its dependencies) that must go in, then tries to apply
the largest transaction first, working its way down.
If a transaction cannot be applied, it is rolled back in full. Note that
if a change is involved in multiple transactions and an earlier attempt
fails, that change can be retried in a new transaction, provided the
failure wasn't caused by the patch being incompatible with ToT.
Args:
changes: A sequence of cros_patch.GitRepoPatch instances to resolve
and apply.
frozen: If True, then resolving of the given changes is explicitly
limited to just the passed in changes, or known committed changes.
This is basically CQ/Paladin mode, used to limit the changes being
pulled in/committed to just what we allow.
honor_ordering: Apply normally will reorder the transactions it
computes, trying the largest first, then degrading through smaller
transactions if the larger of the two fails. If honor_ordering
is True, then the ordering given via changes is preserved;
this is mainly of use for cbuildbot induced patching, and shouldn't
be used for CQ patching.
changes_filter: If not None, must be a functor taking two arguments:
series, changes; it must return the changes to work on.
This is invoked after the initial changes have been fetched,
thus this is a way for consumers to do last minute checking of the
changes being inspected, and expand the changes if necessary.
Primarily this is of use for cbuildbot patching when dealing w/
uploaded/remote patches.
Returns:
A tuple of (changes applied, exceptions for changes that failed against
ToT, exceptions for changes that failed inflight); the exceptions are
cros_patch.PatchException instances.
"""
# Prefetch the changes; we need accurate change_id/id's, which is
# guaranteed via Fetch.
changes = list(self.FetchChanges(changes))
if changes_filter:
changes = changes_filter(self, changes)
self.InjectLookupCache(changes)
limit_to = cros_patch.PatchCache(changes) if frozen else None
resolved, applied, failed = [], [], []
for change, plan, ex in self.CreateTransactions(changes, limit_to=limit_to):
if ex is not None:
logging.info("Failed creating transaction for %s: %s", change, ex)
failed.append(ex)
else:
resolved.append((change, plan))
logging.info("Transaction for %s is %s.",
change, ', '.join(map(str, resolved[-1][-1])))
if not resolved:
# No work to do; either no changes were given to us, or all failed
# to be resolved.
return [], failed, []
if not honor_ordering:
# Sort by length, falling back to the order the changes were given to us.
# This is done to prefer longer transactions (more painful to rebase)
# over shorter transactions.
position = dict((change, idx) for idx, change in enumerate(changes))
def mk_key(data):
ids = [x.id for x in data[1]]
return -len(ids), position[data[0]]
resolved.sort(key=mk_key)
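# Illustrative sketch of the ordering (hypothetical values): transactions
# of lengths 3, 1 and 3 for changes given in the order A, B, C produce the
# keys (-3, 0), (-1, 1), (-3, 2), so they are attempted as A, C, B --
# longer transactions first, ties broken by the original input order.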
for inducing_change, transaction_changes in resolved:
try:
with self._Transaction(transaction_changes):
logging.debug("Attempting transaction for %s: changes: %s",
inducing_change,
', '.join(map(str, transaction_changes)))
self._ApplyChanges(inducing_change, transaction_changes)
except cros_patch.PatchException as e:
logging.info("Failed applying transaction for %s: %s",
inducing_change, e)
failed.append(e)
else:
applied.extend(transaction_changes)
self.InjectCommittedPatches(transaction_changes)
# Uniquify while maintaining order.
def _uniq(l):
s = set()
for x in l:
if x not in s:
yield x
s.add(x)
applied = list(_uniq(applied))
self._is_submitting = True
failed = [x for x in failed if x.patch not in applied]
failed_tot = [x for x in failed if not x.inflight]
failed_inflight = [x for x in failed if x.inflight]
return applied, failed_tot, failed_inflight
@contextlib.contextmanager
def _Transaction(self, commits):
"""ContextManager used to rollback changes to a build root if necessary.
Specifically, if an unhandled non system exception occurs, this context
manager will roll back all relevant modifications to the git repos
involved.
Args:
commits: A sequence of cros_patch.GitRepoPatch instances that comprise
this transaction; this is used to identify exactly what may be changed,
thus what needs to be tracked and rolled back if the transaction fails.
"""
# First, the bookkeeping code; gather required data so we know what
# to roll back to should this transaction fail. Specifically, we track
# what was checked out for each involved repo, and if it was a branch,
# the sha1 of the branch; that information is enough to rewind us back
# to the original repo state.
project_state = set(
map(functools.partial(self.GetGitRepoForChange, strict=True), commits))
resets = []
for project_dir in project_state:
current_sha1 = git.RunGit(
project_dir, ['rev-list', '-n1', 'HEAD']).output.strip()
assert current_sha1
resets.append((project_dir, current_sha1))
committed_cache = self._committed_cache.copy()
try:
yield
# Reaching here means it was applied cleanly, thus return.
return
except Exception:
logging.info("Rewinding transaction: failed changes: %s .",
', '.join(map(str, commits)), exc_info=True)
for project_dir, sha1 in resets:
git.RunGit(project_dir, ['reset', '--hard', sha1])
self._committed_cache = committed_cache
raise
@_PatchWrapException
def _ApplyChanges(self, _inducing_change, changes):
"""Apply a given ordered sequence of changes.
Args:
_inducing_change: The core GitRepoPatch instance that led to this
sequence of changes; basically what this transaction was computed from.
Needs to be passed in so that the exception wrapping machinery can
convert any failures, assigning blame appropriately.
changes: An ordered sequence of GitRepoPatch instances to apply.
"""
# Bail immediately if we know one of the requisite patches won't apply.
for change in changes:
failure = self.failed_tot.get(change.id)
if failure is not None:
raise failure
applied = []
for change in changes:
if change in self._committed_cache:
continue
try:
self.ApplyChange(change)
except cros_patch.PatchException as e:
if not e.inflight:
self.failed_tot[change.id] = e
raise
applied.append(change)
logging.debug('Done investigating changes. Applied %s',
' '.join([c.id for c in applied]))
@classmethod
def WorkOnSingleRepo(cls, git_repo, tracking_branch, **kwargs):
"""Classmethod to generate a PatchSeries that targets a single git repo.
It does this by forcing a fake manifest, which in turn points the
tracking branch/paths/content-merging at what is passed in here.
Args:
git_repo: Absolute path to the git repository to operate upon.
tracking_branch: Which tracking branch patches should apply against.
kwargs: See PatchSeries.__init__ for the various optional args;
note forced_manifest cannot be used here.
Returns:
A PatchSeries instance w/ a forced manifest.
"""
if 'forced_manifest' in kwargs:
raise ValueError("RawPatchSeries doesn't allow a forced_manifest "
"argument.")
kwargs['forced_manifest'] = _ManifestShim(git_repo, tracking_branch)
return cls(git_repo, **kwargs)
class _ManifestShim(object):
"""A fake manifest that only contains a single repository.
This fake manifest is used to allow us to filter out patches for
the PatchSeries class. It isn't a complete implementation -- we just
implement the functions that PatchSeries uses. It works via duck typing.
All of the below methods accept the same arguments as the corresponding
methods in git.ManifestCheckout.*, but they do not make any use of the
arguments -- they just always return information about this project.
"""
def __init__(self, path, tracking_branch, remote='origin'):
tracking_branch = 'refs/remotes/%s/%s' % (
remote, git.StripRefs(tracking_branch),
)
attrs = dict(local_path=path, path=path, tracking_branch=tracking_branch)
self.checkout = git.ProjectCheckout(attrs)
def FindCheckouts(self, *_args, **_kwargs):
"""Returns the list of checkouts.
In this case, we only have one repository so we just return that repository.
We accept the same arguments as git.ManifestCheckout.FindCheckouts, but we
do not make any use of them.
Returns:
A list of ProjectCheckout objects.
"""
return [self.checkout]
class CalculateSuspects(object):
"""Diagnose the cause for a given set of failures."""
@classmethod
def _FindPackageBuildFailureSuspects(cls, changes, messages):
"""Figure out what CLs are at fault for a set of build failures.
Args:
changes: A list of cros_patch.GerritPatch instances to consider.
messages: A list of build failure messages, of type
BuildFailureMessage.
"""
suspects = set()
for message in messages:
suspects.update(message.FindPackageBuildFailureSuspects(changes))
return suspects
@classmethod
def _FindPreviouslyFailedChanges(cls, candidates):
"""Find what changes that have previously failed the CQ.
The first time a change is included in a build that fails due to a
flaky (or apparently unrelated) failure, we assume that it is innocent. If
this happens more than once, we kick out the CL.
"""
suspects = set()
for change in candidates:
if ValidationPool.GetCLStatusCount(
CQ, change, ValidationPool.STATUS_FAILED):
suspects.add(change)
return suspects
@classmethod
def FilterChromiteChanges(cls, changes):
"""Returns a list of chromite changes in |changes|."""
return [x for x in changes if x.project == constants.CHROMITE_PROJECT]
@classmethod
def _MatchesFailureType(cls, messages, fail_type, strict=True):
"""Returns True if all failures are instances of |fail_type|.
Args:
messages: A list of BuildFailureMessage or NoneType objects
from the failed slaves.
fail_type: The exception class to look for.
strict: If False, treat NoneType message as a match.
Returns:
True if all objects in |messages| are non-None and all failures are
instances of |fail_type|.
"""
return ((not strict or all(messages)) and
all(x.MatchesFailureType(fail_type) for x in messages if x))
@classmethod
def OnlyLabFailures(cls, messages, no_stat):
"""Determine if the cause of build failure was lab failure.
Args:
messages: A list of BuildFailureMessage or NoneType objects
from the failed slaves.
no_stat: A list of builders which failed prematurely without reporting
status.
Returns:
True if the build failed purely due to lab failures.
"""
# If any builder failed prematurely, lab failure was not the only cause.
return (not no_stat and
cls._MatchesFailureType(messages, failures_lib.TestLabFailure))
@classmethod
def OnlyInfraFailures(cls, messages, no_stat):
"""Determine if the cause of build failure was infrastructure failure.
Args:
messages: A list of BuildFailureMessage or NoneType objects
from the failed slaves.
no_stat: A list of builders which failed prematurely without reporting
status.
Returns:
True if the build failed purely due to infrastructure failures.
"""
# "Failed to report status" and "NoneType" messages are considered
# infra failures.
return ((not messages and no_stat) or
cls._MatchesFailureType(
messages, failures_lib.InfrastructureFailure, strict=False))
@classmethod
def FindSuspects(cls, build_root, changes, messages, infra_fail=False,
lab_fail=False):
"""Find out what changes probably caused our failure.
In cases where there were no internal failures, we can assume that the
external changes are at fault. Otherwise, this function just defers to
_FindPackageBuildFailureSuspects and _FindPreviouslyFailedChanges as needed.
If the failures don't match either case, just fail everything.
Args:
build_root: Build root directory.
changes: A list of cros_patch.GerritPatch instances to consider.
messages: A list of build failure messages, of type
BuildFailureMessage or of type NoneType.
infra_fail: The build failed purely due to infrastructure failures.
lab_fail: The build failed purely due to test lab infrastructure
failures.
Returns:
A set of changes as suspects.
"""
bad_changes = ValidationPool.GetShouldRejectChanges(changes)
if bad_changes:
# If there are changes that have been set verified=-1 or
# code-review=-2, these changes are the ONLY suspects of the
# failed build.
logging.warning('Detected that some changes have been blamed for '
'the build failure. Only these CLs will be rejected')
return set(bad_changes)
elif lab_fail:
logging.warning('Detected that the build failed purely due to HW '
'Test Lab failure(s). Will not reject any changes')
return set()
elif not lab_fail and infra_fail:
# The non-lab infrastructure errors might have been caused
# by chromite changes.
logging.warning(
'Detected that the build failed due to non-lab infrastructure '
'issue(s). Will only reject chromite changes')
return set(cls.FilterChromiteChanges(changes))
suspects = set()
# If there were no internal failures, only kick out external changes.
# Treat None messages as external for this purpose.
if any(message and message.internal for message in messages):
candidates = changes
else:
candidates = [change for change in changes if not change.internal]
# Filter out innocent internal overlay changes from our list of candidates.
candidates = cls.FilterInnocentOverlayChanges(
build_root, candidates, messages)
if all(message and message.IsPackageBuildFailure()
for message in messages):
# If we are here, there are no None messages.
suspects = cls._FindPackageBuildFailureSuspects(candidates, messages)
else:
suspects.update(candidates)
return suspects
@classmethod
def GetResponsibleOverlays(cls, build_root, messages):
"""Get the set of overlays that could have caused failures.
This loops through the set of builders that failed in a given run and
finds what overlays could have been responsible for the failure.
Args:
build_root: Build root directory.
messages: A list of build failure messages from supporting builders.
These must be BuildFailureMessage objects or NoneType objects.
Returns:
The set of overlays that could have caused the failures. If we can't
determine what overlays are responsible, returns None.
"""
responsible_overlays = set()
for message in messages:
if message is None:
return None
bot_id = message.builder
config = cbuildbot_config.config.get(bot_id)
if not config:
return None
for board in config.boards:
overlays = portage_utilities.FindOverlays(
constants.BOTH_OVERLAYS, board, build_root)
responsible_overlays.update(overlays)
return responsible_overlays
@classmethod
def GetAffectedOverlays(cls, change, manifest, all_overlays):
"""Get the set of overlays affected by a given change.
Args:
change: The change to look at.
manifest: A ManifestCheckout instance representing our build directory.
all_overlays: The set of all valid overlays.
Returns:
The set of overlays affected by the specified |change|. If the change
affected something other than an overlay, return None.
"""
checkout = change.GetCheckout(manifest, strict=False)
if checkout:
git_repo = checkout.GetPath(absolute=True)
# The whole git repo is an overlay. Return it.
# Example: src/private-overlays/overlay-x86-zgb-private
if git_repo in all_overlays:
return set([git_repo])
# Get the set of immediate subdirs affected by the change.
# Example: src/overlays/overlay-x86-zgb
subdirs = set([os.path.join(git_repo, path.split(os.path.sep)[0])
for path in change.GetDiffStatus(git_repo)])
# If all of the subdirs are overlays, return them.
if subdirs.issubset(all_overlays):
return subdirs
@classmethod
def FilterInnocentOverlayChanges(cls, build_root, changes, messages):
"""Filter out clearly innocent overlay changes based on failure messages.
It is not possible to break an x86-generic builder via a change to an
unrelated overlay (e.g. amd64-generic). Filter out changes that are
known to be innocent.
Args:
build_root: Build root directory.
changes: Changes to filter.
messages: A list of build failure messages from supporting builders.
These must be BuildFailureMessage objects or NoneType objects.
Returns:
The list of changes that are potentially guilty.
"""
responsible_overlays = cls.GetResponsibleOverlays(build_root, messages)
if responsible_overlays is None:
return changes
all_overlays = set(portage_utilities.FindOverlays(
constants.BOTH_OVERLAYS, None, build_root))
manifest = git.ManifestCheckout.Cached(build_root)
candidates = []
for change in changes:
overlays = cls.GetAffectedOverlays(change, manifest, all_overlays)
if overlays is None or overlays.issubset(responsible_overlays):
candidates.append(change)
return candidates
class ValidationPool(object):
"""Class that handles interactions with a validation pool.
This class can be used to acquire a set of commits that form a pool of
commits ready to be validated and committed.
Usage: Use ValidationPool.AcquirePool -- a static
method that grabs the commits that are ready for validation.
"""
GLOBAL_DRYRUN = False
MAX_TIMEOUT = 60 * 60 * 4
SLEEP_TIMEOUT = 30
STATUS_FAILED = manifest_version.BuilderStatus.STATUS_FAILED
STATUS_INFLIGHT = manifest_version.BuilderStatus.STATUS_INFLIGHT
STATUS_PASSED = manifest_version.BuilderStatus.STATUS_PASSED
STATUS_LAUNCHING = 'launching'
STATUS_WAITING = 'waiting'
INCONSISTENT_SUBMIT_MSG = ('Gerrit thinks that the change was not submitted, '
'even though we hit the submit button.')
# The grace period (in seconds) before we reject a patch due to dependency
# errors.
REJECTION_GRACE_PERIOD = 30 * 60
# Cache for the status of CLs.
_CL_STATUS_CACHE = {}
def __init__(self, overlays, build_root, build_number, builder_name,
is_master, dryrun, changes=None, non_os_changes=None,
conflicting_changes=None, pre_cq=False, metadata=None):
"""Initializes an instance by setting default variables to instance vars.
Generally use AcquirePool as the entry point to a pool rather than this
method.
Args:
overlays: One of constants.VALID_OVERLAYS.
build_root: Build root directory.
build_number: Build number for this validation attempt.
builder_name: Builder name on buildbot dashboard.
is_master: True if this is the master builder for the Commit Queue.
dryrun: If set to True, do not submit anything to Gerrit.
Optional Args:
changes: List of changes for this validation pool.
non_os_changes: List of changes that are part of this validation
pool but aren't part of the cros checkout.
conflicting_changes: Changes that failed to apply but we're keeping around
because they conflict with other changes in flight.
pre_cq: If set to True, this builder is verifying CLs before they go to
the commit queue.
metadata: Optional CBuildbotMetadata instance where CL actions will
be recorded.
"""
self.build_root = build_root
# These instances can be instantiated from both older and newer pickle
# dumps. Thus we need to validate the given args since we may be getting
# a value we no longer like (nor work with).
if overlays not in constants.VALID_OVERLAYS:
raise ValueError("Unknown/unsupported overlay: %r" % (overlays,))
self._helper_pool = self.GetGerritHelpersForOverlays(overlays)
if not isinstance(build_number, int):
raise ValueError("Invalid build_number: %r" % (build_number,))
if not isinstance(builder_name, basestring):
raise ValueError("Invalid builder_name: %r" % (builder_name,))
for changes_name, changes_value in (
('changes', changes), ('non_os_changes', non_os_changes)):
if not changes_value:
continue
if not all(isinstance(x, cros_patch.GitRepoPatch) for x in changes_value):
raise ValueError(
'Invalid %s: all elements must be a GitRepoPatch derivative, got %r'
% (changes_name, changes_value))
if conflicting_changes and not all(
isinstance(x, cros_patch.PatchException)
for x in conflicting_changes):
raise ValueError(
'Invalid conflicting_changes: all elements must be a '
'cros_patch.PatchException derivative, got %r'
% (conflicting_changes,))
self.build_log = self.ConstructDashboardURL(overlays, pre_cq, builder_name,
str(build_number))
self.is_master = bool(is_master)
self.pre_cq = pre_cq
self._metadata = metadata
self.dryrun = bool(dryrun) or self.GLOBAL_DRYRUN
self.queue = 'A trybot' if pre_cq else 'The Commit Queue'
self.bot = PRE_CQ if pre_cq else CQ
# See optional args for types of changes.
self.changes = changes or []
self.non_manifest_changes = non_os_changes or []
# Note, we hold onto these CLs since they conflict against our current CLs
# being tested; if our current ones succeed, we notify the user to deal
# w/ the conflict. If the CLs we're testing fail, then there is no
# reason we can't try these again in the next run.
self.changes_that_failed_to_apply_earlier = conflicting_changes or []
# Private vars only used for pickling.
self._overlays = overlays
self._build_number = build_number
self._builder_name = builder_name
@staticmethod
def GetBuildDashboardForOverlays(overlays, trybot):
"""Discern the dashboard to use based on the given overlay."""
if trybot:
return constants.TRYBOT_DASHBOARD
if overlays in [constants.PRIVATE_OVERLAYS, constants.BOTH_OVERLAYS]:
return constants.BUILD_INT_DASHBOARD
return constants.BUILD_DASHBOARD
@classmethod
def ConstructDashboardURL(cls, overlays, trybot, builder_name, build_number,
stage=None):
"""Return the dashboard (buildbot) URL for this run
Args:
overlays: One of constants.VALID_OVERLAYS.
trybot: Boolean: is this a remote trybot?
builder_name: Builder name on buildbot dashboard.
build_number: Build number for this validation attempt.
stage: Link directly to a stage log, else use the general landing page.
Returns:
The fully formed URL
"""
build_dashboard = cls.GetBuildDashboardForOverlays(overlays, trybot)
url_suffix = 'builders/%s/builds/%s' % (builder_name, str(build_number))
if stage:
url_suffix += '/steps/%s/logs/stdio' % (stage,)
url_suffix = urllib.quote(url_suffix)
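# Illustrative example (hypothetical values): builder_name='x86-paladin'
# and build_number=123 yield 'builders/x86-paladin/builds/123', which is
# joined onto the dashboard root; passing stage='CommitQueueSync' appends
# '/steps/CommitQueueSync/logs/stdio' before quoting.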
return os.path.join(build_dashboard, url_suffix)
@staticmethod
def GetGerritHelpersForOverlays(overlays):
"""Discern the allowed GerritHelpers to use based on the given overlay."""
cros_internal = cros = False
if overlays in [constants.PUBLIC_OVERLAYS, constants.BOTH_OVERLAYS, False]:
cros = True
if overlays in [constants.PRIVATE_OVERLAYS, constants.BOTH_OVERLAYS]:
cros_internal = True
return HelperPool.SimpleCreate(cros_internal=cros_internal, cros=cros)
def __reduce__(self):
"""Used for pickling to re-create validation pool."""
# NOTE: self._metadata is specifically excluded from the validation pool
# pickle. We do not want the un-pickled validation pool to have a reference
# to its own un-pickled metadata instance. Instead, we want it to refer
# to the builder run's metadata instance. This is accomplished by setting
# metadata at un-pickle time, in ValidationPool.Load(...).
return (
self.__class__,
(
self._overlays,
self.build_root, self._build_number, self._builder_name,
self.is_master, self.dryrun, self.changes,
self.non_manifest_changes,
self.changes_that_failed_to_apply_earlier,
self.pre_cq))
@classmethod
def FilterDraftChanges(cls, changes):
"""Filter out draft changes based on the status of the latest patch set.
Our Gerrit query cannot exclude changes whose latest patch set has
not yet been published as long as there is one published patchset
in the change. Such changes will fail when we try to merge them,
which may lead to undesirable consequences (e.g. dependencies not
respected).
Args:
changes: List of changes to filter.
Returns:
List of published changes.
"""
return [x for x in changes if not x.patch_dict['currentPatchSet']['draft']]
@classmethod
def GetShouldRejectChanges(cls, changes):
"""Returns the changes that should be rejected.
Check whether the change should be rejected (e.g. verified: -1,
code-review: -2).
Args:
changes: List of changes.
Returns:
A list of changes that should be rejected.
"""
return [x for x in changes if
any(x.HasApproval(f, v) for f, v in
constants.DEFAULT_CQ_SHOULD_REJECT_FIELDS.iteritems())]
@classmethod
def FilterNonMatchingChanges(cls, changes):
"""Filter out changes that don't actually match our query.
Generally, Gerrit should only return patches that match our
query. However, Gerrit keeps a query cache and the cached data may
be stale.
There are also race conditions (bugs in Gerrit) where the final
patch won't match our query. Here's an example problem that this
code fixes: If the Pre-CQ launcher picks up a CL while the CQ is
committing the CL, it may catch a race condition where a new
patchset has been created and committed by the CQ, but the CL is
still treated as if it matches the query (which it doesn't,
anymore).
Args:
changes: List of changes to filter.
Returns:
List of changes that match our query.
"""
filtered_changes = []
should_reject_changes = cls.GetShouldRejectChanges(changes)
for change in changes:
if change in should_reject_changes:
continue
# Because the gerrit cache sometimes gets stale, double-check that the
# change hasn't already been merged.
if change.status != 'NEW':
continue
# Check whether the user (or chrome-bot) uploaded a new change under our
# feet while Gerrit was in the middle of answering our query.
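# Note: the for/else below adds the change only if the loop finishes
# without hitting 'break', i.e. every required approval field still
# carries the expected value.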
for field, value in constants.DEFAULT_CQ_READY_FIELDS.iteritems():
if not change.HasApproval(field, value):
break
else:
filtered_changes.append(change)
return filtered_changes
@classmethod
@failures_lib.SetFailureType(failures_lib.BuilderFailure)
def AcquirePreCQPool(cls, *args, **kwargs):
"""See ValidationPool.__init__ for arguments."""
kwargs.setdefault('pre_cq', True)
kwargs.setdefault('is_master', True)
pool = cls(*args, **kwargs)
pool.RecordPatchesInMetadata()
return pool
@classmethod
def AcquirePool(cls, overlays, repo, build_number, builder_name,
dryrun=False, changes_query=None, check_tree_open=True,
change_filter=None, throttled_ok=False, metadata=None):
"""Acquires the current pool from Gerrit.
Polls Gerrit and checks for which changes are ready to be committed.
Should only be called from master builders.
Args:
overlays: One of constants.VALID_OVERLAYS.
repo: The repo used to sync, to filter projects, and to apply patches
against.
build_number: Corresponding build number for the build.
builder_name: Builder name on buildbot dashboard.
dryrun: Don't submit anything to gerrit.
changes_query: The gerrit query to use to identify changes; if None,
uses the internal defaults.
check_tree_open: If True, only return when the tree is open.
change_filter: If set, use change_filter(pool, changes,
non_manifest_changes) to filter out unwanted patches.
throttled_ok: if |check_tree_open|, treat a throttled tree as open.
Default: False.
metadata: Optional CBuildbotMetadata instance where CL actions will
be recorded.
Returns:
ValidationPool object.
Raises:
TreeIsClosedException: if the tree is closed (or throttled, if not
|throttled_ok|).
"""
if change_filter is None:
change_filter = lambda _, x, y: (x, y)
# We choose a longer wait here as we haven't committed to anything yet. By
# doing this here we can reduce the number of builder cycles.
end_time = time.time() + cls.MAX_TIMEOUT
while True:
time_left = end_time - time.time()
# Wait until the tree becomes open (or throttled, if |throttled_ok|),
# and record the tree status.
if check_tree_open:
try:
status = tree_status.WaitForTreeStatus(
period=cls.SLEEP_TIMEOUT, timeout=time_left,
throttled_ok=throttled_ok)
except timeout_util.TimeoutError:
raise TreeIsClosedException(closed_or_throttled=not throttled_ok)
else:
status = constants.TREE_OPEN
waiting_for = 'new CLs'
# Select the right default gerrit query based on the tree
# status, or use custom |changes_query| if it was provided.
using_default_query = (changes_query is None)
if not using_default_query:
query = changes_query
elif status == constants.TREE_THROTTLED:
query = constants.THROTTLED_CQ_READY_QUERY
waiting_for = 'new CQ+2 CLs or the tree to open'
else:
query = constants.DEFAULT_CQ_READY_QUERY
# Sync so that we are up-to-date on what is committed.
repo.Sync()
# Only master configurations should call this method.
pool = ValidationPool(overlays, repo.directory, build_number,
builder_name, True, dryrun, metadata=metadata)
draft_changes = []
# Iterate through changes from all gerrit instances we care about.
for helper in cls.GetGerritHelpersForOverlays(overlays):
raw_changes = helper.Query(query, sort='lastUpdated')
raw_changes.reverse()
# Reload the changes because the data in the Gerrit cache may be stale.
raw_changes = list(cls.ReloadChanges(raw_changes))
# If we used a default query, verify the results match the query, to
# prevent race conditions. Note, this filters using the conditions
# of DEFAULT_CQ_READY_QUERY even if the tree is throttled. Since that
# query is strictly more permissive than the throttled query, we are
# not at risk of incorrectly losing any patches here. We only expose
# ourselves to the minor race condition that a CQ+2 patch could have
# been marked as CQ+1 out from under us, but still end up being picked
# up in a throttled CQ run.
if using_default_query:
published_changes = cls.FilterDraftChanges(raw_changes)
draft_changes.extend(set(raw_changes) - set(published_changes))
raw_changes = cls.FilterNonMatchingChanges(published_changes)
changes, non_manifest_changes = ValidationPool._FilterNonCrosProjects(
raw_changes, git.ManifestCheckout.Cached(repo.directory))
pool.changes.extend(changes)
pool.non_manifest_changes.extend(non_manifest_changes)
for change in draft_changes:
pool.HandleDraftChange(change)
# Filter out unwanted changes.
pool.changes, pool.non_manifest_changes = change_filter(
pool, pool.changes, pool.non_manifest_changes)
if (pool.changes or pool.non_manifest_changes or dryrun or time_left < 0
or cls.ShouldExitEarly()):
break
logging.info('Waiting for %s (%d minutes left)...', waiting_for,
time_left / 60)
time.sleep(cls.SLEEP_TIMEOUT)
pool.RecordPatchesInMetadata()
return pool
def AddPendingCommitsIntoPool(self, manifest):
"""Add the pending commits from |manifest| into pool.
Args:
manifest: path to the manifest.
"""
manifest_dom = minidom.parse(manifest)
pending_commits = manifest_dom.getElementsByTagName(
lkgm_manager.PALADIN_COMMIT_ELEMENT)
for pc in pending_commits:
patch = cros_patch.GerritFetchOnlyPatch(
pc.getAttribute(lkgm_manager.PALADIN_PROJECT_URL_ATTR),
pc.getAttribute(lkgm_manager.PALADIN_PROJECT_ATTR),
pc.getAttribute(lkgm_manager.PALADIN_REF_ATTR),
pc.getAttribute(lkgm_manager.PALADIN_BRANCH_ATTR),
pc.getAttribute(lkgm_manager.PALADIN_REMOTE_ATTR),
pc.getAttribute(lkgm_manager.PALADIN_COMMIT_ATTR),
pc.getAttribute(lkgm_manager.PALADIN_CHANGE_ID_ATTR),
pc.getAttribute(lkgm_manager.PALADIN_GERRIT_NUMBER_ATTR),
pc.getAttribute(lkgm_manager.PALADIN_PATCH_NUMBER_ATTR),
owner_email=pc.getAttribute(lkgm_manager.PALADIN_OWNER_EMAIL_ATTR),
fail_count=int(pc.getAttribute(lkgm_manager.PALADIN_FAIL_COUNT_ATTR)),
pass_count=int(pc.getAttribute(lkgm_manager.PALADIN_PASS_COUNT_ATTR)),
total_fail_count=int(pc.getAttribute(
lkgm_manager.PALADIN_TOTAL_FAIL_COUNT_ATTR)),)
self.changes.append(patch)
@classmethod
def AcquirePoolFromManifest(cls, manifest, overlays, repo, build_number,
builder_name, is_master, dryrun, metadata=None):
"""Acquires the current pool from a given manifest.
This function assumes that you have already synced to the given manifest.
Args:
manifest: path to the manifest where the pool resides.
overlays: One of constants.VALID_OVERLAYS.
repo: The repo used to filter projects and to apply patches against.
build_number: Corresponding build number for the build.
builder_name: Builder name on buildbot dashboard.
is_master: Boolean that indicates whether this is a pool for a master
config or not.
dryrun: Don't submit anything to gerrit.
metadata: Optional CBuildbotMetadata instance where CL actions will
be recorded.
Returns:
ValidationPool object.
"""
pool = ValidationPool(overlays, repo.directory, build_number, builder_name,
is_master, dryrun, metadata=metadata)
pool.AddPendingCommitsIntoPool(manifest)
pool.RecordPatchesInMetadata()
return pool
@classmethod
def ShouldExitEarly(cls):
"""Return whether we should exit early.
This function is intended to be overridden by tests or by subclasses.
"""
return False
@staticmethod
def _FilterNonCrosProjects(changes, manifest):
"""Filters changes to a tuple of relevant changes.
There are many code reviews that are not part of Chromium OS and/or
only relevant on a different branch. This method returns a tuple of (
relevant reviews in a manifest, relevant reviews not in the manifest). Note
that this function must be run while chromite is checked out in a
repo-managed checkout.
Args:
changes: List of GerritPatch objects.
manifest: The manifest to check projects/branches against.
Returns:
Tuple of (relevant reviews in a manifest,
relevant reviews not in the manifest).
"""
def IsCrosReview(change):
return (change.project.startswith('chromiumos') or
change.project.startswith('chromeos'))
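# For example (hypothetical project names), 'chromiumos/chromite' and
# 'chromeos/vendor/foo' count as Chromium OS reviews, while 'external/v8'
# is filtered out below.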
# First we filter to only Chromium OS repositories.
changes = [c for c in changes if IsCrosReview(c)]
changes_in_manifest = []
changes_not_in_manifest = []
for change in changes:
if change.GetCheckout(manifest, strict=False):
changes_in_manifest.append(change)
else:
changes_not_in_manifest.append(change)
logging.info('Filtered change %s', change)
return changes_in_manifest, changes_not_in_manifest
@classmethod
def _FilterDependencyErrors(cls, errors):
"""Filter out ignorable DependencyError exceptions.
If a dependency isn't marked as ready, or a dependency fails to apply,
we only complain after REJECTION_GRACE_PERIOD has passed since the patch
was uploaded.
This helps in two situations:
1) If the developer is in the middle of marking a stack of changes as
ready, we won't reject their work until the grace period has passed.
2) If the developer marks a big circular stack of changes as ready, and
some change in the middle of the stack doesn't apply, the user will
get a chance to rebase their change before we mark all the changes as
'not ready'.
This function filters out dependency errors that can be ignored due to
the grace period.
Args:
errors: List of exceptions to filter.
Returns:
The exceptions that were not filtered out (i.e. that should still be
reported).
"""
reject_timestamp = time.time() - cls.REJECTION_GRACE_PERIOD
results = []
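# Illustrative example (hypothetical timings): with the 30 minute grace
# period, a DependencyError for a patch approved 10 minutes ago is dropped
# from the results below, while the same error for a patch approved two
# hours ago is kept and will be reported.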
for error in errors:
results.append(error)
if reject_timestamp < error.patch.approval_timestamp:
while error is not None:
if isinstance(error, cros_patch.DependencyError):
logging.info('Ignoring dependency errors for %s due to grace '
'period', error.patch)
results.pop()
break
error = getattr(error, 'error', None)
return results
@classmethod
def PrintLinksToChanges(cls, changes):
"""Print links to the specified |changes|.
This method prints a link for each of the |changes| by using the
information stored in them. It should not attempt to query
Google Storage or Gerrit.
Args:
changes: A list of cros_patch.GerritPatch instances to generate
transactions for.
"""
def SortKeyForChanges(change):
return (-change.total_fail_count, -change.fail_count,
os.path.basename(change.project), change.gerrit_number)
# Now, sort and print the changes.
for change in sorted(changes, key=SortKeyForChanges):
project = os.path.basename(change.project)
gerrit_number = cros_patch.AddPrefix(change, change.gerrit_number)
# We cannot print '@' in the link because it is used to separate
# the display text and the URL by the buildbot annotator.
author = change.owner_email.replace('@', '-AT-')
if (change.owner_email.endswith(constants.GOOGLE_EMAIL) or
change.owner_email.endswith(constants.CHROMIUM_EMAIL)):
author = change.owner
s = '%s | %s | %s' % (project, author, gerrit_number)
# Print a count of how many times a given CL has failed the CQ.
if change.total_fail_count:
s += ' | fails:%d' % (change.fail_count,)
if change.total_fail_count > change.fail_count:
s += '(%d)' % (change.total_fail_count,)
# Add a note if the latest patchset has already passed the CQ.
if change.pass_count > 0:
s += ' | passed:%d' % change.pass_count
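# Example of the resulting link text (hypothetical values):
# 'chromite | jdoe-AT-example.com | 123456 | fails:1(3) | passed:2'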
cros_build_lib.PrintBuildbotLink(s, change.url)
def ApplyPoolIntoRepo(self, manifest=None):
"""Applies changes from pool into the directory specified by the buildroot.
This method applies changes in the order specified. If the build
is running as the master, it also respects the dependency
order. Otherwise, the changes should already be listed in an order
that will not break the dependency order.
Returns:
True if we managed to apply any changes.
"""
applied = []
failed_tot = failed_inflight = {}
patch_series = PatchSeries(self.build_root, helper_pool=self._helper_pool)
if self.is_master:
try:
# pylint: disable=E1123
applied, failed_tot, failed_inflight = patch_series.Apply(
self.changes, manifest=manifest)
except (KeyboardInterrupt, RuntimeError, SystemExit):
raise
except Exception as e:
if mox is not None and isinstance(e, mox.Error):
raise
msg = (
'Unhandled exception occurred while applying changes: %s\n\n'
'To be safe, we have kicked out all of the CLs, so that the '
'commit queue does not go into an infinite loop retrying '
'patches.' % (e,)
)
links = ', '.join('CL:%s' % x.gerrit_number_str for x in self.changes)
cros_build_lib.Error('%s\nAffected Patches are: %s', msg, links)
errors = [InternalCQError(patch, msg) for patch in self.changes]
self._HandleApplyFailure(errors)
raise
# Completely fill the status cache in parallel.
self.FillCLStatusCache(CQ, applied)
for change in applied:
change.total_fail_count = self.GetCLStatusCount(
CQ, change, self.STATUS_FAILED, latest_patchset_only=False)
change.fail_count = self.GetCLStatusCount(
CQ, change, self.STATUS_FAILED)
change.pass_count = self.GetCLStatusCount(
CQ, change, self.STATUS_PASSED)
else:
# Slaves do not need to create transactions and should simply
# apply the changes serially, based on the order that the
# changes were listed on the manifest.
for change in self.changes:
try:
# pylint: disable=E1123
patch_series.ApplyChange(change, manifest=manifest)
except cros_patch.PatchException as e:
# Fail if any patch cannot be applied.
self._HandleApplyFailure([InternalCQError(change, e)])
raise
else:
applied.append(change)
self.PrintLinksToChanges(applied)
if self.is_master:
inputs = [[change] for change in applied]
parallel.RunTasksInProcessPool(self._HandleApplySuccess, inputs)
failed_tot = self._FilterDependencyErrors(failed_tot)
if failed_tot:
logging.info(
'The following changes could not cleanly be applied to ToT: %s',
' '.join([c.patch.id for c in failed_tot]))
self._HandleApplyFailure(failed_tot)
failed_inflight = self._FilterDependencyErrors(failed_inflight)
if failed_inflight:
logging.info(
'The following changes could not cleanly be applied against the '
'current stack of patches; if this stack fails, they will be tried '
'in the next run. Inflight failed changes: %s',
' '.join([c.patch.id for c in failed_inflight]))
self.changes_that_failed_to_apply_earlier.extend(failed_inflight)
self.changes = applied
return bool(self.changes)
@staticmethod
def Load(filename, metadata=None, record_patches=True):
"""Loads the validation pool from the file.
Args:
filename: path of file to load from.
metadata: Optional CBuildbotInstance to use as metadata object
for loaded pool (as metadata instances do not survive
pickle/unpickle)
record_patches: Optional, defaults to True. If True, patches
picked up in this pool will be recorded in
metadata.
"""
with open(filename, 'rb') as p_file:
pool = cPickle.load(p_file)
pool._metadata = metadata
# Because metadata is currently not surviving cbuildbot re-execution,
# re-record that patches were picked up in the non-skipped run of
# CommitQueueSync.
# TODO(akeshet): Remove this code once metadata is being pickled and
# passed across re-executions. See crbug.com/356930
if record_patches:
pool.RecordPatchesInMetadata()
return pool
def Save(self, filename):
"""Serializes the validation pool."""
with open(filename, 'wb') as p_file:
cPickle.dump(self, p_file, protocol=cPickle.HIGHEST_PROTOCOL)
# Note: All submit code, all gerrit code, and basically everything other
# than patch resolution/applying needs to use .change_id from patch objects.
# Basically all code from this point forward.
def _SubmitChangeWithDeps(self, patch_series, change, errors, limit_to):
"""Submit |change| and its dependencies.
If you call this function multiple times with the same PatchSeries, each
CL will only be submitted once.
Args:
patch_series: A PatchSeries() object.
change: The change (a GerritPatch object) to submit.
errors: A dictionary. This dictionary should contain all patches that have
encountered errors, and map them to the associated exception object.
limit_to: The list of patches that were approved by this CQ run. We will
only consider submitting patches that are in this list.
Returns:
A copy of the errors object. If new errors have occurred while submitting
this change, and those errors have prevented a change from being
submitted, they will be added to the errors object.
"""
# Find out what patches we need to submit.
errors = errors.copy()
try:
plan = patch_series.CreateTransaction(change, limit_to=limit_to)
except cros_patch.PatchException as e:
errors[change] = e
return errors
error_stack, submitted = [], []
for dep_change in plan:
# Has this change failed to submit before?
dep_error = errors.get(dep_change)
if dep_error is None and error_stack:
# One of the dependencies failed to submit. Report an error.
dep_error = cros_patch.DependencyError(dep_change, error_stack[-1])
# If there were no errors, submit the patch.
if dep_error is None:
try:
if self._SubmitChange(dep_change) or self.dryrun:
submitted.append(dep_change)
else:
msg = self.INCONSISTENT_SUBMIT_MSG
dep_error = PatchFailedToSubmit(dep_change, msg)
except (gob_util.GOBError, gerrit.GerritException) as e:
if getattr(e, 'http_status', None) == httplib.CONFLICT:
dep_error = PatchConflict(dep_change)
else:
dep_error = PatchFailedToSubmit(dep_change, str(e))
logging.error('%s', dep_error)
# Add any error we saw to the stack.
if dep_error is not None:
logging.info('%s', dep_error)
errors[dep_change] = dep_error
error_stack.append(dep_error)
# Track submitted patches so that we don't submit them again.
patch_series.InjectCommittedPatches(submitted)
# Look for incorrectly submitted patches. We only have this problem
# when we have a dependency cycle, and we submit one change before
# realizing that a later change cannot be submitted. For now, just
# print an error message and notify the developers.
#
# If you see this error a lot, consider implementing a best-effort
# attempt at reverting changes.
for submitted_change in submitted:
gdeps, pdeps = patch_series.GetDepChangesForChange(submitted_change)
for dep in gdeps + pdeps:
dep_error = errors.get(dep)
if dep_error is not None:
error = PatchSubmittedWithoutDeps(submitted_change, dep_error)
self._HandleIncorrectSubmission(error)
logging.error('%s was erroneously submitted.', submitted_change)
return errors
def SubmitChanges(self, changes, check_tree_open=True, throttled_ok=True):
"""Submits the given changes to Gerrit.
Args:
changes: GerritPatch's to submit.
check_tree_open: Whether to check that the tree is open before submitting
changes. If this is False, TreeIsClosedException will never be raised.
throttled_ok: if |check_tree_open|, treat a throttled tree as open
Returns:
A list of the changes that failed to submit.
Raises:
TreeIsClosedException: if the tree is closed.
"""
assert self.is_master, 'Non-master builder calling SubmitPool'
assert not self.pre_cq, 'Trybot calling SubmitPool'
# Mark all changes as successful.
inputs = [[self.bot, change, self.STATUS_PASSED, self.dryrun]
for change in changes]
parallel.RunTasksInProcessPool(self.UpdateCLStatus, inputs)
if (check_tree_open and not self.dryrun and not
tree_status.IsTreeOpen(period=self.SLEEP_TIMEOUT,
timeout=self.MAX_TIMEOUT,
throttled_ok=throttled_ok)):
raise TreeIsClosedException(closed_or_throttled=not throttled_ok)
# Filter out changes that were modified during the CQ run.
unmodified_changes, errors = self.FilterModifiedChanges(changes)
# Filter out changes that aren't marked as CR=+2, CQ=+1, V=+1 anymore, in
# case the patch status changed during the CQ run.
filtered_changes = self.FilterNonMatchingChanges(unmodified_changes)
for change in set(unmodified_changes) - set(filtered_changes):
errors[change] = PatchNotCommitReady(change)
patch_series = PatchSeries(self.build_root, helper_pool=self._helper_pool)
patch_series.InjectLookupCache(filtered_changes)
for change in filtered_changes:
errors = self._SubmitChangeWithDeps(patch_series, change, errors,
filtered_changes)
for patch, error in errors.iteritems():
logging.error('Could not submit %s', patch)
self._HandleCouldNotSubmit(patch, error)
return errors
def RecordPatchesInMetadata(self):
"""Mark all patches as having been picked up in metadata.json.
If self._metadata is None, then this function does nothing.
"""
if self._metadata:
timestamp = int(time.time())
for change in self.changes:
self._metadata.RecordCLAction(change, constants.CL_ACTION_PICKED_UP,
timestamp)
@classmethod
def FilterModifiedChanges(cls, changes):
"""Filter out changes that were modified while the CQ was in-flight.
Args:
changes: A list of changes (as PatchQuery objects).
Returns:
This returns a tuple (unmodified_changes, errors).
unmodified_changes: A reloaded list of changes, only including unmodified
and unsubmitted changes.
errors: A dictionary. This dictionary will contain all patches that have
encountered errors, and map them to the associated exception object.
"""
# Reload all of the changes from the Gerrit server so that we have a
# fresh view of their approval status. This is needed so that our filtering
# that occurs below will be mostly up-to-date.
unmodified_changes, errors = [], {}
reloaded_changes = list(cls.ReloadChanges(changes))
old_changes = cros_patch.PatchCache(changes)
for change in reloaded_changes:
if change.IsAlreadyMerged():
logging.warning('%s is already merged. It was most likely chumped '
'during the current CQ run.', change)
elif change.patch_number != old_changes[change].patch_number:
# If users upload new versions of a CL while the CQ is in-flight, then
# their CLs are no longer tested. These CLs should be rejected.
errors[change] = PatchModified(change)
else:
unmodified_changes.append(change)
return unmodified_changes, errors
@classmethod
def ReloadChanges(cls, changes):
"""Reload the specified |changes| from the server.
Args:
changes: A list of PatchQuery objects.
Returns:
A list of GerritPatch objects.
"""
return gerrit.GetGerritPatchInfoWithPatchQueries(changes)
def _SubmitChange(self, change):
"""Submits patch using Gerrit Review."""
logging.info('Change %s will be submitted', change)
was_change_submitted = False
helper = self._helper_pool.ForChange(change)
helper.SubmitChange(change, dryrun=self.dryrun)
updated_change = helper.QuerySingleRecord(change.gerrit_number)
# If change is 'SUBMITTED' give gerrit some time to resolve that
# to 'MERGED' or fail outright.
if updated_change.status == 'SUBMITTED':
def _Query():
return helper.QuerySingleRecord(change.gerrit_number)
def _Retry(value):
return value and value.status == 'SUBMITTED'
try:
updated_change = timeout_util.WaitForSuccess(
_Retry, _Query, timeout=SUBMITTED_WAIT_TIMEOUT, period=1)
except timeout_util.TimeoutError:
# The change really is stuck on submitted, not merged, then.
logging.warning('Timed out waiting for gerrit to finish submitting'
' change %s, but status is still "%s".',
change.gerrit_number_str, updated_change.status)
was_change_submitted = updated_change.status == 'MERGED'
if not was_change_submitted:
logging.warning(
'Change %s was submitted to gerrit without errors, but gerrit is'
' reporting it with status "%s" (expected "MERGED").',
change.gerrit_number_str, updated_change.status)
if updated_change.status == 'SUBMITTED':
# So far we have never seen a SUBMITTED CL that did not eventually
# transition to MERGED. If it is stuck on SUBMITTED treat as MERGED.
was_change_submitted = True
logging.info('Proceeding now with the assumption that change %s'
' will eventually transition to "MERGED".',
change.gerrit_number_str)
else:
logging.error('Most likely gerrit was unable to merge change %s.',
change.gerrit_number_str)
if self._metadata:
if was_change_submitted:
action = constants.CL_ACTION_SUBMITTED
else:
action = constants.CL_ACTION_SUBMIT_FAILED
self._metadata.RecordCLAction(change, action)
return was_change_submitted
def RemoveCommitReady(self, change):
"""Remove the commit ready bit for the specified |change|."""
self._helper_pool.ForChange(change).RemoveCommitReady(change,
dryrun=self.dryrun)
if self._metadata:
self._metadata.RecordCLAction(change, constants.CL_ACTION_KICKED_OUT)
def SubmitNonManifestChanges(self, check_tree_open=True):
"""Commits changes to Gerrit from Pool that aren't part of the checkout.
Args:
check_tree_open: Whether to check that the tree is open before submitting
changes. If this is False, TreeIsClosedException will never be raised.
Raises:
TreeIsClosedException: if the tree is closed.
"""
self.SubmitChanges(self.non_manifest_changes,
check_tree_open=check_tree_open)
def SubmitPool(self, check_tree_open=True, throttled_ok=True):
"""Commits changes to Gerrit from Pool. This is only called by a master.
Args:
check_tree_open: Whether to check that the tree is open before submitting
changes. If this is False, TreeIsClosedException will never be raised.
throttled_ok: if |check_tree_open|, treat a throttled tree as open
Raises:
TreeIsClosedException: if the tree is closed.
FailedToSubmitAllChangesException: if we can't submit a change.
FailedToSubmitAllChangesNonFatalException: if we can't submit a change
due to non-fatal errors.
"""
# Note that SubmitChanges can throw an exception if it can't
# submit all changes; in that particular case, don't mark the inflight
# failures patches as failed in gerrit- some may apply next time we do
# a CQ run (since the submit state has changed, we have no way of
# knowing). They *likely* will still fail, but this approach tries
# to minimize wasting the developers time.
errors = self.SubmitChanges(self.changes, check_tree_open=check_tree_open,
throttled_ok=throttled_ok)
if errors:
# We don't throw a fatal error for the whitelisted
# exceptions. These exceptions are mostly caused by human
# intervention during the current run and have limited impact on
# other patches.
whitelisted_exceptions = (PatchConflict,
PatchModified,
PatchNotCommitReady,
cros_patch.DependencyError,)
if all(isinstance(e, whitelisted_exceptions) for e in errors.values()):
raise FailedToSubmitAllChangesNonFatalException(errors)
else:
raise FailedToSubmitAllChangesException(errors)
if self.changes_that_failed_to_apply_earlier:
self._HandleApplyFailure(self.changes_that_failed_to_apply_earlier)
def SubmitPartialPool(self, tracebacks):
"""If the build failed, push any CLs that don't care about the failure.
Each project can specify a list of stages it does not care about in its
COMMIT-QUEUE.ini file. Changes to that project will be submitted even if
those stages fail.
Args:
tracebacks: A list of RecordedTraceback objects. These objects represent
the exceptions that failed the build.
Returns:
A list of the rejected changes.
"""
# Create a list of the failing stage prefixes.
failing_stages = set(traceback.failed_prefix for traceback in tracebacks)
# For each CL, look at whether it cares about the failures. Based on this,
# categorize the CL as accepted or rejected.
accepted, rejected = [], []
for change in self.changes:
ignored_stages = GetStagesToIgnoreForChange(self.build_root, change)
if failing_stages.issubset(ignored_stages):
accepted.append(change)
else:
rejected.append(change)
# Actually submit the accepted changes.
self.SubmitChanges(accepted)
# Return the list of rejected changes.
return rejected
def _HandleApplyFailure(self, failures):
"""Handles changes that were not able to be applied cleanly.
Args:
failures: GerritPatch changes to handle.
"""
for failure in failures:
logging.info('Change %s did not apply cleanly.', failure.patch)
if self.is_master:
self._HandleCouldNotApply(failure)
def _HandleCouldNotApply(self, failure):
"""Handler for when Paladin fails to apply a change.
This handler notifies the developer and removes the commit ready bit,
forcing the developer to re-upload a rebased change.
Args:
failure: GerritPatch instance to operate upon.
"""
msg = ('%(queue)s failed to apply your change in %(build_log)s .'
' %(failure)s')
self.SendNotification(failure.patch, msg, failure=failure)
self.RemoveCommitReady(failure.patch)
def _HandleIncorrectSubmission(self, failure):
"""Handler for when Paladin incorrectly submits a change."""
msg = ('%(queue)s incorrectly submitted your change in %(build_log)s .'
' %(failure)s')
self.SendNotification(failure.patch, msg, failure=failure)
self.RemoveCommitReady(failure.patch)
def HandleDraftChange(self, change):
"""Handler for when the latest patch set of |change| is not published.
This handler removes the commit ready bit from the specified change and
sends the developer a message explaining why.
Args:
change: GerritPatch instance to operate upon.
"""
msg = ('%(queue)s could not apply your change because the latest patch '
'set is not published. Please publish your draft patch set before '
'marking your commit as ready.')
self.SendNotification(change, msg)
self.RemoveCommitReady(change)
def HandleValidationTimeout(self, changes=None, sanity=True):
"""Handles changes that timed out.
This handler removes the commit ready bit from the specified changes and
sends the developer a message explaining why.
Args:
changes: A list of cros_patch.GerritPatch instances to mark as failed.
By default, mark all of the changes as failed.
sanity: A boolean indicating whether the build was considered sane. If
not sane, none of the changes will have their CommitReady bit modified.
"""
if changes is None:
changes = self.changes
logging.info('Validation timed out for all changes.')
msg = ('%(queue)s timed out while verifying your change in '
'%(build_log)s . This means that a supporting builder did not '
'finish building your change within the specified timeout.')
if sanity:
msg += ('If you believe this happened in error, just re-mark your '
'commit as ready. Your change will then get automatically '
'retried.')
else:
msg += ('The build failure may have been caused by infrastructure '
'issues, so no changes will be blamed for the failure.')
for change in changes:
logging.info('Validation timed out for change %s.', change)
self.SendNotification(change, msg)
if sanity:
self.RemoveCommitReady(change)
def SendNotification(self, change, msg, **kwargs):
d = dict(build_log=self.build_log, queue=self.queue, **kwargs)
try:
msg %= d
except (TypeError, ValueError) as e:
logging.error(
"Generation of message %s for change %s failed: dict was %r, "
"exception %s", msg, change, d, e)
raise e.__class__(
"Generation of message %s for change %s failed: dict was %r, "
"exception %s" % (msg, change, d, e))
PaladinMessage(msg, change, self._helper_pool.ForChange(change)).Send(
self.dryrun)
def HandlePreCQSuccess(self):
"""Handler that is called when the Pre-CQ successfully verifies a change."""
msg = '%(queue)s successfully verified your change in %(build_log)s .'
for change in self.changes:
if self.GetCLStatus(self.bot, change) != self.STATUS_PASSED:
self.SendNotification(change, msg)
self.UpdateCLStatus(self.bot, change, self.STATUS_PASSED,
dry_run=self.dryrun)
def _HandleCouldNotSubmit(self, change, error=''):
"""Handler that is called when Paladin can't submit a change.
This should be rare, but if an admin overrides the commit queue and commits
a change that conflicts with this change, it'll apply, build/validate but
receive an error when submitting.
Args:
change: GerritPatch instance to operate upon.
error: The reason why the change could not be submitted.
"""
self.SendNotification(change,
'%(queue)s failed to submit your change in %(build_log)s . '
'%(error)s', error=error)
self.RemoveCommitReady(change)
@staticmethod
def _CreateValidationFailureMessage(pre_cq, change, suspects, messages,
sanity=True, infra_fail=False,
lab_fail=False, no_stat=None):
"""Create a message explaining why a validation failure occurred.
Args:
pre_cq: Whether this builder is a Pre-CQ builder.
change: The change we want to create a message for.
suspects: The set of suspect changes that we think broke the build.
messages: A list of build failure messages from supporting builders.
These must be BuildFailureMessage objects or NoneType objects.
sanity: A boolean indicating whether the build was considered sane. If
not sane, none of the changes will have their CommitReady bit modified.
infra_fail: The build failed purely due to infrastructure failures.
lab_fail: The build failed purely due to test lab infrastructure failures.
no_stat: A list of builders which failed prematurely without reporting
status.
"""
msg = []
if no_stat:
msg.append('The following build(s) did not start or failed prematurely:')
msg.append(', '.join(no_stat))
if messages:
# Build a list of error messages. We don't want to build a ridiculously
# long comment, as Gerrit will reject it. See http://crbug.com/236831
max_error_len = 20000 / max(1, len(messages))
msg.append('The following build(s) failed:')
for message in map(str, messages):
if len(message) > max_error_len:
message = message[:max_error_len] + '... (truncated)'
msg.append(message)
# Create a list of changes other than this one that might be guilty.
# Limit the number of suspects to 20 so that the list of suspects isn't
# ridiculously long.
max_suspects = 20
other_suspects = suspects - set([change])
if len(other_suspects) < max_suspects:
other_suspects_str = ', '.join(sorted(
'CL:%s' % x.gerrit_number_str for x in other_suspects))
else:
other_suspects_str = ('%d other changes. See the blamelist for more '
'details.' % (len(other_suspects),))
will_retry_automatically = False
if not sanity:
      msg.append('The build was considered not sane because the sanity check '
'builder(s) failed. Your change will not be blamed for the '
'failure.')
will_retry_automatically = True
elif lab_fail:
msg.append('The build encountered Chrome OS Lab infrastructure issues. '
                 'Your change will not be blamed for the failure.')
will_retry_automatically = True
else:
if infra_fail:
msg.append('The build failure may have been caused by infrastructure '
'issues and/or bad chromite changes.')
if change in suspects:
if other_suspects_str:
msg.append('Your change may have caused this failure. There are '
'also other changes that may be at fault: %s'
% other_suspects_str)
else:
msg.append('This failure was probably caused by your change.')
msg.append('Please check whether the failure is your fault. If your '
'change is not at fault, you may mark it as ready again.')
else:
if len(suspects) == 1:
msg.append('This failure was probably caused by %s'
% other_suspects_str)
elif len(suspects) > 0:
msg.append('One of the following changes is probably at fault: %s'
% other_suspects_str)
will_retry_automatically = not pre_cq
if will_retry_automatically:
msg.insert(
0, 'NOTE: The Commit Queue will retry your change automatically.')
return '\n\n'.join(msg)
def _ChangeFailedValidation(self, change, messages, suspects, sanity,
infra_fail, lab_fail, no_stat):
"""Handles a validation failure for an individual change.
Args:
change: The change to mark as failed.
messages: A list of build failure messages from supporting builders.
These must be BuildFailureMessage objects.
suspects: The list of changes that are suspected of breaking the build.
sanity: A boolean indicating whether the build was considered sane. If
not sane, none of the changes will have their CommitReady bit modified.
infra_fail: The build failed purely due to infrastructure failures.
lab_fail: The build failed purely due to test lab infrastructure failures.
no_stat: A list of builders which failed prematurely without reporting
status.
"""
msg = self._CreateValidationFailureMessage(
self.pre_cq, change, suspects, messages,
sanity, infra_fail, lab_fail, no_stat)
self.SendNotification(change, '%(details)s', details=msg)
if sanity:
if change in suspects:
self.RemoveCommitReady(change)
# Mark the change as failed. If the Ready bit is still set, the change
# will be retried automatically.
self.UpdateCLStatus(self.bot, change, self.STATUS_FAILED,
dry_run=self.dryrun)
def HandleValidationFailure(self, messages, changes=None, sanity=True,
no_stat=None):
"""Handles a list of validation failure messages from slave builders.
This handler parses a list of failure messages from our list of builders
and calculates which changes were likely responsible for the failure. The
changes that were responsible for the failure have their Commit Ready bit
stripped and the other changes are left marked as Commit Ready.
Args:
messages: A list of build failure messages from supporting builders.
These must be BuildFailureMessage objects or NoneType objects.
changes: A list of cros_patch.GerritPatch instances to mark as failed.
By default, mark all of the changes as failed.
sanity: A boolean indicating whether the build was considered sane. If
not sane, none of the changes will have their CommitReady bit modified.
no_stat: A list of builders which failed prematurely without reporting
status. If not None, this implies there were infrastructure issues.
"""
if changes is None:
changes = self.changes
# Reload the changes to get latest statuses of the changes.
changes = self.ReloadChanges(changes)
candidates = []
for change in changes:
# Pre-CQ ignores changes that were already verified.
if self.pre_cq and self.GetCLStatus(PRE_CQ, change) == self.STATUS_PASSED:
continue
candidates.append(change)
suspects = set()
infra_fail = lab_fail = False
if sanity:
# If the build was sane, determine the cause of the failures and
# the changes that are likely at fault for the failure.
lab_fail = CalculateSuspects.OnlyLabFailures(messages, no_stat)
infra_fail = CalculateSuspects.OnlyInfraFailures(messages, no_stat)
suspects = CalculateSuspects.FindSuspects(
self.build_root, candidates, messages, infra_fail=infra_fail,
lab_fail=lab_fail)
# Send out failure notifications for each change.
inputs = [[change, messages, suspects, sanity, infra_fail,
lab_fail, no_stat] for change in candidates]
parallel.RunTasksInProcessPool(self._ChangeFailedValidation, inputs)
def HandleCouldNotApply(self, change):
"""Handler for when Paladin fails to apply a change.
    This handler strips the Commit Ready bit, forcing the developer
    to re-upload a rebased change, as theirs failed to apply cleanly.
Args:
change: GerritPatch instance to operate upon.
"""
msg = '%(queue)s failed to apply your change in %(build_log)s . '
# This is written this way to protect against bugs in CQ itself. We log
    # it to the build output, and also mark the change with it.
extra_msg = getattr(change, 'apply_error_message', None)
if extra_msg is None:
logging.error(
'Change %s was passed to HandleCouldNotApply without an appropriate '
'apply_error_message set. Internal bug.', change)
extra_msg = (
          'Internal CQ issue: extra error info was not given. Please contact '
'the build team and ensure they are aware of this specific change '
'failing.')
msg += extra_msg
self.SendNotification(change, msg)
self.RemoveCommitReady(change)
def _HandleApplySuccess(self, change):
"""Handler for when Paladin successfully applies a change.
This handler notifies a developer that their change is being tried as
part of a Paladin run defined by a build_log.
Args:
change: GerritPatch instance to operate upon.
"""
if self.pre_cq:
status = self.GetCLStatus(self.bot, change)
if status == self.STATUS_PASSED:
return
msg = ('%(queue)s has picked up your change. '
'You can follow along at %(build_log)s .')
self.SendNotification(change, msg)
if not self.pre_cq or status == self.STATUS_LAUNCHING:
self.UpdateCLStatus(self.bot, change, self.STATUS_INFLIGHT,
dry_run=self.dryrun)
@classmethod
def GetCLStatusURL(cls, bot, change, latest_patchset_only=True):
"""Get the status URL for |change| on |bot|.
Args:
bot: Which bot to look at. Can be CQ or PRE_CQ.
change: GerritPatch instance to operate upon.
latest_patchset_only: If True, return the URL for tracking the latest
patchset. If False, return the URL for tracking all patchsets. Defaults
to True.
Returns:
The status URL, as a string.
"""
internal = 'int' if change.internal else 'ext'
components = [constants.MANIFEST_VERSIONS_GS_URL, bot,
internal, str(change.gerrit_number)]
if latest_patchset_only:
components.append(str(change.patch_number))
return '/'.join(components)
@classmethod
def GetCLStatus(cls, bot, change):
"""Get the status for |change| on |bot|.
Args:
change: GerritPatch instance to operate upon.
bot: Which bot to look at. Can be CQ or PRE_CQ.
Returns:
The status, as a string.
"""
url = cls.GetCLStatusURL(bot, change)
ctx = gs.GSContext()
try:
return ctx.Cat(url).output
except gs.GSNoSuchKey:
logging.debug('No status yet for %r', url)
return None
@classmethod
def UpdateCLStatus(cls, bot, change, status, dry_run):
"""Update the |status| of |change| on |bot|."""
for latest_patchset_only in (False, True):
url = cls.GetCLStatusURL(bot, change, latest_patchset_only)
ctx = gs.GSContext(dry_run=dry_run)
ctx.Copy('-', url, input=status)
ctx.Counter('%s/%s' % (url, status)).Increment()
@classmethod
def GetCLStatusCount(cls, bot, change, status, latest_patchset_only=True):
"""Return how many times |change| has been set to |status| on |bot|.
Args:
bot: Which bot to look at. Can be CQ or PRE_CQ.
change: GerritPatch instance to operate upon.
status: The status string to look for.
      latest_patchset_only: If True, count only how many times the latest
        patchset has been set to |status|. If False, count how many times any
        patchset has been set to |status|. Defaults to True.
Returns:
The number of times |change| has been set to |status| on |bot|, as an
integer.
"""
cache_key = (bot, change, status, latest_patchset_only)
if cache_key not in cls._CL_STATUS_CACHE:
base_url = cls.GetCLStatusURL(bot, change, latest_patchset_only)
url = '%s/%s' % (base_url, status)
cls._CL_STATUS_CACHE[cache_key] = gs.GSContext().Counter(url).Get()
return cls._CL_STATUS_CACHE[cache_key]
@classmethod
def FillCLStatusCache(cls, bot, changes, statuses=None):
"""Cache all of the stats about the given |changes| in parallel.
Args:
bot: Bot to pull down stats for.
changes: Changes to cache.
statuses: Statuses to cache. By default, cache the PASSED and FAILED
counts.
"""
if statuses is None:
statuses = (cls.STATUS_PASSED, cls.STATUS_FAILED)
inputs = []
for change in changes:
for status in statuses:
for latest_patchset_only in (False, True):
cache_key = (bot, change, status, latest_patchset_only)
if cache_key not in cls._CL_STATUS_CACHE:
inputs.append(cache_key)
with parallel.Manager() as manager:
# Grab the CL status of all of the CLs in the background, into a proxied
# dictionary.
cls._CL_STATUS_CACHE = manager.dict(cls._CL_STATUS_CACHE)
parallel.RunTasksInProcessPool(cls.GetCLStatusCount, inputs)
# Convert the cache back into a regular dictionary before we shut down
# the manager.
cls._CL_STATUS_CACHE = dict(cls._CL_STATUS_CACHE)
def CreateDisjointTransactions(self, manifest, max_txn_length=None):
"""Create a list of disjoint transactions from the changes in the pool.
Args:
manifest: Manifest to use.
max_txn_length: The maximum length of any given transaction. Optional.
By default, do not limit the length of transactions.
Returns:
A list of disjoint transactions. Each transaction can be tried
independently, without involving patches from other transactions.
      Each change in the pool will be included in exactly one of the
      transactions, unless the patch does not apply for some reason.
"""
patches = PatchSeries(self.build_root, forced_manifest=manifest)
plans, failed = patches.CreateDisjointTransactions(
self.changes, max_txn_length=max_txn_length)
failed = self._FilterDependencyErrors(failed)
if failed:
self._HandleApplyFailure(failed)
return plans
class PaladinMessage():
"""An object that is used to send messages to developers about their changes.
"""
# URL where Paladin documentation is stored.
_PALADIN_DOCUMENTATION_URL = ('http://www.chromium.org/developers/'
'tree-sheriffs/sheriff-details-chromium-os/'
'commit-queue-overview')
# Gerrit can't handle commands over 32768 bytes. See http://crbug.com/236831
MAX_MESSAGE_LEN = 32000
def __init__(self, message, patch, helper):
if len(message) > self.MAX_MESSAGE_LEN:
message = message[:self.MAX_MESSAGE_LEN] + '... (truncated)'
self.message = message
self.patch = patch
self.helper = helper
def _ConstructPaladinMessage(self):
"""Adds any standard Paladin messaging to an existing message."""
return self.message + ('\n\nCommit queue documentation: %s' %
self._PALADIN_DOCUMENTATION_URL)
def Send(self, dryrun):
"""Posts a comment to a gerrit review."""
body = {
'message': self._ConstructPaladinMessage(),
'notify': 'OWNER',
}
path = 'changes/%s/revisions/%s/review' % (
self.patch.gerrit_number, self.patch.revision)
if dryrun:
logging.info('Would have sent %r to %s', body, path)
return
gob_util.FetchUrl(self.helper.host, path, reqtype='POST', body=body)<|fim▁end|>
|
FailedToSubmitAllChangesException):
"""Raised if we fail to submit any change due to non-fatal errors."""
|
<|file_name|>main.ts<|end_file_name|><|fim▁begin|>import {bootstrap, Component, View, NgIf, bind, Inject, Observable, FormBuilder} from 'angular2/angular2';
import {People} from '../people/people';
@Component({
selector: 'main'
})
@View({
templateUrl: './components/main/main.html',
directives: [People]
})<|fim▁hole|>export class Main {
constructor() { }
}<|fim▁end|>
| |
<|file_name|>vec-matching-autoslice.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(slice_patterns)]
pub fn main() {
let x = [1, 2, 3];
match x {<|fim▁hole|> [_, _, _] => panic!(),
}
let y = ([(1, true), (2, false)], 0.5f64);
match y {
([(1, a), (b, false)], _) => {
assert_eq!(a, true);
assert_eq!(b, 2);
}
([_, _], 0.5) => panic!(),
([_, _], _) => panic!(),
}
}<|fim▁end|>
|
[2, _, _] => panic!(),
[1, a, b] => {
assert!([a, b] == [2, 3]);
}
|
<|file_name|>index.d.ts<|end_file_name|><|fim▁begin|>// Type definitions for Google Drive API v3 3.0
// Project: https://developers.google.com/drive/
// Definitions by: Bolisov Alexey <https://github.com/Bolisov>
// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped
// TypeScript Version: 2.3
// IMPORTANT
// This file was generated by https://github.com/Bolisov/google-api-typings-generator. Please do not edit it manually.
// In case of any problems please post issue to https://github.com/Bolisov/google-api-typings-generator
// Generated from: https://www.googleapis.com/discovery/v1/apis/drive/v3/rest
/// <reference types="gapi.client" />
declare namespace gapi.client {
/** Load Drive API v3 */
function load(name: "drive", version: "v3"): PromiseLike<void>;
function load(name: "drive", version: "v3", callback: () => any): void;
const about: drive.AboutResource;
const changes: drive.ChangesResource;
const channels: drive.ChannelsResource;
const comments: drive.CommentsResource;
const files: drive.FilesResource;
const permissions: drive.PermissionsResource;
const replies: drive.RepliesResource;
const revisions: drive.RevisionsResource;
const teamdrives: drive.TeamdrivesResource;
namespace drive {
interface About {
/** Whether the user has installed the requesting app. */
appInstalled?: boolean;
/** A map of source MIME type to possible targets for all supported exports. */
exportFormats?: Record<string, string[]>;
/** The currently supported folder colors as RGB hex strings. */
folderColorPalette?: string[];
/** A map of source MIME type to possible targets for all supported imports. */
importFormats?: Record<string, string[]>;
/** Identifies what kind of resource this is. Value: the fixed string "drive#about". */
kind?: string;
/** A map of maximum import sizes by MIME type, in bytes. */
maxImportSizes?: Record<string, string>;
/** The maximum upload size in bytes. */
maxUploadSize?: string;
/** The user's storage quota limits and usage. All fields are measured in bytes. */
storageQuota?: {
/** The usage limit, if applicable. This will not be present if the user has unlimited storage. */
limit?: string;
/** The total usage across all services. */
usage?: string;
/** The usage by all files in Google Drive. */
usageInDrive?: string;
/** The usage by trashed files in Google Drive. */
usageInDriveTrash?: string;
};
/** A list of themes that are supported for Team Drives. */
teamDriveThemes?: Array<{
/** A link to this Team Drive theme's background image. */
backgroundImageLink?: string;
/** The color of this Team Drive theme as an RGB hex string. */
colorRgb?: string;
/** The ID of the theme. */
id?: string;
}>;
/** The authenticated user. */
user?: User;
}
interface Change {
/** The updated state of the file. Present if the type is file and the file has not been removed from this list of changes. */
file?: File;
/** The ID of the file which has changed. */
fileId?: string;
/** Identifies what kind of resource this is. Value: the fixed string "drive#change". */
kind?: string;
/** Whether the file or Team Drive has been removed from this list of changes, for example by deletion or loss of access. */
removed?: boolean;
/**
* The updated state of the Team Drive. Present if the type is teamDrive, the user is still a member of the Team Drive, and the Team Drive has not been
* removed.
*/
teamDrive?: TeamDrive;
/** The ID of the Team Drive associated with this change. */
teamDriveId?: string;
/** The time of this change (RFC 3339 date-time). */
time?: string;
/** The type of the change. Possible values are file and teamDrive. */
type?: string;
}
interface ChangeList {
/** The list of changes. If nextPageToken is populated, then this list may be incomplete and an additional page of results should be fetched. */
changes?: Change[];
/** Identifies what kind of resource this is. Value: the fixed string "drive#changeList". */
kind?: string;
/** The starting page token for future changes. This will be present only if the end of the current changes list has been reached. */
newStartPageToken?: string;
/**
* The page token for the next page of changes. This will be absent if the end of the changes list has been reached. If the token is rejected for any
* reason, it should be discarded, and pagination should be restarted from the first page of results.
*/
nextPageToken?: string;
}
interface Channel {
/** The address where notifications are delivered for this channel. */
address?: string;
/** Date and time of notification channel expiration, expressed as a Unix timestamp, in milliseconds. Optional. */
expiration?: string;
/** A UUID or similar unique string that identifies this channel. */
id?: string;
/** Identifies this as a notification channel used to watch for changes to a resource. Value: the fixed string "api#channel". */
kind?: string;
/** Additional parameters controlling delivery channel behavior. Optional. */
params?: Record<string, string>;
/** A Boolean value to indicate whether payload is wanted. Optional. */
payload?: boolean;
/** An opaque ID that identifies the resource being watched on this channel. Stable across different API versions. */
resourceId?: string;
/** A version-specific identifier for the watched resource. */
resourceUri?: string;
/** An arbitrary string delivered to the target address with each notification delivered over this channel. Optional. */
token?: string;
/** The type of delivery mechanism used for this channel. */
type?: string;
}
interface Comment {
/** A region of the document represented as a JSON string. See anchor documentation for details on how to define and interpret anchor properties. */
anchor?: string;
/** The user who created the comment. */
author?: User;
/** The plain text content of the comment. This field is used for setting the content, while htmlContent should be displayed. */
content?: string;
/** The time at which the comment was created (RFC 3339 date-time). */
createdTime?: string;
/** Whether the comment has been deleted. A deleted comment has no content. */
deleted?: boolean;
/** The content of the comment with HTML formatting. */
htmlContent?: string;
/** The ID of the comment. */
id?: string;
/** Identifies what kind of resource this is. Value: the fixed string "drive#comment". */
kind?: string;
/** The last time the comment or any of its replies was modified (RFC 3339 date-time). */
modifiedTime?: string;
/**
* The file content to which the comment refers, typically within the anchor region. For a text file, for example, this would be the text at the location
* of the comment.
*/
quotedFileContent?: {
/** The MIME type of the quoted content. */
mimeType?: string;
/** The quoted content itself. This is interpreted as plain text if set through the API. */
value?: string;
};
/** The full list of replies to the comment in chronological order. */
replies?: Reply[];
/** Whether the comment has been resolved by one of its replies. */
resolved?: boolean;
}
interface CommentList {
/** The list of comments. If nextPageToken is populated, then this list may be incomplete and an additional page of results should be fetched. */
comments?: Comment[];
/** Identifies what kind of resource this is. Value: the fixed string "drive#commentList". */
kind?: string;
/**
* The page token for the next page of comments. This will be absent if the end of the comments list has been reached. If the token is rejected for any
* reason, it should be discarded, and pagination should be restarted from the first page of results.
*/
nextPageToken?: string;
}
interface File {
/**
* A collection of arbitrary key-value pairs which are private to the requesting app.
* Entries with null values are cleared in update and copy requests.
*/
appProperties?: Record<string, string>;
/** Capabilities the current user has on this file. Each capability corresponds to a fine-grained action that a user may take. */
capabilities?: {
/** Whether the current user can add children to this folder. This is always false when the item is not a folder. */
canAddChildren?: boolean;
/** Whether the current user can change whether viewers can copy the contents of this file. */
canChangeViewersCanCopyContent?: boolean;
/** Whether the current user can comment on this file. */
canComment?: boolean;
/**
* Whether the current user can copy this file. For a Team Drive item, whether the current user can copy non-folder descendants of this item, or this item
* itself if it is not a folder.
*/
canCopy?: boolean;
/** Whether the current user can delete this file. */
canDelete?: boolean;
/** Whether the current user can download this file. */
canDownload?: boolean;
/** Whether the current user can edit this file. */
canEdit?: boolean;
/** Whether the current user can list the children of this folder. This is always false when the item is not a folder. */
canListChildren?: boolean;
/** Whether the current user can move this item into a Team Drive. If the item is in a Team Drive, this field is equivalent to canMoveTeamDriveItem. */
canMoveItemIntoTeamDrive?: boolean;
/**
* Whether the current user can move this Team Drive item by changing its parent. Note that a request to change the parent for this item may still fail
* depending on the new parent that is being added. Only populated for Team Drive files.
*/
canMoveTeamDriveItem?: boolean;
/**
* Whether the current user can read the revisions resource of this file. For a Team Drive item, whether revisions of non-folder descendants of this item,
* or this item itself if it is not a folder, can be read.
*/
canReadRevisions?: boolean;
/** Whether the current user can read the Team Drive to which this file belongs. Only populated for Team Drive files. */
canReadTeamDrive?: boolean;
/** Whether the current user can remove children from this folder. This is always false when the item is not a folder. */
canRemoveChildren?: boolean;
/** Whether the current user can rename this file. */
canRename?: boolean;
/** Whether the current user can modify the sharing settings for this file. */
canShare?: boolean;
/** Whether the current user can move this file to trash. */
canTrash?: boolean;
/** Whether the current user can restore this file from trash. */
canUntrash?: boolean;
};
/** Additional information about the content of the file. These fields are never populated in responses. */
contentHints?: {
/** Text to be indexed for the file to improve fullText queries. This is limited to 128KB in length and may contain HTML elements. */
indexableText?: string;
/** A thumbnail for the file. This will only be used if Drive cannot generate a standard thumbnail. */
thumbnail?: {
/** The thumbnail data encoded with URL-safe Base64 (RFC 4648 section 5). */
image?: string;
/** The MIME type of the thumbnail. */
mimeType?: string;
};
};
/** The time at which the file was created (RFC 3339 date-time). */
createdTime?: string;
/** A short description of the file. */
description?: string;
/** Whether the file has been explicitly trashed, as opposed to recursively trashed from a parent folder. */
explicitlyTrashed?: boolean;
/** The final component of fullFileExtension. This is only available for files with binary content in Drive. */
fileExtension?: string;
/**
* The color for a folder as an RGB hex string. The supported colors are published in the folderColorPalette field of the About resource.
* If an unsupported color is specified, the closest color in the palette will be used instead.
*/
folderColorRgb?: string;
/**
* The full file extension extracted from the name field. May contain multiple concatenated extensions, such as "tar.gz". This is only available for files
* with binary content in Drive.
* This is automatically updated when the name field changes, however it is not cleared if the new name does not contain a valid extension.
*/
fullFileExtension?: string;
/** Whether any users are granted file access directly on this file. This field is only populated for Team Drive files. */
hasAugmentedPermissions?: boolean;
/**
* Whether this file has a thumbnail. This does not indicate whether the requesting app has access to the thumbnail. To check access, look for the
* presence of the thumbnailLink field.
*/
hasThumbnail?: boolean;
/** The ID of the file's head revision. This is currently only available for files with binary content in Drive. */
headRevisionId?: string;
/** A static, unauthenticated link to the file's icon. */
iconLink?: string;
/** The ID of the file. */
id?: string;
/** Additional metadata about image media, if available. */
imageMediaMetadata?: {
/** The aperture used to create the photo (f-number). */
aperture?: number;
/** The make of the camera used to create the photo. */
cameraMake?: string;
/** The model of the camera used to create the photo. */
cameraModel?: string;
/** The color space of the photo. */
colorSpace?: string;
/** The exposure bias of the photo (APEX value). */
exposureBias?: number;
/** The exposure mode used to create the photo. */
exposureMode?: string;
/** The length of the exposure, in seconds. */
exposureTime?: number;
/** Whether a flash was used to create the photo. */
flashUsed?: boolean;
/** The focal length used to create the photo, in millimeters. */
focalLength?: number;
/** The height of the image in pixels. */
height?: number;
/** The ISO speed used to create the photo. */
isoSpeed?: number;
/** The lens used to create the photo. */
lens?: string;
/** Geographic location information stored in the image. */
location?: {
/** The altitude stored in the image. */
altitude?: number;
/** The latitude stored in the image. */
latitude?: number;
/** The longitude stored in the image. */
longitude?: number;
};
/** The smallest f-number of the lens at the focal length used to create the photo (APEX value). */
maxApertureValue?: number;
/** The metering mode used to create the photo. */
meteringMode?: string;
/** The rotation in clockwise degrees from the image's original orientation. */
rotation?: number;
/** The type of sensor used to create the photo. */
sensor?: string;
/** The distance to the subject of the photo, in meters. */
subjectDistance?: number;
/** The date and time the photo was taken (EXIF DateTime). */
time?: string;
/** The white balance mode used to create the photo. */
whiteBalance?: string;
/** The width of the image in pixels. */
width?: number;
};
/** Whether the file was created or opened by the requesting app. */
isAppAuthorized?: boolean;
/** Identifies what kind of resource this is. Value: the fixed string "drive#file". */
kind?: string;
/** The last user to modify the file. */
lastModifyingUser?: User;
/** The MD5 checksum for the content of the file. This is only applicable to files with binary content in Drive. */
md5Checksum?: string;
/**
* The MIME type of the file.
* Drive will attempt to automatically detect an appropriate value from uploaded content if no value is provided. The value cannot be changed unless a new
* revision is uploaded.
* If a file is created with a Google Doc MIME type, the uploaded content will be imported if possible. The supported import formats are published in the
* About resource.
*/
mimeType?: string;
/** Whether the file has been modified by this user. */
modifiedByMe?: boolean;
/** The last time the file was modified by the user (RFC 3339 date-time). */
modifiedByMeTime?: string;
/**
* The last time the file was modified by anyone (RFC 3339 date-time).
* Note that setting modifiedTime will also update modifiedByMeTime for the user.
*/
modifiedTime?: string;
/**
* The name of the file. This is not necessarily unique within a folder. Note that for immutable items such as the top level folders of Team Drives, My
* Drive root folder, and Application Data folder the name is constant.
*/
name?: string;
/**
* The original filename of the uploaded content if available, or else the original value of the name field. This is only available for files with binary
* content in Drive.
*/
originalFilename?: string;
/** Whether the user owns the file. Not populated for Team Drive files. */
ownedByMe?: boolean;
/** The owners of the file. Currently, only certain legacy files may have more than one owner. Not populated for Team Drive files. */
owners?: User[];
/**
* The IDs of the parent folders which contain the file.
* If not specified as part of a create request, the file will be placed directly in the My Drive folder. Update requests must use the addParents and
* removeParents parameters to modify the values.
*/
parents?: string[];
/** List of permission IDs for users with access to this file. */
permissionIds?: string[];
/** The full list of permissions for the file. This is only available if the requesting user can share the file. Not populated for Team Drive files. */
permissions?: Permission[];
/**
* A collection of arbitrary key-value pairs which are visible to all apps.
* Entries with null values are cleared in update and copy requests.
*/
properties?: Record<string, string>;
/** The number of storage quota bytes used by the file. This includes the head revision as well as previous revisions with keepForever enabled. */
quotaBytesUsed?: string;
/** Whether the file has been shared. Not populated for Team Drive files. */
shared?: boolean;
/** The time at which the file was shared with the user, if applicable (RFC 3339 date-time). */
sharedWithMeTime?: string;
/** The user who shared the file with the requesting user, if applicable. */
sharingUser?: User;
/** The size of the file's content in bytes. This is only applicable to files with binary content in Drive. */
size?: string;
/** The list of spaces which contain the file. The currently supported values are 'drive', 'appDataFolder' and 'photos'. */
spaces?: string[];
/** Whether the user has starred the file. */
starred?: boolean;
/** ID of the Team Drive the file resides in. */
teamDriveId?: string;
/**
* A short-lived link to the file's thumbnail, if available. Typically lasts on the order of hours. Only populated when the requesting app can access the
* file's content.
*/
thumbnailLink?: string;
/** The thumbnail version for use in thumbnail cache invalidation. */
thumbnailVersion?: string;
/**
* Whether the file has been trashed, either explicitly or from a trashed parent folder. Only the owner may trash a file, and other users cannot see files
* in the owner's trash.
*/
trashed?: boolean;
/** The time that the item was trashed (RFC 3339 date-time). Only populated for Team Drive files. */
trashedTime?: string;
/** If the file has been explicitly trashed, the user who trashed it. Only populated for Team Drive files. */
trashingUser?: User;
/** A monotonically increasing version number for the file. This reflects every change made to the file on the server, even those not visible to the user. */
version?: string;
/** Additional metadata about video media. This may not be available immediately upon upload. */
videoMediaMetadata?: {
/** The duration of the video in milliseconds. */
durationMillis?: string;
/** The height of the video in pixels. */
height?: number;
/** The width of the video in pixels. */
width?: number;
};
/** Whether the file has been viewed by this user. */
viewedByMe?: boolean;
/** The last time the file was viewed by the user (RFC 3339 date-time). */
viewedByMeTime?: string;
/** Whether users with only reader or commenter permission can copy the file's content. This affects copy, download, and print operations. */
viewersCanCopyContent?: boolean;
/** A link for downloading the content of the file in a browser. This is only available for files with binary content in Drive. */
webContentLink?: string;
/** A link for opening the file in a relevant Google editor or viewer in a browser. */
webViewLink?: string;
/** Whether users with only writer permission can modify the file's permissions. Not populated for Team Drive files. */
writersCanShare?: boolean;
}
interface FileList {
/** The list of files. If nextPageToken is populated, then this list may be incomplete and an additional page of results should be fetched. */
files?: File[];
/**
* Whether the search process was incomplete. If true, then some search results may be missing, since all documents were not searched. This may occur when
* searching multiple Team Drives with the "user,allTeamDrives" corpora, but all corpora could not be searched. When this happens, it is suggested that
* clients narrow their query by choosing a different corpus such as "user" or "teamDrive".
*/
incompleteSearch?: boolean;
/** Identifies what kind of resource this is. Value: the fixed string "drive#fileList". */
kind?: string;
/**
* The page token for the next page of files. This will be absent if the end of the files list has been reached. If the token is rejected for any reason,
* it should be discarded, and pagination should be restarted from the first page of results.
*/
nextPageToken?: string;
}
interface GeneratedIds {
/** The IDs generated for the requesting user in the specified space. */
ids?: string[];
/** Identifies what kind of resource this is. Value: the fixed string "drive#generatedIds". */
kind?: string;
/** The type of file that can be created with these IDs. */
space?: string;
}
interface Permission {
/** Whether the permission allows the file to be discovered through search. This is only applicable for permissions of type domain or anyone. */
allowFileDiscovery?: boolean;
/** Whether the account associated with this permission has been deleted. This field only pertains to user and group permissions. */
deleted?: boolean;
/** A displayable name for users, groups or domains. */
displayName?: string;
/** The domain to which this permission refers. */
domain?: string;
/** The email address of the user or group to which this permission refers. */
emailAddress?: string;
/**
* The time at which this permission will expire (RFC 3339 date-time). Expiration times have the following restrictions:
* - They can only be set on user and group permissions
* - The time must be in the future
* - The time cannot be more than a year in the future
*/
expirationTime?: string;
/** The ID of this permission. This is a unique identifier for the grantee, and is published in User resources as permissionId. */
id?: string;
/** Identifies what kind of resource this is. Value: the fixed string "drive#permission". */
kind?: string;
/** A link to the user's profile photo, if available. */
photoLink?: string;
/**
* The role granted by this permission. While new values may be supported in the future, the following are currently allowed:
* - organizer
* - owner
* - writer
* - commenter
* - reader
*/
role?: string;
/**
* Details of whether the permissions on this Team Drive item are inherited or directly on this item. This is an output-only field which is present only
* for Team Drive items.
*/
teamDrivePermissionDetails?: Array<{
/** Whether this permission is inherited. This field is always populated. This is an output-only field. */
inherited?: boolean;
/** The ID of the item from which this permission is inherited. This is an output-only field and is only populated for members of the Team Drive. */
inheritedFrom?: string;
/**
* The primary role for this user. While new values may be added in the future, the following are currently possible:
* - organizer
* - writer
* - commenter
* - reader
*/
role?: string;
/**
* The Team Drive permission type for this user. While new values may be added in future, the following are currently possible:
* - file
* - member
*/
teamDrivePermissionType?: string;
}>;
/**
* The type of the grantee. Valid values are:
* - user
* - group
* - domain
* - anyone
*/
type?: string;
}
interface PermissionList {
/** Identifies what kind of resource this is. Value: the fixed string "drive#permissionList". */
kind?: string;
/**
* The page token for the next page of permissions. This field will be absent if the end of the permissions list has been reached. If the token is
* rejected for any reason, it should be discarded, and pagination should be restarted from the first page of results.
*/
nextPageToken?: string;
/** The list of permissions. If nextPageToken is populated, then this list may be incomplete and an additional page of results should be fetched. */
permissions?: Permission[];
}
interface Reply {
/**
* The action the reply performed to the parent comment. Valid values are:
* - resolve
* - reopen
*/
action?: string;
/** The user who created the reply. */
author?: User;
/**
* The plain text content of the reply. This field is used for setting the content, while htmlContent should be displayed. This is required on creates if
* no action is specified.
*/
content?: string;
/** The time at which the reply was created (RFC 3339 date-time). */
createdTime?: string;
/** Whether the reply has been deleted. A deleted reply has no content. */
deleted?: boolean;
/** The content of the reply with HTML formatting. */
htmlContent?: string;
/** The ID of the reply. */
id?: string;
/** Identifies what kind of resource this is. Value: the fixed string "drive#reply". */
kind?: string;
/** The last time the reply was modified (RFC 3339 date-time). */
modifiedTime?: string;
}
interface ReplyList {
/** Identifies what kind of resource this is. Value: the fixed string "drive#replyList". */
kind?: string;
/**
* The page token for the next page of replies. This will be absent if the end of the replies list has been reached. If the token is rejected for any
* reason, it should be discarded, and pagination should be restarted from the first page of results.
*/
nextPageToken?: string;
/** The list of replies. If nextPageToken is populated, then this list may be incomplete and an additional page of results should be fetched. */
replies?: Reply[];
}
interface Revision {
/** The ID of the revision. */
id?: string;
/**
* Whether to keep this revision forever, even if it is no longer the head revision. If not set, the revision will be automatically purged 30 days after
* newer content is uploaded. This can be set on a maximum of 200 revisions for a file.
* This field is only applicable to files with binary content in Drive.
*/
keepForever?: boolean;
/** Identifies what kind of resource this is. Value: the fixed string "drive#revision". */
kind?: string;
/** The last user to modify this revision. */
lastModifyingUser?: User;
/** The MD5 checksum of the revision's content. This is only applicable to files with binary content in Drive. */
md5Checksum?: string;
/** The MIME type of the revision. */
mimeType?: string;
/** The last time the revision was modified (RFC 3339 date-time). */
modifiedTime?: string;
/** The original filename used to create this revision. This is only applicable to files with binary content in Drive. */
originalFilename?: string;
/** Whether subsequent revisions will be automatically republished. This is only applicable to Google Docs. */
publishAuto?: boolean;
/** Whether this revision is published. This is only applicable to Google Docs. */
published?: boolean;
/** Whether this revision is published outside the domain. This is only applicable to Google Docs. */
publishedOutsideDomain?: boolean;
/** The size of the revision's content in bytes. This is only applicable to files with binary content in Drive. */
size?: string;
}
interface RevisionList {
/** Identifies what kind of resource this is. Value: the fixed string "drive#revisionList". */
kind?: string;
/**
* The page token for the next page of revisions. This will be absent if the end of the revisions list has been reached. If the token is rejected for any
* reason, it should be discarded, and pagination should be restarted from the first page of results.
*/
nextPageToken?: string;
/** The list of revisions. If nextPageToken is populated, then this list may be incomplete and an additional page of results should be fetched. */
revisions?: Revision[];
}
interface StartPageToken {
/** Identifies what kind of resource this is. Value: the fixed string "drive#startPageToken". */
kind?: string;
/** The starting page token for listing changes. */
startPageToken?: string;
}
interface TeamDrive {
/**
* An image file and cropping parameters from which a background image for this Team Drive is set. This is a write only field; it can only be set on
* drive.teamdrives.update requests that don't set themeId. When specified, all fields of the backgroundImageFile must be set.
*/
backgroundImageFile?: {
/** The ID of an image file in Drive to use for the background image. */
id?: string;
/**
* The width of the cropped image in the closed range of 0 to 1. This value represents the width of the cropped image divided by the width of the entire
* image. The height is computed by applying a width to height aspect ratio of 80 to 9. The resulting image must be at least 1280 pixels wide and 144
* pixels high.
*/
width?: number;
/**
* The X coordinate of the upper left corner of the cropping area in the background image. This is a value in the closed range of 0 to 1. This value
* represents the horizontal distance from the left side of the entire image to the left side of the cropping area divided by the width of the entire
* image.
*/
xCoordinate?: number;
/**
* The Y coordinate of the upper left corner of the cropping area in the background image. This is a value in the closed range of 0 to 1. This value
* represents the vertical distance from the top side of the entire image to the top side of the cropping area divided by the height of the entire image.
*/
yCoordinate?: number;
};
/** A short-lived link to this Team Drive's background image. */
backgroundImageLink?: string;
/** Capabilities the current user has on this Team Drive. */
capabilities?: {
/** Whether the current user can add children to folders in this Team Drive. */
canAddChildren?: boolean;
/** Whether the current user can change the background of this Team Drive. */
canChangeTeamDriveBackground?: boolean;
/** Whether the current user can comment on files in this Team Drive. */
canComment?: boolean;
/** Whether the current user can copy files in this Team Drive. */
canCopy?: boolean;
/**
* Whether the current user can delete this Team Drive. Attempting to delete the Team Drive may still fail if there are untrashed items inside the Team
* Drive.
*/
canDeleteTeamDrive?: boolean;
/** Whether the current user can download files in this Team Drive. */
canDownload?: boolean;
/** Whether the current user can edit files in this Team Drive */
canEdit?: boolean;
/** Whether the current user can list the children of folders in this Team Drive. */
canListChildren?: boolean;
/** Whether the current user can add members to this Team Drive or remove them or change their role. */
canManageMembers?: boolean;
/** Whether the current user can read the revisions resource of files in this Team Drive. */
canReadRevisions?: boolean;
/** Whether the current user can remove children from folders in this Team Drive. */
canRemoveChildren?: boolean;
/** Whether the current user can rename files or folders in this Team Drive. */
canRename?: boolean;
/** Whether the current user can rename this Team Drive. */
canRenameTeamDrive?: boolean;
/** Whether the current user can share files or folders in this Team Drive. */
canShare?: boolean;
};
/** The color of this Team Drive as an RGB hex string. It can only be set on a drive.teamdrives.update request that does not set themeId. */
colorRgb?: string;
/** The ID of this Team Drive which is also the ID of the top level folder for this Team Drive. */
id?: string;
/** Identifies what kind of resource this is. Value: the fixed string "drive#teamDrive". */
kind?: string;
/** The name of this Team Drive. */
name?: string;
/**
* The ID of the theme from which the background image and color will be set. The set of possible teamDriveThemes can be retrieved from a drive.about.get
* response. When not specified on a drive.teamdrives.create request, a random theme is chosen from which the background image and color are set. This is
* a write-only field; it can only be set on requests that don't set colorRgb or backgroundImageFile.
*/
themeId?: string;
}
interface TeamDriveList {
/** Identifies what kind of resource this is. Value: the fixed string "drive#teamDriveList". */
kind?: string;
/**
* The page token for the next page of Team Drives. This will be absent if the end of the Team Drives list has been reached. If the token is rejected for
* any reason, it should be discarded, and pagination should be restarted from the first page of results.
*/
nextPageToken?: string;
/** The list of Team Drives. If nextPageToken is populated, then this list may be incomplete and an additional page of results should be fetched. */
teamDrives?: TeamDrive[];
}
interface User {
/** A plain text displayable name for this user. */
displayName?: string;
/** The email address of the user. This may not be present in certain contexts if the user has not made their email address visible to the requester. */
emailAddress?: string;
/** Identifies what kind of resource this is. Value: the fixed string "drive#user". */
kind?: string;
/** Whether this user is the requesting user. */
me?: boolean;
/** The user's ID as visible in Permission resources. */
permissionId?: string;
/** A link to the user's profile photo, if available. */
photoLink?: string;
}
interface AboutResource {
/** Gets information about the user, the user's Drive, and system capabilities. */
get(request: {
/** Data format for the response. */
alt?: string;
/** Selector specifying which fields to include in a partial response. */
fields?: string;
/** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
key?: string;
/** OAuth 2.0 token for the current user. */
oauth_token?: string;
/** Returns response with indentations and line breaks. */
prettyPrint?: boolean;
/**
* Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* Overrides userIp if both are provided.
*/
quotaUser?: string;
/** IP address of the site where the request originates. Use this if you want to enforce per-user limits. */
userIp?: string;
}): Request<About>;
}
interface ChangesResource {
/** Gets the starting pageToken for listing future changes. */
getStartPageToken(request: {
/** Data format for the response. */
alt?: string;
/** Selector specifying which fields to include in a partial response. */
fields?: string;
/** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
key?: string;
/** OAuth 2.0 token for the current user. */
oauth_token?: string;
/** Returns response with indentations and line breaks. */
prettyPrint?: boolean;
/**
* Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* Overrides userIp if both are provided.
*/
quotaUser?: string;
/** Whether the requesting application supports Team Drives. */
supportsTeamDrives?: boolean;
/** The ID of the Team Drive for which the starting pageToken for listing future changes from that Team Drive will be returned. */
teamDriveId?: string;
/** IP address of the site where the request originates. Use this if you want to enforce per-user limits. */
userIp?: string;
}): Request<StartPageToken>;
/** Lists the changes for a user or Team Drive. */
list(request: {
/** Data format for the response. */
alt?: string;
/** Selector specifying which fields to include in a partial response. */
fields?: string;
/**
* Whether changes should include the file resource if the file is still accessible by the user at the time of the request, even when a file was removed
* from the list of changes and there will be no further change entries for this file.
*/
includeCorpusRemovals?: boolean;
/** Whether to include changes indicating that items have been removed from the list of changes, for example by deletion or loss of access. */
includeRemoved?: boolean;
/** Whether Team Drive files or changes should be included in results. */
includeTeamDriveItems?: boolean;
/** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
key?: string;
/** OAuth 2.0 token for the current user. */
oauth_token?: string;
/** The maximum number of changes to return per page. */
pageSize?: number;
/**
* The token for continuing a previous list request on the next page. This should be set to the value of 'nextPageToken' from the previous response or to
* the response from the getStartPageToken method.
*/
pageToken: string;
/** Returns response with indentations and line breaks. */
prettyPrint?: boolean;
/**
* Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* Overrides userIp if both are provided.
*/
quotaUser?: string;
/**
* Whether to restrict the results to changes inside the My Drive hierarchy. This omits changes to files such as those in the Application Data folder or
* shared files which have not been added to My Drive.
*/
restrictToMyDrive?: boolean;
/** A comma-separated list of spaces to query within the user corpus. Supported values are 'drive', 'appDataFolder' and 'photos'. */
spaces?: string;
/** Whether the requesting application supports Team Drives. */
supportsTeamDrives?: boolean;
/**
* The Team Drive from which changes will be returned. If specified the change IDs will be reflective of the Team Drive; use the combined Team Drive ID
* and change ID as an identifier.
*/
teamDriveId?: string;
/** IP address of the site where the request originates. Use this if you want to enforce per-user limits. */
userIp?: string;
}): Request<ChangeList>;
/** Subscribes to changes for a user. */
watch(request: {
/** Data format for the response. */
alt?: string;
/** Selector specifying which fields to include in a partial response. */
fields?: string;
/**
* Whether changes should include the file resource if the file is still accessible by the user at the time of the request, even when a file was removed
* from the list of changes and there will be no further change entries for this file.
*/
includeCorpusRemovals?: boolean;
/** Whether to include changes indicating that items have been removed from the list of changes, for example by deletion or loss of access. */
includeRemoved?: boolean;
/** Whether Team Drive files or changes should be included in results. */
includeTeamDriveItems?: boolean;
/** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
key?: string;
/** OAuth 2.0 token for the current user. */
oauth_token?: string;
/** The maximum number of changes to return per page. */
pageSize?: number;
/**
* The token for continuing a previous list request on the next page. This should be set to the value of 'nextPageToken' from the previous response or to
* the response from the getStartPageToken method.
*/
pageToken: string;
/** Returns response with indentations and line breaks. */
prettyPrint?: boolean;
/**
* Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* Overrides userIp if both are provided.
*/
quotaUser?: string;
/**
* Whether to restrict the results to changes inside the My Drive hierarchy. This omits changes to files such as those in the Application Data folder or
* shared files which have not been added to My Drive.
*/
restrictToMyDrive?: boolean;
/** A comma-separated list of spaces to query within the user corpus. Supported values are 'drive', 'appDataFolder' and 'photos'. */
spaces?: string;
/** Whether the requesting application supports Team Drives. */
supportsTeamDrives?: boolean;
/**
* The Team Drive from which changes will be returned. If specified the change IDs will be reflective of the Team Drive; use the combined Team Drive ID
* and change ID as an identifier.
*/
teamDriveId?: string;
/** IP address of the site where the request originates. Use this if you want to enforce per-user limits. */
userIp?: string;
}): Request<Channel>;
}
interface ChannelsResource {
/** Stop watching resources through this channel */
stop(request: {
/** Data format for the response. */
alt?: string;
/** Selector specifying which fields to include in a partial response. */
fields?: string;
/** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
key?: string;
/** OAuth 2.0 token for the current user. */
oauth_token?: string;
/** Returns response with indentations and line breaks. */
prettyPrint?: boolean;
/**
* Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* Overrides userIp if both are provided.
*/
quotaUser?: string;
/** IP address of the site where the request originates. Use this if you want to enforce per-user limits. */
userIp?: string;
}): Request<void>;
}
interface CommentsResource {
/** Creates a new comment on a file. */
create(request: {
/** Data format for the response. */
alt?: string;
/** Selector specifying which fields to include in a partial response. */
fields?: string;
/** The ID of the file. */
fileId: string;
/** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
key?: string;
/** OAuth 2.0 token for the current user. */
oauth_token?: string;
/** Returns response with indentations and line breaks. */
prettyPrint?: boolean;
/**
* Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* Overrides userIp if both are provided.
*/
quotaUser?: string;
/** IP address of the site where the request originates. Use this if you want to enforce per-user limits. */
userIp?: string;
}): Request<Comment>;
/** Deletes a comment. */
delete(request: {
/** Data format for the response. */
alt?: string;
/** The ID of the comment. */
commentId: string;
/** Selector specifying which fields to include in a partial response. */
fields?: string;
/** The ID of the file. */
fileId: string;
/** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
key?: string;
/** OAuth 2.0 token for the current user. */
oauth_token?: string;
/** Returns response with indentations and line breaks. */
prettyPrint?: boolean;
/**
* Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* Overrides userIp if both are provided.
*/
quotaUser?: string;
/** IP address of the site where the request originates. Use this if you want to enforce per-user limits. */
userIp?: string;
}): Request<void>;
/** Gets a comment by ID. */
get(request: {
/** Data format for the response. */
alt?: string;
/** The ID of the comment. */
commentId: string;
/** Selector specifying which fields to include in a partial response. */
fields?: string;
/** The ID of the file. */
fileId: string;
/** Whether to return deleted comments. Deleted comments will not include their original content. */
includeDeleted?: boolean;
/** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
key?: string;
/** OAuth 2.0 token for the current user. */
oauth_token?: string;
/** Returns response with indentations and line breaks. */
prettyPrint?: boolean;
/**
* Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* Overrides userIp if both are provided.
*/
quotaUser?: string;
/** IP address of the site where the request originates. Use this if you want to enforce per-user limits. */
userIp?: string;
}): Request<Comment>;
/** Lists a file's comments. */
list(request: {
/** Data format for the response. */
alt?: string;
/** Selector specifying which fields to include in a partial response. */
fields?: string;
/** The ID of the file. */
fileId: string;
/** Whether to include deleted comments. Deleted comments will not include their original content. */
includeDeleted?: boolean;
/** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
key?: string;
/** OAuth 2.0 token for the current user. */
oauth_token?: string;
/** The maximum number of comments to return per page. */
pageSize?: number;
/** The token for continuing a previous list request on the next page. This should be set to the value of 'nextPageToken' from the previous response. */
pageToken?: string;
/** Returns response with indentations and line breaks. */
prettyPrint?: boolean;
/**
* Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* Overrides userIp if both are provided.
*/
quotaUser?: string;
/** The minimum value of 'modifiedTime' for the result comments (RFC 3339 date-time). */
startModifiedTime?: string;
/** IP address of the site where the request originates. Use this if you want to enforce per-user limits. */
userIp?: string;
}): Request<CommentList>;
/** Updates a comment with patch semantics. */
update(request: {
/** Data format for the response. */
alt?: string;
/** The ID of the comment. */
commentId: string;
/** Selector specifying which fields to include in a partial response. */
fields?: string;
/** The ID of the file. */
fileId: string;
/** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
key?: string;
/** OAuth 2.0 token for the current user. */
oauth_token?: string;
/** Returns response with indentations and line breaks. */
prettyPrint?: boolean;
/**
* Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* Overrides userIp if both are provided.
*/
quotaUser?: string;
/** IP address of the site where the request originates. Use this if you want to enforce per-user limits. */
userIp?: string;
}): Request<Comment>;
}
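        // Illustrative usage sketch (not part of the generated typings): assuming
        // gapi.client.drive has been loaded and authorized elsewhere, a client could
        // page through a file's comments with the parameters declared above.
        // 'FILE_ID' is a hypothetical placeholder.
        //
        //   gapi.client.drive.comments.list({
        //       fileId: 'FILE_ID',
        //       pageSize: 20,
        //       fields: 'comments(id,content,resolved),nextPageToken'
        //   }).then(response => console.log(response.result.comments));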
interface FilesResource {
/** Creates a copy of a file and applies any requested updates with patch semantics. */
copy(request: {
/** Data format for the response. */
alt?: string;
/** Selector specifying which fields to include in a partial response. */
fields?: string;
/** The ID of the file. */
fileId: string;
/**
* Whether to ignore the domain's default visibility settings for the created file. Domain administrators can choose to make all uploaded files visible to
* the domain by default; this parameter bypasses that behavior for the request. Permissions are still inherited from parent folders.
*/
ignoreDefaultVisibility?: boolean;
/** Whether to set the 'keepForever' field in the new head revision. This is only applicable to files with binary content in Drive. */
keepRevisionForever?: boolean;
/** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
key?: string;
/** OAuth 2.0 token for the current user. */
oauth_token?: string;
/** A language hint for OCR processing during image import (ISO 639-1 code). */
ocrLanguage?: string;
/** Returns response with indentations and line breaks. */
prettyPrint?: boolean;
/**
* Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* Overrides userIp if both are provided.
*/
quotaUser?: string;
/** Whether the requesting application supports Team Drives. */
supportsTeamDrives?: boolean;
/** IP address of the site where the request originates. Use this if you want to enforce per-user limits. */
userIp?: string;
}): Request<File>;
/** Creates a new file. */
create(request: {
resource: {
/**
* The name of the file. This is not necessarily unique within a folder.
* Note that for immutable items such as the top level folders of Team Drives, My Drive root folder, and Application Data folder the name is constant.
*/
name: string;
/**
* The MIME type of the file.
* Drive will attempt to automatically detect an appropriate value from uploaded content if no value is provided. The value cannot be changed unless a new revision is uploaded.
*
* If a file is created with a Google Doc MIME type, the uploaded content will be imported if possible. The supported import formats are published in the About resource.
*/
mimeType: string;
/**
* The IDs of the parent folders which contain the file.
* If not specified as part of a create request, the file will be placed directly in the My Drive folder. Update requests must use the addParents and removeParents parameters to modify the values.
*/
parents: string[];
};
/** Data format for the response. */
alt?: string;
/** Selector specifying which fields to include in a partial response. */
fields?: string;
/**
* Whether to ignore the domain's default visibility settings for the created file. Domain administrators can choose to make all uploaded files visible to
* the domain by default; this parameter bypasses that behavior for the request. Permissions are still inherited from parent folders.
*/
ignoreDefaultVisibility?: boolean;
/** Whether to set the 'keepForever' field in the new head revision. This is only applicable to files with binary content in Drive. */
keepRevisionForever?: boolean;
/** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
key?: string;
/** OAuth 2.0 token for the current user. */
oauth_token?: string;
/** A language hint for OCR processing during image import (ISO 639-1 code). */
ocrLanguage?: string;
/** Returns response with indentations and line breaks. */
prettyPrint?: boolean;
/**
* Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* Overrides userIp if both are provided.
*/
quotaUser?: string;
/** Whether the requesting application supports Team Drives. */
supportsTeamDrives?: boolean;
/** Whether to use the uploaded content as indexable text. */
useContentAsIndexableText?: boolean;
/** IP address of the site where the request originates. Use this if you want to enforce per-user limits. */
userIp?: string;
}): Request<File>;
/**
* Permanently deletes a file owned by the user without moving it to the trash. If the file belongs to a Team Drive the user must be an organizer on the
* parent. If the target is a folder, all descendants owned by the user are also deleted.
*/
delete(request: {
/** Data format for the response. */
alt?: string;
/** Selector specifying which fields to include in a partial response. */
fields?: string;
/** The ID of the file. */
fileId: string;
/** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
key?: string;
/** OAuth 2.0 token for the current user. */
oauth_token?: string;
/** Returns response with indentations and line breaks. */
prettyPrint?: boolean;
/**
* Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* Overrides userIp if both are provided.
*/
quotaUser?: string;
/** Whether the requesting application supports Team Drives. */
supportsTeamDrives?: boolean;
/** IP address of the site where the request originates. Use this if you want to enforce per-user limits. */
userIp?: string;
}): Request<void>;
/** Permanently deletes all of the user's trashed files. */
emptyTrash(request: {
/** Data format for the response. */
alt?: string;
/** Selector specifying which fields to include in a partial response. */
fields?: string;
/** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
key?: string;
/** OAuth 2.0 token for the current user. */
oauth_token?: string;
/** Returns response with indentations and line breaks. */
prettyPrint?: boolean;
/**
* Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* Overrides userIp if both are provided.
*/
quotaUser?: string;
/** IP address of the site where the request originates. Use this if you want to enforce per-user limits. */
userIp?: string;
}): Request<void>;
/** Exports a Google Doc to the requested MIME type and returns the exported content. Please note that the exported content is limited to 10MB. */
export(request: {
/** Data format for the response. */
alt?: string;
/** Selector specifying which fields to include in a partial response. */
fields?: string;
/** The ID of the file. */
fileId: string;
/** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
key?: string;
/** The MIME type of the format requested for this export. */
mimeType: string;
/** OAuth 2.0 token for the current user. */
oauth_token?: string;
/** Returns response with indentations and line breaks. */
prettyPrint?: boolean;
/**
* Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* Overrides userIp if both are provided.
*/
quotaUser?: string;
/** IP address of the site where the request originates. Use this if you want to enforce per-user limits. */
userIp?: string;
}): Request<void>;
/** Generates a set of file IDs which can be provided in create requests. */
generateIds(request: {
/** Data format for the response. */
alt?: string;
/** The number of IDs to return. */
count?: number;
/** Selector specifying which fields to include in a partial response. */
fields?: string;
/** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
key?: string;
/** OAuth 2.0 token for the current user. */
oauth_token?: string;
/** Returns response with indentations and line breaks. */
prettyPrint?: boolean;
/**
* Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* Overrides userIp if both are provided.
*/
quotaUser?: string;
/** The space in which the IDs can be used to create new files. Supported values are 'drive' and 'appDataFolder'. */
space?: string;
/** IP address of the site where the request originates. Use this if you want to enforce per-user limits. */
userIp?: string;
}): Request<GeneratedIds>;
/** Gets a file's metadata or content by ID. */
get(request: {
/** Whether the user is acknowledging the risk of downloading known malware or other abusive files. This is only applicable when alt=media. */
acknowledgeAbuse?: boolean;
/** Data format for the response. */
alt?: string;
/** Selector specifying which fields to include in a partial response. */
fields?: string;
/** The ID of the file. */
fileId: string;
/** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
key?: string;
/** OAuth 2.0 token for the current user. */
oauth_token?: string;
/** Returns response with indentations and line breaks. */
prettyPrint?: boolean;
/**
* Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* Overrides userIp if both are provided.
*/
quotaUser?: string;
/** Whether the requesting application supports Team Drives. */
supportsTeamDrives?: boolean;
/** IP address of the site where the request originates. Use this if you want to enforce per-user limits. */
userIp?: string;
}): Request<File>;
/** Lists or searches files. */
list(request: {
/** Data format for the response. */
alt?: string;
/**
* Comma-separated list of bodies of items (files/documents) to which the query applies. Supported bodies are 'user', 'domain', 'teamDrive' and
* 'allTeamDrives'. 'allTeamDrives' must be combined with 'user'; all other values must be used in isolation. Prefer 'user' or 'teamDrive' to
* 'allTeamDrives' for efficiency.
*/
corpora?: string;
/** The source of files to list. Deprecated: use 'corpora' instead. */
corpus?: string;
/** Selector specifying which fields to include in a partial response. */
fields?: string;
/** Whether Team Drive items should be included in results. */
includeTeamDriveItems?: boolean;
/** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
key?: string;
/** OAuth 2.0 token for the current user. */
oauth_token?: string;
/**
* A comma-separated list of sort keys. Valid keys are 'createdTime', 'folder', 'modifiedByMeTime', 'modifiedTime', 'name', 'name_natural',
* 'quotaBytesUsed', 'recency', 'sharedWithMeTime', 'starred', and 'viewedByMeTime'. Each key sorts ascending by default, but may be reversed with the
* 'desc' modifier. Example usage: ?orderBy=folder,modifiedTime desc,name. Please note that there is a current limitation for users with approximately one
* million files in which the requested sort order is ignored.
*/
orderBy?: string;
/** The maximum number of files to return per page. Partial or empty result pages are possible even before the end of the files list has been reached. */
pageSize?: number;
/** The token for continuing a previous list request on the next page. This should be set to the value of 'nextPageToken' from the previous response. */
pageToken?: string;
/** Returns response with indentations and line breaks. */
prettyPrint?: boolean;
/** A query for filtering the file results. See the "Search for Files" guide for supported syntax. */
q?: string;
/**
* Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* Overrides userIp if both are provided.
*/
quotaUser?: string;
/** A comma-separated list of spaces to query within the corpus. Supported values are 'drive', 'appDataFolder' and 'photos'. */
spaces?: string;
/** Whether the requesting application supports Team Drives. */
supportsTeamDrives?: boolean;
/** ID of Team Drive to search. */
teamDriveId?: string;
/** IP address of the site where the request originates. Use this if you want to enforce per-user limits. */
userIp?: string;
}): Request<FileList>;
/** Updates a file's metadata and/or content with patch semantics. */
update(request: {
/** A comma-separated list of parent IDs to add. */
addParents?: string;
/** Data format for the response. */
alt?: string;
/** Selector specifying which fields to include in a partial response. */
fields?: string;
/** The ID of the file. */
fileId: string;
/** Whether to set the 'keepForever' field in the new head revision. This is only applicable to files with binary content in Drive. */
keepRevisionForever?: boolean;
/** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
key?: string;
/** OAuth 2.0 token for the current user. */
oauth_token?: string;
/** A language hint for OCR processing during image import (ISO 639-1 code). */
ocrLanguage?: string;
/** Returns response with indentations and line breaks. */
prettyPrint?: boolean;
/**
* Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* Overrides userIp if both are provided.
*/
quotaUser?: string;
/** A comma-separated list of parent IDs to remove. */
removeParents?: string;
/** Whether the requesting application supports Team Drives. */
supportsTeamDrives?: boolean;
/** Whether to use the uploaded content as indexable text. */
useContentAsIndexableText?: boolean;
/** IP address of the site where the request originates. Use this if you want to enforce per-user limits. */
userIp?: string;
}): Request<File>;
/** Subscribes to changes to a file */
watch(request: {
/** Whether the user is acknowledging the risk of downloading known malware or other abusive files. This is only applicable when alt=media. */
acknowledgeAbuse?: boolean;
/** Data format for the response. */
alt?: string;
/** Selector specifying which fields to include in a partial response. */
fields?: string;
/** The ID of the file. */
fileId: string;
/** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
key?: string;
/** OAuth 2.0 token for the current user. */
oauth_token?: string;
/** Returns response with indentations and line breaks. */
prettyPrint?: boolean;
/**
* Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* Overrides userIp if both are provided.
*/
quotaUser?: string;
/** Whether the requesting application supports Team Drives. */
supportsTeamDrives?: boolean;
/** IP address of the site where the request originates. Use this if you want to enforce per-user limits. */
userIp?: string;
}): Request<Channel>;
}
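        // Illustrative usage sketch (not part of the generated typings): a hypothetical
        // files.list call combining the 'q', 'orderBy' and 'fields' parameters documented
        // above; 'FOLDER_ID' is a placeholder.
        //
        //   gapi.client.drive.files.list({
        //       q: "'FOLDER_ID' in parents and trashed = false",
        //       orderBy: 'folder,modifiedTime desc,name',
        //       pageSize: 100,
        //       fields: 'nextPageToken, files(id, name, mimeType)'
        //   }).then(response => console.log(response.result.files));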
interface PermissionsResource {
/** Creates a permission for a file or Team Drive. */
create(request: {
/** Data format for the response. */
alt?: string;
/** A custom message to include in the notification email. */
emailMessage?: string;
/** Selector specifying which fields to include in a partial response. */
fields?: string;
/** The ID of the file or Team Drive. */
fileId: string;
/** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
key?: string;
/** OAuth 2.0 token for the current user. */
oauth_token?: string;
/** Returns response with indentations and line breaks. */
prettyPrint?: boolean;
/**
* Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* Overrides userIp if both are provided.
*/
quotaUser?: string;
/**
* Whether to send a notification email when sharing to users or groups. This defaults to true for users and groups, and is not allowed for other
* requests. It must not be disabled for ownership transfers.
*/
sendNotificationEmail?: boolean;
/** Whether the requesting application supports Team Drives. */
supportsTeamDrives?: boolean;
/**
* Whether to transfer ownership to the specified user and downgrade the current owner to a writer. This parameter is required as an acknowledgement of
* the side effect.
*/
transferOwnership?: boolean;
/** IP address of the site where the request originates. Use this if you want to enforce per-user limits. */
userIp?: string;
}): Request<Permission>;
/** Deletes a permission. */
delete(request: {
/** Data format for the response. */
alt?: string;
/** Selector specifying which fields to include in a partial response. */
fields?: string;
/** The ID of the file or Team Drive. */
fileId: string;
/** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
key?: string;
/** OAuth 2.0 token for the current user. */
oauth_token?: string;
/** The ID of the permission. */
permissionId: string;
/** Returns response with indentations and line breaks. */
prettyPrint?: boolean;
/**
* Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* Overrides userIp if both are provided.
*/
quotaUser?: string;
/** Whether the requesting application supports Team Drives. */
supportsTeamDrives?: boolean;
/** IP address of the site where the request originates. Use this if you want to enforce per-user limits. */
userIp?: string;
}): Request<void>;
/** Gets a permission by ID. */
get(request: {
/** Data format for the response. */
alt?: string;
/** Selector specifying which fields to include in a partial response. */
fields?: string;
/** The ID of the file. */
fileId: string;
/** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
key?: string;
/** OAuth 2.0 token for the current user. */
oauth_token?: string;
/** The ID of the permission. */
permissionId: string;
/** Returns response with indentations and line breaks. */
prettyPrint?: boolean;
/**
* Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* Overrides userIp if both are provided.
*/
quotaUser?: string;
/** Whether the requesting application supports Team Drives. */
supportsTeamDrives?: boolean;
/** IP address of the site where the request originates. Use this if you want to enforce per-user limits. */
userIp?: string;
}): Request<Permission>;
/** Lists a file's or Team Drive's permissions. */
list(request: {
/** Data format for the response. */
alt?: string;
/** Selector specifying which fields to include in a partial response. */
fields?: string;
/** The ID of the file or Team Drive. */
fileId: string;
/** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
key?: string;
/** OAuth 2.0 token for the current user. */
oauth_token?: string;
/**
* The maximum number of permissions to return per page. When not set for files in a Team Drive, at most 100 results will be returned. When not set for
* files that are not in a Team Drive, the entire list will be returned.
*/
pageSize?: number;
/** The token for continuing a previous list request on the next page. This should be set to the value of 'nextPageToken' from the previous response. */
pageToken?: string;
/** Returns response with indentations and line breaks. */
prettyPrint?: boolean;
/**
* Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* Overrides userIp if both are provided.
*/
quotaUser?: string;
/** Whether the requesting application supports Team Drives. */
supportsTeamDrives?: boolean;
/** IP address of the site where the request originates. Use this if you want to enforce per-user limits. */
userIp?: string;
}): Request<PermissionList>;
/** Updates a permission with patch semantics. */
update(request: {
/** Data format for the response. */
alt?: string;
/** Selector specifying which fields to include in a partial response. */
fields?: string;
/** The ID of the file or Team Drive. */
fileId: string;
/** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
key?: string;
/** OAuth 2.0 token for the current user. */
oauth_token?: string;
/** The ID of the permission. */
permissionId: string;
/** Returns response with indentations and line breaks. */
prettyPrint?: boolean;
/**
* Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* Overrides userIp if both are provided.
*/
quotaUser?: string;
/** Whether to remove the expiration date. */
removeExpiration?: boolean;
/** Whether the requesting application supports Team Drives. */
supportsTeamDrives?: boolean;
/**
* Whether to transfer ownership to the specified user and downgrade the current owner to a writer. This parameter is required as an acknowledgement of
* the side effect.
*/
transferOwnership?: boolean;
/** IP address of the site where the request originates. Use this if you want to enforce per-user limits. */
userIp?: string;
}): Request<Permission>;
}
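        // Illustrative usage sketch (not part of the generated typings): listing the
        // permissions on a file or Team Drive item with the parameters declared above;
        // 'FILE_ID' is a hypothetical placeholder.
        //
        //   gapi.client.drive.permissions.list({
        //       fileId: 'FILE_ID',
        //       pageSize: 50,
        //       supportsTeamDrives: true
        //   }).then(response => console.log(response.result.permissions));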
interface RepliesResource {
/** Creates a new reply to a comment. */
create(request: {
/** Data format for the response. */
alt?: string;
/** The ID of the comment. */
commentId: string;
/** Selector specifying which fields to include in a partial response. */
fields?: string;
/** The ID of the file. */
fileId: string;
/** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
key?: string;
/** OAuth 2.0 token for the current user. */
oauth_token?: string;
/** Returns response with indentations and line breaks. */
prettyPrint?: boolean;
/**
* Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* Overrides userIp if both are provided.
*/
quotaUser?: string;
/** IP address of the site where the request originates. Use this if you want to enforce per-user limits. */
userIp?: string;
}): Request<Reply>;
/** Deletes a reply. */
delete(request: {
/** Data format for the response. */
alt?: string;
/** The ID of the comment. */
commentId: string;
/** Selector specifying which fields to include in a partial response. */
fields?: string;
/** The ID of the file. */
fileId: string;
/** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
key?: string;
/** OAuth 2.0 token for the current user. */
oauth_token?: string;
/** Returns response with indentations and line breaks. */
prettyPrint?: boolean;
/**
* Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* Overrides userIp if both are provided.
*/
quotaUser?: string;
/** The ID of the reply. */
replyId: string;
/** IP address of the site where the request originates. Use this if you want to enforce per-user limits. */
userIp?: string;
}): Request<void>;
/** Gets a reply by ID. */
get(request: {
/** Data format for the response. */
alt?: string;
/** The ID of the comment. */
commentId: string;
/** Selector specifying which fields to include in a partial response. */
fields?: string;
/** The ID of the file. */
fileId: string;
/** Whether to return deleted replies. Deleted replies will not include their original content. */
includeDeleted?: boolean;
/** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
key?: string;
/** OAuth 2.0 token for the current user. */
oauth_token?: string;
/** Returns response with indentations and line breaks. */
prettyPrint?: boolean;
/**
* Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* Overrides userIp if both are provided.
*/
quotaUser?: string;
/** The ID of the reply. */
replyId: string;
/** IP address of the site where the request originates. Use this if you want to enforce per-user limits. */
userIp?: string;
}): Request<Reply>;
/** Lists a comment's replies. */
list(request: {
/** Data format for the response. */
alt?: string;
/** The ID of the comment. */
commentId: string;
/** Selector specifying which fields to include in a partial response. */
fields?: string;
/** The ID of the file. */
fileId: string;
/** Whether to include deleted replies. Deleted replies will not include their original content. */
includeDeleted?: boolean;
/** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
key?: string;
/** OAuth 2.0 token for the current user. */
oauth_token?: string;
/** The maximum number of replies to return per page. */
pageSize?: number;
/** The token for continuing a previous list request on the next page. This should be set to the value of 'nextPageToken' from the previous response. */
pageToken?: string;
/** Returns response with indentations and line breaks. */
prettyPrint?: boolean;
/**
* Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* Overrides userIp if both are provided.
*/
quotaUser?: string;
/** IP address of the site where the request originates. Use this if you want to enforce per-user limits. */
userIp?: string;
}): Request<ReplyList>;
/** Updates a reply with patch semantics. */
update(request: {
/** Data format for the response. */
alt?: string;
/** The ID of the comment. */
commentId: string;
/** Selector specifying which fields to include in a partial response. */
fields?: string;
/** The ID of the file. */
fileId: string;
/** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
key?: string;
/** OAuth 2.0 token for the current user. */
oauth_token?: string;
/** Returns response with indentations and line breaks. */
prettyPrint?: boolean;
/**
* Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* Overrides userIp if both are provided.
*/
quotaUser?: string;
/** The ID of the reply. */
replyId: string;
/** IP address of the site where the request originates. Use this if you want to enforce per-user limits. */
userIp?: string;
}): Request<Reply>;
}
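        // Illustrative usage sketch (not part of the generated typings): listing the
        // replies to a comment; both IDs are hypothetical placeholders.
        //
        //   gapi.client.drive.replies.list({
        //       fileId: 'FILE_ID',
        //       commentId: 'COMMENT_ID',
        //       includeDeleted: false
        //   }).then(response => console.log(response.result.replies));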
interface RevisionsResource {
/** Permanently deletes a revision. This method is only applicable to files with binary content in Drive. */
delete(request: {
/** Data format for the response. */
alt?: string;
/** Selector specifying which fields to include in a partial response. */
fields?: string;
/** The ID of the file. */
fileId: string;
/** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
key?: string;
/** OAuth 2.0 token for the current user. */
oauth_token?: string;
/** Returns response with indentations and line breaks. */
prettyPrint?: boolean;
/**
* Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* Overrides userIp if both are provided.
*/
quotaUser?: string;
/** The ID of the revision. */
revisionId: string;
/** IP address of the site where the request originates. Use this if you want to enforce per-user limits. */
userIp?: string;
}): Request<void>;
/** Gets a revision's metadata or content by ID. */
get(request: {
/** Whether the user is acknowledging the risk of downloading known malware or other abusive files. This is only applicable when alt=media. */
acknowledgeAbuse?: boolean;
/** Data format for the response. */
alt?: string;
/** Selector specifying which fields to include in a partial response. */
fields?: string;
/** The ID of the file. */
fileId: string;
/** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
key?: string;
/** OAuth 2.0 token for the current user. */
oauth_token?: string;
/** Returns response with indentations and line breaks. */
prettyPrint?: boolean;
/**
* Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* Overrides userIp if both are provided.
*/
quotaUser?: string;
/** The ID of the revision. */
revisionId: string;
/** IP address of the site where the request originates. Use this if you want to enforce per-user limits. */
userIp?: string;
}): Request<Revision>;
/** Lists a file's revisions. */
list(request: {
/** Data format for the response. */
alt?: string;
/** Selector specifying which fields to include in a partial response. */
fields?: string;
/** The ID of the file. */
fileId: string;
/** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
key?: string;
/** OAuth 2.0 token for the current user. */
oauth_token?: string;
/** The maximum number of revisions to return per page. */
pageSize?: number;
/** The token for continuing a previous list request on the next page. This should be set to the value of 'nextPageToken' from the previous response. */
pageToken?: string;
/** Returns response with indentations and line breaks. */<|fim▁hole|> /**
* Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* Overrides userIp if both are provided.
*/
quotaUser?: string;
/** IP address of the site where the request originates. Use this if you want to enforce per-user limits. */
userIp?: string;
}): Request<RevisionList>;
/** Updates a revision with patch semantics. */
update(request: {
/** Data format for the response. */
alt?: string;
/** Selector specifying which fields to include in a partial response. */
fields?: string;
/** The ID of the file. */
fileId: string;
/** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
key?: string;
/** OAuth 2.0 token for the current user. */
oauth_token?: string;
/** Returns response with indentations and line breaks. */
prettyPrint?: boolean;
/**
* Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* Overrides userIp if both are provided.
*/
quotaUser?: string;
/** The ID of the revision. */
revisionId: string;
/** IP address of the site where the request originates. Use this if you want to enforce per-user limits. */
userIp?: string;
}): Request<Revision>;
}
interface TeamdrivesResource {
/** Creates a new Team Drive. */
create(request: {
/** Data format for the response. */
alt?: string;
/** Selector specifying which fields to include in a partial response. */
fields?: string;
/** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
key?: string;
/** OAuth 2.0 token for the current user. */
oauth_token?: string;
/** Returns response with indentations and line breaks. */
prettyPrint?: boolean;
/**
* Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* Overrides userIp if both are provided.
*/
quotaUser?: string;
/**
* An ID, such as a random UUID, which uniquely identifies this user's request for idempotent creation of a Team Drive. A repeated request by the same
* user and with the same request ID will avoid creating duplicates by attempting to create the same Team Drive. If the Team Drive already exists a 409
* error will be returned.
*/
requestId: string;
/** IP address of the site where the request originates. Use this if you want to enforce per-user limits. */
userIp?: string;
}): Request<TeamDrive>;
/** Permanently deletes a Team Drive for which the user is an organizer. The Team Drive cannot contain any untrashed items. */
delete(request: {
/** Data format for the response. */
alt?: string;
/** Selector specifying which fields to include in a partial response. */
fields?: string;
/** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
key?: string;
/** OAuth 2.0 token for the current user. */
oauth_token?: string;
/** Returns response with indentations and line breaks. */
prettyPrint?: boolean;
/**
* Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* Overrides userIp if both are provided.
*/
quotaUser?: string;
/** The ID of the Team Drive */
teamDriveId: string;
/** IP address of the site where the request originates. Use this if you want to enforce per-user limits. */
userIp?: string;
}): Request<void>;
/** Gets a Team Drive's metadata by ID. */
get(request: {
/** Data format for the response. */
alt?: string;
/** Selector specifying which fields to include in a partial response. */
fields?: string;
/** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
key?: string;
/** OAuth 2.0 token for the current user. */
oauth_token?: string;
/** Returns response with indentations and line breaks. */
prettyPrint?: boolean;
/**
* Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* Overrides userIp if both are provided.
*/
quotaUser?: string;
/** The ID of the Team Drive */
teamDriveId: string;
/** IP address of the site where the request originates. Use this if you want to enforce per-user limits. */
userIp?: string;
}): Request<TeamDrive>;
/** Lists the user's Team Drives. */
list(request: {
/** Data format for the response. */
alt?: string;
/** Selector specifying which fields to include in a partial response. */
fields?: string;
/** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
key?: string;
/** OAuth 2.0 token for the current user. */
oauth_token?: string;
/** Maximum number of Team Drives to return. */
pageSize?: number;
/** Page token for Team Drives. */
pageToken?: string;
/** Returns response with indentations and line breaks. */
prettyPrint?: boolean;
/**
* Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* Overrides userIp if both are provided.
*/
quotaUser?: string;
/** IP address of the site where the request originates. Use this if you want to enforce per-user limits. */
userIp?: string;
}): Request<TeamDriveList>;
/** Updates a Team Drive's metadata */
update(request: {
/** Data format for the response. */
alt?: string;
/** Selector specifying which fields to include in a partial response. */
fields?: string;
/** API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. */
key?: string;
/** OAuth 2.0 token for the current user. */
oauth_token?: string;
/** Returns response with indentations and line breaks. */
prettyPrint?: boolean;
/**
* Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
* Overrides userIp if both are provided.
*/
quotaUser?: string;
/** The ID of the Team Drive */
teamDriveId: string;
/** IP address of the site where the request originates. Use this if you want to enforce per-user limits. */
userIp?: string;
}): Request<TeamDrive>;
}
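        // Illustrative usage sketch (not part of the generated typings): Team Drive
        // creation is idempotent per the 'requestId' documented above, so retrying with
        // the same (hypothetical) UUID avoids creating duplicates. The request body is
        // passed here as 'resource', which these generated signatures do not model.
        //
        //   gapi.client.drive.teamdrives.create({
        //       requestId: 'a3f1c9e0-1234-5678-9abc-def012345678',
        //       resource: { name: 'Design Team Drive' }
        //   }).then(response => console.log(response.result.id));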
}
}<|fim▁end|>
|
prettyPrint?: boolean;
|
<|file_name|>transformations.js<|end_file_name|><|fim▁begin|>/*
* The MIT License
*
* Copyright 2015 Eduardo Weiland.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
define(['knockout', 'grammar', 'productionrule', 'utils'], function(ko, Grammar, ProductionRule, utils) {
'use strict';
/**
     * Finds all unreachable non-terminal symbols within a grammar.
     *
     * @param {Grammar} grammar Grammar to be checked.
     * @return {string[]} List of unreachable symbols.
*/
function findUnreachableSymbols(grammar) {
var unreachable = [],
nt = grammar.nonTerminalSymbols(),
s = grammar.productionStartSymbol();
for (var i = 0, l = nt.length; i < l; ++i) {
            // Skip the production start symbol
if (nt[i] === s) {
continue;
}
var found = false;
for (var j = 0, k = nt.length; j < k && !found; ++j) {
if (i === j) {<|fim▁hole|> // Ignora produções do próprio símbolo
continue;
}
var prods = grammar.getProductions(nt[j]);
for (var x = 0, y = prods.length; x < y; ++x) {
if (prods[x].indexOf(nt[i]) !== -1) {
found = true;
break;
}
}
}
if (!found) {
unreachable.push(nt[i]);
}
}
return unreachable;
}
function findSterileSymbols(grammar) {
var steriles = [],
rules = grammar.productionRules();
for (var i = 0, l = rules.length; i < l; ++i) {
var found = false,
left = rules[i].leftSide(),
right = rules[i].rightSide();
for (var j = 0, k = right.length; j < k && !found; ++j) {
if (right[j].indexOf(left) === -1) {
found = true;
break;
}
}
if (!found) {
steriles.push(left);
}
}
return steriles;
}
/**
* Substitui símbolos não terminais no começo de todas as produções pelas suas produções.
*
* @param {Grammar} grammar Gramática para ser modificada.
* @return {ProductionRule[]} Regras de produção modificadas.
*/
function replaceStartingSymbols(grammar) {
var rules = grammar.productionRules();
var nt = grammar.nonTerminalSymbols();
var s = grammar.productionStartSymbol();
for (var i = 0, l = rules.length; i < l; ++i) {
var left = rules[i].leftSide();
if (left === s) {
// Ignora produção inicial
continue;
}
var prods = rules[i].rightSide();
// Não usa cache do length porque o array é modificado internamente
for (var j = 0; j < prods.length; ++j) {
if ( (prods[j][0] === left) || (nt.indexOf(prods[j][0]) === -1) ) {
// Produção começa com o próprio símbolo não-terminal (recursivo) ou
// não começa com nenhum símbolo não-terminal, ignora as substituições
continue;
}
var otherProds = grammar.getProductions(prods[j][0]);
var rest = prods[j].substr(1);
for (var k = 0, m = otherProds.length; k < m; ++k) {
otherProds[k] = otherProds[k] + rest;
}
// Remove a produção que começa com não-terminal e adiciona as novas produções no lugar
prods.splice.apply(prods, [j--, 1].concat(otherProds));
}
}
return rules;
}
return {
/**
* Remove símbolos inúteis de uma gramática.
*
* @param {Grammar} grammar Gramática de entrada.
* @return {Grammar} Uma nova gramática sem os simbolos inúteis.
*/
removeUselessSymbols: function(grammar) {
var newGrammar = new Grammar(ko.toJS(grammar));
var sterile = findSterileSymbols(newGrammar),
unreachable = findUnreachableSymbols(newGrammar),
useless = utils.arrayUnion(sterile, unreachable),
nt = newGrammar.nonTerminalSymbols();
// Remove os símbolos inalcançáveis...
newGrammar.nonTerminalSymbols(utils.arrayRemove(nt, utils.arrayUnion(sterile, unreachable)));
newGrammar.removeSymbolRules(useless);
// .. e as produções em que eles aparecem
var rules = newGrammar.productionRules();
for (var i = 0, l = rules.length; i < l; ++i) {
var right = rules[i].rightSide();
for (var j = 0, m = useless.length; j < m; ++j) {
for (var k = 0; k < right.length; ++k) {
if (right[k].indexOf(useless[j]) !== -1) {
right.splice(k--, 1);
}
}
}
rules[i].rightSide(utils.arrayUnique(right));
}
newGrammar.productionRules(rules);
return newGrammar;
},
/**
* Remove produções vazias de uma gramática.
*
* @param {Grammar} grammar Gramática de entrada.
* @return {Grammar} Uma nova gramática sem as produções vazias.
*/
removeEmptyProductions: function(grammar) {
var newGrammar = new Grammar(ko.toJS(grammar));
var newStart;
var rules = newGrammar.productionRules();
for (var i = 0, l = rules.length; i < l; ++i) {
var left = rules[i].leftSide();
var right = rules[i].rightSide();
var emptyIndex = right.indexOf(ProductionRule.EPSILON);
if (emptyIndex === -1) {
// Essa regra não possui produção vazia, ignora e testa a próxima
continue;
}
if (left === newGrammar.productionStartSymbol()) {
// Início de produção pode gerar sentença vazia, então trata o caso especial
newStart = new ProductionRule(newGrammar, {
leftSide: left + "'",
rightSide: [left, ProductionRule.EPSILON]
});
}
// Encontra todas as outras regras que produzem esse símbolo e adiciona uma nova
// produção sem esse símbolo
for (var j = 0; j < l; ++j) {
var rightOther = rules[j].rightSide();
for (var k = 0, m = rightOther.length; k < m; ++k) {
if (rightOther[k].indexOf(left) !== -1) {
rightOther.push(rightOther[k].replace(new RegExp(left, 'g'), ''));
}
}
rules[j].rightSide(utils.arrayUnique(rightOther));
}
right.splice(emptyIndex, 1);
rules[i].rightSide(utils.arrayUnique(right));
}
if (newStart) {
rules.unshift(newStart);
newGrammar.productionStartSymbol(newStart.leftSide());
newGrammar.nonTerminalSymbols([newStart.leftSide()].concat(newGrammar.nonTerminalSymbols()));
}
newGrammar.productionRules(rules);
return newGrammar;
},
/**
* Fatora uma gramática.
*
* @param {Grammar} grammar Gramática de entrada.
* @return {Grammar} Uma nova gramática fatorada.
*/
factor: function(grammar) {
var newGrammar = new Grammar(ko.toJS(grammar));
var rules = replaceStartingSymbols(newGrammar);
var newRules = [];
for (var i = 0; i < rules.length; ++i) {
var left = rules[i].leftSide();
var right = rules[i].rightSide();
var newRight = [];
var firstSymbolGrouped = {};
// Percorre todos as produções verificando quais precisam ser fatoradas
for (var j = 0, l = right.length; j < l; ++j) {
if (right[j].length === 1) {
// Produções com apenas um símbolo são deixadas como estão
newRight.push(right[j]);
}
else {
// Agrupa todas as produções que começam com o mesmo símbolo terminal
var firstSymbol = right[j][0];
if (!firstSymbolGrouped[firstSymbol]) {
firstSymbolGrouped[firstSymbol] = [];
}
firstSymbolGrouped[firstSymbol].push(right[j].substr(1));
}
}
// Adiciona a produção na mesma ordem que estava antes, antes das novas produções serem adicionadas
newRules.push(rules[i]);
for (var j in firstSymbolGrouped) {
if (firstSymbolGrouped[j].length > 1) {
// Mais de uma produção começando com o mesmo símbolo terminal
var newSymbol = newGrammar.createNonTerminalSymbol(left);
newRight.push(j + newSymbol);
newRules.push(new ProductionRule(newGrammar, {
leftSide: newSymbol,
rightSide: firstSymbolGrouped[j]
}));
}
else {
// Senão, é apenas uma produção (índice 0), mantém ela no mesmo lugar
newRight.push(j + firstSymbolGrouped[j][0]);
}
}
// Atualiza as produções para o símbolo existente
rules[i].rightSide(utils.arrayUnique(newRight));
}
newGrammar.productionRules(newRules);
return newGrammar;
},
/**
* Remove recursão à esquerda de uma gramática.
*
* @param {Grammar} grammar Gramática de entrada.
* @return {Grammar} Uma nova gramática sem recursão à esquerda.
*/
removeLeftRecursion: function(grammar) {
var newGrammar = new Grammar(ko.toJS(grammar));
var rules = newGrammar.productionRules();
var newRules = [];
for (var i = 0, l = rules.length; i < l; ++i) {
var left = rules[i].leftSide();
var prods = rules[i].rightSide();
var recursives = [];
// Adiciona a produção na mesma ordem que estava antes, antes das nova produção ser adicionada
newRules.push(rules[i]);
// Não usa cache do length porque o array é modificado internamente
for (var j = 0; j < prods.length; ++j) {
if (prods[j][0] === left && prods[j].length > 1) {
// Encontrou produção recursiva, cria uma nova regra
var newSymbol = newGrammar.createNonTerminalSymbol(left);
recursives.push(newSymbol);
newRules.push(new ProductionRule(newGrammar, {
leftSide: newSymbol,
rightSide: [prods[j].substr(1) + newSymbol, ProductionRule.EPSILON]
}));
// Remove essa produção
prods.splice(j--, 1);
}
}
var newProds = [];
if (recursives.length === 0) {
newProds = prods.slice();
}
else {
for (var j = 0; j < prods.length; ++j) {
for (var k = 0; k < recursives.length; ++k) {
newProds.push(prods[j] + recursives[k]);
}
}
}
rules[i].rightSide(newProds);
}
newGrammar.productionRules(newRules);
return newGrammar;
}
};
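        // Worked example (illustrative only): for E -> E+T | T, the direct left recursion
        // is rewritten as E -> TE1 with a new rule E1 -> +TE1 | epsilon (the fresh symbol
        // name depends on createNonTerminalSymbol).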
});<|fim▁end|>
| |
<|file_name|>timing.py<|end_file_name|><|fim▁begin|>from time import time
from os import remove
from matplotlib.image import imread
import json
import subprocess
import numpy as np
import matplotlib.pyplot as plt
def time_a_function(program, args):
start = time()
subprocess.call([program] + [args])
end = time()
return float(end - start)
def clean(programs):
for p in programs:
remove(p)
def plot_results(times, programs, images):
x = [imread(img)[:,:,0].shape for img in images]
xlabels = [str(xi) for xi in x]
x = [np.prod(xi) for xi in x]
for p in programs:
y, std_y = zip(*times[p])
# plt.plot(x, y, 'o')
plt.errorbar(x, y, yerr=std_y, fmt='o')
plt.xticks(x, xlabels)
plt.xlabel('Image size')
plt.ylabel('Time (s)')
plt.show()
def print_results(times, programs, images):
sizes = [imread(img)[:,:,0].size for img in images]
for p in programs:
print '\n{}'.format(p)
mean_t, std_t = zip(*times[p])
print 'Image'.rjust(13), 'Size'.rjust(8), 'Avg. time'.rjust(10), 'Std. time'.rjust(10)
for img, size, m, s in zip(images, sizes, mean_t, std_t):
print '{:13} {:8d} {:10.5f} {:10.5f}'.format(img, size, m, s)
def main():
print 'Running make...'
subprocess.call(['make', '-j8'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
programs = ['./grayscale.out', './grayscale-seq.out']
images = ['img/emma{}.png'.format(i) for i in range(1, 6)]
n = 20
times = {}<|fim▁hole|> try:
print 'Loading times.json...'
time_file = open('times.json', 'r')
times = json.load(time_file)
except IOError:
print 'Failed, calculating times'
for p in programs:
times[p] = []
for img in images:
t = []
print 'Running {} with {} {} times...'.format(p, img, n),
for _ in range(n):
t.append(time_a_function(p, img))
mean_t = np.mean(t)
std_t = np.std(t)
print '({} +- {})s on average'.format(mean_t, std_t)
times[p].append((mean_t, std_t))
time_file = open('times.json', 'w')
print 'Writing times.json...'
json.dump(times, time_file)
time_file.close()
print_results(times, programs, images)
plot_results(times, programs, images)
clean(programs)
if __name__ == '__main__':
main()<|fim▁end|>
| |
<|file_name|>part2.rs<|end_file_name|><|fim▁begin|>// adventofcode - day 5
// part 2
use std::io::prelude::*;
use std::fs::File;
fn main(){<|fim▁hole|> let wordlist = import_data();
let mut nice_words = 0i32;
for word in wordlist.lines(){
if contains_repeating_letter(word.to_string())
&& contains_pair(word.to_string()) {
nice_words += 1;
}
}
println!("There are {} nice words in Santas list.", nice_words);
}
fn contains_repeating_letter(word: String) -> bool {
for ii in 0..word.len() - 2 {
let tripple = &word[ii..ii+3];
if tripple.chars().nth(0) == tripple.chars().nth(2) {
return true;
}
}
false
}
fn contains_pair(word: String) -> bool {
for ii in 0..word.len() - 1 {
let pair = &word[ii..ii+2];
let remainder = &word[ii+2..word.len()];
if remainder.contains(pair) {
return true;
}
}
false
}
// This function simply imports the data set from a file called input.txt
fn import_data() -> String {
let mut file = match File::open("../../inputs/05.txt") {
Ok(f) => f,
Err(e) => panic!("file error: {}", e),
};
let mut data = String::new();
match file.read_to_string(&mut data){
Ok(_) => {},
Err(e) => panic!("file error: {}", e),
};
data
}<|fim▁end|>
|
println!("Advent of Code - day 5 | part 2");
// import data
|
<|file_name|>reassemble.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
#
import sys
import csv
from pprint import pprint
"""2018.10.28 John Dey
consume CSV output from pwalk and reassemble directory data.
File system data from pwalk is flattened and out of order.
Rewalk the tree data and create two new fields for each directory.
Create a tree sum of file count and bytes at each directory (node)
that represents the child nodes. Sums for the root will
become the total file count and sum size for every file.
Notes: I wrote this in Python as a proof of concept.
"""
def usage():
"""how to use and exit"""
print("usage: % inputfile.csv" % sys.argv[0])
sys.exit(1)
if len(sys.argv ) != 2:
usage()
dd = {}
with open(sys.argv[1], newline='') as csvfile:
pwalk = csv.reader(csvfile, delimiter=',', quotechar='"')
for row in pwalk:
if int(row[15]) >= 0: # only store directories
dd[int(row[0])] = {'parent': int(row[1]),
'depth': int(row[2]),
'dircnt': int(row[15]),
'sumcnt': int(row[15]), # or Zero?
'dirsiz': int(row[16]),
'sumsiz': int(row[16])}
if int(row[1]) == 0:
root = int(row[0])
dd[int(row[0])]['sumcnt'] += 1
dd[int(row[0])]['sumsiz'] += int(row[7])
print("Total directories: %d" % len(dd.keys()))
"""reassemble the tree"""
for inode in dd.keys():<|fim▁hole|> parent = dd[inode]['parent']
while parent != 0:
dd[parent]['sumcnt'] += dd[inode]['dircnt']
dd[parent]['sumsiz'] += dd[inode]['dirsiz']
parent = dd[parent]['parent']
pprint(dd[root])<|fim▁end|>
| |
<|file_name|>setup.py<|end_file_name|><|fim▁begin|># #############################################################################
# AUTHOR BLOCK:
# #############################################################################
#
# RIB Mosaic RenderMan(R) IDE, see <http://sourceforge.net/projects/ribmosaic><|fim▁hole|># GPL - http://www.gnu.org/copyleft/gpl.html
#
# #############################################################################
# GPL LICENSE BLOCK:
# #############################################################################
#
# Script Copyright (C) Eric Nathen Back
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# #############################################################################
# COPYRIGHT BLOCK:
# #############################################################################
#
# The RenderMan(R) Interface Procedures and Protocol are:
# Copyright 1988, 1989, 2000, 2005 Pixar
# All Rights Reserved
# RenderMan(R) is a registered trademark of Pixar
#
# #############################################################################
# COMMENT BLOCK:
# #############################################################################
#
# !/usr/bin/env python
# Builds ribify C module using the rm_ribify.py PYthon script with Cython.
#
# This script is PEP 8 compliant
#
# Search TODO for incomplete code
# Search FIXME for improper code
# Search XXX for broken code
#
# #############################################################################
# END BLOCKS
# #############################################################################
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
setup(name="ribify", version="0.0",
ext_modules = [Extension("ribify", ["rm_ribify.py"])])<|fim▁end|>
|
# by Eric Nathen Back aka WHiTeRaBBiT, 01-24-2010
# This script is protected by the GPL: Gnu Public License
|
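Worth noting about the setup.py sample above: it imports build_ext from Cython.Distutils but never passes it to setup(), so as written nothing actually cythonizes rm_ribify.py. A conventional wiring would look roughly like the sketch below (illustrative only, not the original file):
# Sketch: hooking Cython's build_ext into the same setup() call.
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext

setup(
    name="ribify",
    version="0.0",
    cmdclass={"build_ext": build_ext},
    ext_modules=[Extension("ribify", ["rm_ribify.py"])],
)
# typically run as: python setup.py build_ext --inplace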
<|file_name|>main.py<|end_file_name|><|fim▁begin|>from os.path import dirname, join
from math import ceil
import numpy as np
from bokeh.io import curdoc
from bokeh.layouts import row, column, widgetbox
from bokeh.models import ColumnDataSource, Slider, Div
from bokeh.plotting import figure
import audio
from audio import MAX_FREQ, TIMESLICE, NUM_BINS
from waterfall import WaterfallRenderer
MAX_FREQ_KHZ = MAX_FREQ*0.001
NUM_GRAMS = 800
GRAM_LENGTH = 512
TILE_WIDTH = 200
EQ_CLAMP = 20
PALETTE = ['#081d58', '#253494', '#225ea8', '#1d91c0', '#41b6c4', '#7fcdbb', '#c7e9b4', '#edf8b1', '#ffffd9']
PLOTARGS = dict(tools="", toolbar_location=None, outline_line_color='#595959')
filename = join(dirname(__file__), "description.html")
desc = Div(text=open(filename).read(),
render_as_text=False, width=1000)
waterfall_renderer = WaterfallRenderer(palette=PALETTE, num_grams=NUM_GRAMS,
gram_length=GRAM_LENGTH, tile_width=TILE_WIDTH)
waterfall_plot = figure(plot_width=990, plot_height=300, min_border_left=80,
x_range=[0, NUM_GRAMS], y_range=[0, MAX_FREQ_KHZ], **PLOTARGS)
waterfall_plot.grid.grid_line_color = None
waterfall_plot.background_fill_color = "#024768"
waterfall_plot.renderers.append(waterfall_renderer)
signal_source = ColumnDataSource(data=dict(t=[], y=[]))
signal_plot = figure(plot_width=600, plot_height=200, title="Signal",
x_range=[0, TIMESLICE], y_range=[-0.8, 0.8], **PLOTARGS)
signal_plot.background_fill_color = "#eaeaea"
signal_plot.line(x="t", y="y", line_color="#024768", source=signal_source)
spectrum_source = ColumnDataSource(data=dict(f=[], y=[]))
spectrum_plot = figure(plot_width=600, plot_height=200, title="Power Spectrum",
y_range=[10**(-4), 10**3], x_range=[0, MAX_FREQ_KHZ],
y_axis_type="log", **PLOTARGS)
spectrum_plot.background_fill_color = "#eaeaea"
spectrum_plot.line(x="f", y="y", line_color="#024768", source=spectrum_source)
eq_angle = 2*np.pi/NUM_BINS<|fim▁hole|> outer=np.tile(eq_range+2.95, NUM_BINS),
start=np.hstack([np.ones_like(eq_range)*eq_angle*(i+0.05) for i in range(NUM_BINS)]),
end=np.hstack([np.ones_like(eq_range)*eq_angle*(i+0.95) for i in range(NUM_BINS)]),
alpha=np.tile(np.zeros_like(eq_range), NUM_BINS),
)
eq_source = ColumnDataSource(data=eq_data)
eq = figure(plot_width=400, plot_height=400,
x_axis_type=None, y_axis_type=None,
x_range=[-20, 20], y_range=[-20, 20], **PLOTARGS)
eq.background_fill_color = "#eaeaea"
eq.annular_wedge(x=0, y=0, fill_color="#024768", fill_alpha="alpha", line_color=None,
inner_radius="inner", outer_radius="outer", start_angle="start", end_angle="end",
source=eq_source)
freq = Slider(start=1, end=MAX_FREQ, value=MAX_FREQ, step=1, title="Frequency")
gain = Slider(start=1, end=20, value=1, step=1, title="Gain")
def update():
signal, spectrum, bins = audio.data['values']
# seems to be a problem with Array property, using List for now
waterfall_renderer.latest = spectrum.tolist()
waterfall_plot.y_range.end = freq.value*0.001
# the if-elses below are a small optimization: avoid computing and sending
# all the x-values, if the length has not changed
if len(signal) == len(signal_source.data['y']):
signal_source.data['y'] = signal*gain.value
else:
t = np.linspace(0, TIMESLICE, len(signal))
signal_source.data = dict(t=t, y=signal*gain.value)
if len(spectrum) == len(spectrum_source.data['y']):
spectrum_source.data['y'] = spectrum
else:
f = np.linspace(0, MAX_FREQ_KHZ, len(spectrum))
spectrum_source.data = dict(f=f, y=spectrum)
spectrum_plot.x_range.end = freq.value*0.001
alphas = []
for x in bins:
a = np.zeros_like(eq_range)
N = int(ceil(x))
a[:N] = (1 - eq_range[:N]*0.05)
alphas.append(a)
eq_source.data['alpha'] = np.hstack(alphas)
curdoc().add_periodic_callback(update, 80)
controls = row(widgetbox(gain), widgetbox(freq))
plots = column(waterfall_plot, row(column(signal_plot, spectrum_plot), eq))
curdoc().add_root(desc)
curdoc().add_root(controls)
curdoc().add_root(plots)<|fim▁end|>
|
eq_range = np.arange(EQ_CLAMP, dtype=np.float64)
eq_data = dict(
inner=np.tile(eq_range+2, NUM_BINS),
|
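A small worked version of the equalizer layout used by the Bokeh app above — EQ_CLAMP radial segments per frequency bin, concatenated into one alpha vector. The bin levels here are made up, and NUM_BINS normally comes from the audio module:
# Sketch (toy numbers): how update() fills eq_source.data['alpha'].
import numpy as np
from math import ceil

EQ_CLAMP, NUM_BINS = 20, 4                 # assumed values for the sketch
eq_range = np.arange(EQ_CLAMP, dtype=np.float64)
bins = [3.2, 0.0, 12.7, 20.0]              # hypothetical per-bin levels
alphas = []
for x in bins:
    a = np.zeros_like(eq_range)
    N = int(ceil(x))                       # light the first N radial segments
    a[:N] = 1 - eq_range[:N] * 0.05        # fade towards the outer edge
    alphas.append(a)
alpha = np.hstack(alphas)                  # length NUM_BINS * EQ_CLAMP
assert alpha.shape == (NUM_BINS * EQ_CLAMP,)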
<|file_name|>adt-brace-enums.rs<|end_file_name|><|fim▁begin|>// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your<|fim▁hole|>// except according to those terms.
// Unit test for the "user substitutions" that are annotated on each
// node.
#![feature(nll)]
enum SomeEnum<T> {
SomeVariant { t: T }
}
fn no_annot() {
let c = 66;
SomeEnum::SomeVariant { t: &c };
}
fn annot_underscore() {
let c = 66;
SomeEnum::SomeVariant::<_> { t: &c };
}
fn annot_reference_any_lifetime() {
let c = 66;
SomeEnum::SomeVariant::<&u32> { t: &c };
}
fn annot_reference_static_lifetime() {
let c = 66;
SomeEnum::SomeVariant::<&'static u32> { t: &c }; //~ ERROR
}
fn annot_reference_named_lifetime<'a>(_d: &'a u32) {
let c = 66;
SomeEnum::SomeVariant::<&'a u32> { t: &c }; //~ ERROR
}
fn annot_reference_named_lifetime_ok<'a>(c: &'a u32) {
SomeEnum::SomeVariant::<&'a u32> { t: c };
}
fn annot_reference_named_lifetime_in_closure<'a>(_: &'a u32) {
let _closure = || {
let c = 66;
SomeEnum::SomeVariant::<&'a u32> { t: &c }; //~ ERROR
};
}
fn annot_reference_named_lifetime_in_closure_ok<'a>(c: &'a u32) {
let _closure = || {
SomeEnum::SomeVariant::<&'a u32> { t: c };
};
}
fn main() { }<|fim▁end|>
|
// option. This file may not be copied, modified, or distributed
|
<|file_name|>PackageConstants.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
"""
Package constants
"""
## MIT License
##
## Copyright (c) 2017, krishna bhogaonker
## Permission is hereby granted, free of charge, to any person obtaining a ## copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
## The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__author__ = 'krishna bhogaonker'
__copyright__ = 'copyright 2017'
__credits__ = ['krishna bhogaonker']
__license__ = "MIT"
__version__ = '0.1.0'
__maintainer__ = 'krishna bhogaonker'
__email__ = '[email protected]'
__status__ = 'pre-alpha'
from aenum import Enum
class RequestTypes(Enum):
SIMPLEPOINTIMAGERY = 1
DIVAGIS = 2
COMPOSITEDPOINTIMAGERY = 3
class RequestStatusCodes(Enum):
CLOSED = 0
CREATED = 1
QUEUED = 2
PROCESSING = 3
COMPLETED = 4
REJECTED = 5
ERROR = 6
imgCollections = {'Landsat8' : ImageCollection('LANDSAT/LC08/C01/T1',
['B1','B2','B3','B4','B5','B6','B7','B8','B9','B10','B11','BQA'],
'04/13/2011',
'10/07/2017',
30),
'Landsat7' : ImageCollection('LANDSAT/LE07/C01/T1',
['B1','B2','B3','B4','B5','B6','B7'],
'01/01/1999',
'09/17/2017',
30),
'Landsat5' : ImageCollection('LANDSAT/LT05/C01/T1',
['B1','B2','B3','B4','B5','B6','B7'],<|fim▁hole|> '05/05/2012',
30),
'Sentinel2msi' : ImageCollection('COPERNICUS/S2',
['B1','B2','B3','B4','B5','B6','B7','B8','B8A','B9','B10','B11','QA10','QA20','QA60'],
'01/23/2015',
'10/20/2017',
30),
'Sentinel2sar' : ImageCollection('COPERNICUS/S1_GRD',
['VV', 'HH',['VV', 'VH'], ['HH','HV']],
'10/03/2014',
'10/20/2017',
30),
'ModisThermalAnomalies' : ImageCollection('MODIS/006/MOD14A1',
['FireMask', 'MaxFRP','sample', 'QA'],
'02/18/2000',
'10/23/2017',
30)
}
if __name__ == "__main__":
print('set of package constants.')<|fim▁end|>
|
'01/01/1984',
|
<|file_name|>ProfileOtherfiles.java<|end_file_name|><|fim▁begin|>/*
* Geopaparazzi - Digital field mapping on Android based devices
* Copyright (C) 2016 HydroloGIS (www.hydrologis.com)
*
* This program is free software: you can redistribute it and/or modify<|fim▁hole|> * (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package eu.geopaparazzi.library.profiles.objects;
import android.os.Parcel;
import android.os.Parcelable;
import eu.geopaparazzi.library.network.download.IDownloadable;
/**
* Created by hydrologis on 19/03/18.
*/
public class ProfileOtherfiles extends ARelativePathResource implements Parcelable, IDownloadable {
public String url = "";
public String modifiedDate = "";
public long size = -1;
private String destinationPath = "";
public ProfileOtherfiles() {
}
protected ProfileOtherfiles(Parcel in) {
url = in.readString();
modifiedDate = in.readString();
size = in.readLong();
destinationPath = in.readString();
}
public static final Creator<ProfileOtherfiles> CREATOR = new Creator<ProfileOtherfiles>() {
@Override
public ProfileOtherfiles createFromParcel(Parcel in) {
return new ProfileOtherfiles(in);
}
@Override
public ProfileOtherfiles[] newArray(int size) {
return new ProfileOtherfiles[size];
}
};
@Override
public long getSize() {
return size;
}
@Override
public String getUrl() {
return url;
}
@Override
public String getDestinationPath() {
return destinationPath;
}
@Override
public void setDestinationPath(String path) {
destinationPath = path;
}
@Override
public int describeContents() {
return 0;
}
@Override
public void writeToParcel(Parcel dest, int flags) {
dest.writeString(url);
dest.writeString(modifiedDate);
dest.writeLong(size);
dest.writeString(destinationPath);
}
}<|fim▁end|>
|
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
|
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>// Copyright 2020 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
pub mod aml;<|fim▁hole|>pub use self::sdt::HEADER_LEN;
fn generate_checksum(data: &[u8]) -> u8 {
(255 - data.iter().fold(0u8, |acc, x| acc.wrapping_add(*x))).wrapping_add(1)
}<|fim▁end|>
|
pub mod facs;
pub mod rsdp;
pub mod sdt;
|
<|file_name|>model2d.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# Copyright 2007-2021 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from hyperspy._signals.signal2d import Signal2D
from hyperspy.decorators import interactive_range_selector
from hyperspy.exceptions import WrongObjectError
from hyperspy.model import BaseModel, ModelComponents, ModelSpecialSlicers
class Model2D(BaseModel):
"""Model and data fitting for two dimensional signals.
A model is constructed as a linear combination of :mod:`components2D` that
are added to the model using :meth:`append` or :meth:`extend`. There
are many predifined components available in the in the :mod:`components2D`
module. If needed, new components can be created easily using the code of
existing components as a template.
Once defined, the model can be fitted to the data using :meth:`fit` or
:meth:`multifit`. Once the optimizer reaches the convergence criteria or
the maximum number of iterations the new value of the component parameters
are stored in the components.
It is possible to access the components in the model by their name or by
the index in the model. An example is given at the end of this docstring.
Note that methods are not yet defined for plotting 2D models or using
gradient based optimisation methods - these will be added soon.
Attributes
----------
signal : Signal2D instance
It contains the data to fit.
chisq : A Signal of floats
Chi-squared of the signal (or np.nan if not yet fit)
dof : A Signal of integers
Degrees of freedom of the signal (0 if not yet fit)
red_chisq : Signal instance
Reduced chi-squared.
components : `ModelComponents` instance
The components of the model are attributes of this class. This provides
a convinient way to access the model components when working in IPython
as it enables tab completion.
Methods
-------
append
Append one component to the model.
extend
Append multiple components to the model.
remove<|fim▁hole|> Remove component from model.
fit, multifit
Fit the model to the data at the current position or the full dataset.
See also
--------
Base Model
Model1D
Example
-------
"""
def __init__(self, signal2D, dictionary=None):
super(Model2D, self).__init__()
self.signal = signal2D
self.axes_manager = self.signal.axes_manager
self._plot = None
self._position_widgets = {}
self._adjust_position_all = None
self._plot_components = False
self._suspend_update = False
self._model_line = None
self.xaxis, self.yaxis = np.meshgrid(
self.axes_manager.signal_axes[0].axis,
self.axes_manager.signal_axes[1].axis)
self.axes_manager.events.indices_changed.connect(
self._on_navigating, [])
self.channel_switches = np.ones(self.xaxis.shape, dtype=bool)
self.chisq = signal2D._get_navigation_signal()
self.chisq.change_dtype("float")
self.chisq.data.fill(np.nan)
self.chisq.metadata.General.title = (
self.signal.metadata.General.title + ' chi-squared')
self.dof = self.chisq._deepcopy_with_new_data(
np.zeros_like(self.chisq.data, dtype='int'))
self.dof.metadata.General.title = (
self.signal.metadata.General.title + ' degrees of freedom')
self.free_parameters_boundaries = None
self.convolved = False
self.components = ModelComponents(self)
if dictionary is not None:
self._load_dictionary(dictionary)
self.inav = ModelSpecialSlicers(self, True)
self.isig = ModelSpecialSlicers(self, False)
self._whitelist = {
'channel_switches': None,
'convolved': None,
'free_parameters_boundaries': None,
'chisq.data': None,
'dof.data': None}
self._slicing_whitelist = {
'channel_switches': 'isig',
'chisq.data': 'inav',
'dof.data': 'inav'}
@property
def signal(self):
return self._signal
@signal.setter
def signal(self, value):
if isinstance(value, Signal2D):
self._signal = value
else:
raise WrongObjectError(str(type(value)), 'Signal2D')
def __call__(self, non_convolved=True, onlyactive=False):
"""Returns the corresponding 2D model for the current coordinates
Parameters
----------
only_active : bool
If true, only the active components will be used to build the
model.
Returns
-------
numpy array
"""
sum_ = np.zeros_like(self.xaxis)
if onlyactive is True:
for component in self: # Cut the parameters list
if component.active:
np.add(sum_, component.function(self.xaxis, self.yaxis),
sum_)
else:
for component in self: # Cut the parameters list
np.add(sum_, component.function(self.xaxis, self.yaxis),
sum_)
return sum_[self.channel_switches]
def _errfunc(self, param, y, weights=None):
if weights is None:
weights = 1.
errfunc = self._model_function(param).ravel() - y
return errfunc * weights
def _set_signal_range_in_pixels(self, i1=None, i2=None):
raise NotImplementedError
@interactive_range_selector
def set_signal_range(self, x1=None, x2=None):
raise NotImplementedError
def _remove_signal_range_in_pixels(self, i1=None, i2=None):
raise NotImplementedError
@interactive_range_selector
def remove_signal_range(self, x1=None, x2=None):
raise NotImplementedError
def reset_signal_range(self):
raise NotImplementedError
def _add_signal_range_in_pixels(self, i1=None, i2=None):
raise NotImplementedError
@interactive_range_selector
def add_signal_range(self, x1=None, x2=None):
raise NotImplementedError
def reset_the_signal_range(self):
raise NotImplementedError
def _check_analytical_jacobian(self):
"""Check all components have analytical gradients.
If they do, return True and an empty string.
If they do not, return False and an error message.
"""
return False, "Analytical gradients not implemented for Model2D"
def _jacobian(self, param, y, weights=None):
raise NotImplementedError
def _function4odr(self, param, x):
raise NotImplementedError
def _jacobian4odr(self, param, x):
raise NotImplementedError
def _poisson_likelihood_function(self, param, y, weights=None):
raise NotImplementedError
def _gradient_ml(self, param, y, weights=None):
raise NotImplementedError
def _gradient_ls(self, param, y, weights=None):
raise NotImplementedError
def _huber_loss_function(self, param, y, weights=None, huber_delta=None):
raise NotImplementedError
def _gradient_huber(self, param, y, weights=None, huber_delta=None):
raise NotImplementedError
def _model2plot(self, axes_manager, out_of_range2nans=True):
old_axes_manager = None
if axes_manager is not self.axes_manager:
old_axes_manager = self.axes_manager
self.axes_manager = axes_manager
self.fetch_stored_values()
s = self.__call__(non_convolved=False, onlyactive=True)
if old_axes_manager is not None:
self.axes_manager = old_axes_manager
self.fetch_stored_values()
if out_of_range2nans is True:
ns = np.empty(self.xaxis.shape)
ns.fill(np.nan)
ns[np.where(self.channel_switches)] = s.ravel()
s = ns
return s
def plot(self, plot_components=False):
raise NotImplementedError
@staticmethod
def _connect_component_line(component):
raise NotImplementedError
@staticmethod
def _disconnect_component_line(component):
raise NotImplementedError
def _plot_component(self, component):
raise NotImplementedError
def enable_adjust_position(
self, components=None, fix_them=True, show_label=True):
raise NotImplementedError
def disable_adjust_position(self):
raise NotImplementedError<|fim▁end|>
| |
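The Model2D sample above models a signal as a linear combination of 2D components evaluated on the signal's meshgrid (see its __call__). A self-contained sketch of that idea, using hypothetical stand-in components rather than the HyperSpy API:
# Sketch: sum of component functions sampled on a 2D grid.
import numpy as np

def gaussian2d(x, y, A=1.0, x0=0.0, y0=0.0, s=1.0):
    return A * np.exp(-((x - x0) ** 2 + (y - y0) ** 2) / (2 * s ** 2))

def plane(x, y, a=0.1, b=0.0, c=0.2):
    return a * x + b * y + c

components = [gaussian2d, plane]                       # stand-ins for model components
xaxis, yaxis = np.meshgrid(np.linspace(-5, 5, 64),
                           np.linspace(-5, 5, 64))     # mirrors the axes_manager meshgrid
model_surface = np.zeros_like(xaxis)
for component in components:
    model_surface += component(xaxis, yaxis)           # same accumulation as __call__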
<|file_name|>dataloader.js<|end_file_name|><|fim▁begin|>/*
Plugin Name: amCharts Data Loader
Description: This plugin adds external data loading capabilities to all amCharts libraries.
Author: Martynas Majeris, amCharts
Version: 1.0.2
Author URI: http://www.amcharts.com/
Copyright 2015 amCharts
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Please note that the above license covers only this plugin. It by all means does
not apply to any other amCharts products that are covered by different licenses.
*/
/**
* TODO:
* incremental load
* XML support (?)
*/
/**
* Initialize language prompt container
*/
AmCharts.translations.dataLoader = {}
/**
* Set init handler
*/
AmCharts.addInitHandler( function ( chart ) {
/**
* Check if dataLoader is set (initialize it)
*/
if ( undefined === chart.dataLoader || ! isObject( chart.dataLoader ) )
chart.dataLoader = {};
/**
* Check charts version for compatibility:
* the first compatible version is 3.13
*/
var version = chart.version.split( '.' );
if ( ( Number( version[0] ) < 3 ) || ( 3 == Number( version[0] ) && ( Number( version[1] ) < 13 ) ) )
return;
/**
* Define object reference for easy access
*/
var l = chart.dataLoader;
l.remaining = 0;
/**
* Set defaults
*/
var defaults = {
'async': true,
'format': 'json',
'showErrors': true,
'showCurtain': true,
'noStyles': false,
'reload': 0,
'timestamp': false,
'delimiter': ',',
'skip': 0,
'useColumnNames': false,
'reverse': false,
'reloading': false,
'complete': false,
'error': false
};
/**
* Load all files in a row
*/
if ( 'stock' === chart.type ) {
// delay this a little bit so the chart has the chance to build itself
setTimeout( function () {
// preserve animation
if ( 0 > chart.panelsSettings.startDuration ) {
l.startDuration = chart.panelsSettings.startDuration;
chart.panelsSettings.startDuration = 0;
}
// cycle through all of the data sets
for ( var x = 0; x < chart.dataSets.length; x++ ) {
var ds = chart.dataSets[ x ];
// load data
if ( undefined !== ds.dataLoader && undefined !== ds.dataLoader.url ) {
ds.dataProvider = [];
applyDefaults( ds.dataLoader );
loadFile( ds.dataLoader.url, ds, ds.dataLoader, 'dataProvider' );
}
// load events data
if ( undefined !== ds.eventDataLoader && undefined !== ds.eventDataLoader.url ) {
ds.events = [];
applyDefaults( ds.eventDataLoader );
loadFile( ds.eventDataLoader.url, ds, ds.eventDataLoader, 'stockEvents' );
}
}
}, 100 );
}
else {
applyDefaults( l );
if ( undefined === l.url )
return;
// preserve animation
if ( undefined !== chart.startDuration && ( 0 < chart.startDuration ) ) {
l.startDuration = chart.startDuration;
chart.startDuration = 0;
}
chart.dataProvider = [];
loadFile( l.url, chart, l, 'dataProvider' );
}
/**
* Loads a file and determines correct parsing mechanism for it
*/
function loadFile( url, holder, options, providerKey ) {
// set default providerKey
if ( undefined === providerKey )
providerKey = 'dataProvider';
// show curtain
if ( options.showCurtain )
showCurtain( undefined, options.noStyles );
// increment loader count
l.remaining++;
// load the file
AmCharts.loadFile( url, options, function ( response ) {
// error?
if ( false === response ) {
callFunction( options.error, url, options );
raiseError( AmCharts.__( 'Error loading the file', chart.language ) + ': ' + url, false, options );
}
else {
// determine the format
if ( undefined === options.format ) {
// TODO
options.format = 'json';
}
// lowercase
options.format = options.format.toLowerCase();
// invoke parsing function
switch( options.format ) {
case 'json':
holder[providerKey] = AmCharts.parseJSON( response, options );
if ( false === holder[providerKey] ) {
callFunction( options.error, options );
raiseError( AmCharts.__( 'Error parsing JSON file', chart.language ) + ': ' + l.url, false, options );
holder[providerKey] = [];
return;
}
else {
holder[providerKey] = postprocess( holder[providerKey], options );
callFunction( options.load, options );
}
break;
case 'csv':
holder[providerKey] = AmCharts.parseCSV( response, options );
if ( false === holder[providerKey] ) {
callFunction( options.error, options );
raiseError( AmCharts.__( 'Error parsing CSV file', chart.language ) + ': ' + l.url, false, options );
holder[providerKey] = [];
return;
}
else {
holder[providerKey] = postprocess( holder[providerKey], options );
callFunction( options.load, options );
}
break;
default:
callFunction( options.error, options );
raiseError( AmCharts.__( 'Unsupported data format', chart.language ) + ': ' + options.format, false, options.noStyles );
return;
break;
}
// decrement remaining counter
l.remaining--;
// we done?
if ( 0 === l.remaining ) {
// callback
callFunction( options.complete );
// take in the new data
if ( options.async ) {
if ( 'map' === chart.type )
chart.validateNow( true );
else {
// take in new data
chart.validateData();
// make the chart animate again
if ( l.startDuration ) {
if ( 'stock' === chart.type ) {
chart.panelsSettings.startDuration = l.startDuration;
for ( var x = 0; x < chart.panels.length; x++ ) {
chart.panels[x].startDuration = l.startDuration;
chart.panels[x].animateAgain();
}
}
else {
chart.startDuration = l.startDuration;
chart.animateAgain();
}
}
}
}
// restore default period
if ( 'stock' === chart.type && ! options.reloading )
chart.periodSelector.setDefaultPeriod();
// remove curtain
removeCurtain();
}
// schedule another load of necessary
if ( options.reload ) {
if ( options.timeout )
clearTimeout( options.timeout );
options.timeout = setTimeout( loadFile, 1000 * options.reload, url, holder, options );
options.reloading = true;
}
}
} );
}
/**
* Checks if postProcess is set and invokes the handler
*/
function postprocess ( data, options ) {
if ( undefined !== options.postProcess && isFunction( options.postProcess ) )
try {
return options.postProcess.call( this, data, options );
}
catch ( e ) {
raiseError( AmCharts.__( 'Error loading file', chart.language ) + ': ' + options.url, false, options );
return data;
}
else
return data;
}
/**
* Returns true if argument is object
*/
function isArray ( obj ) {
return obj instanceof Array;
}
/**
* Returns true if argument is array
*/
function isObject ( obj ) {
return 'object' === typeof( obj );
}
/**
* Returns true is argument is a function
*/
function isFunction ( obj ) {
return 'function' === typeof( obj );
}
/**
* Applies defaults to config object
*/
function applyDefaults ( obj ) {
for ( var x in defaults ) {
setDefault( obj, x, defaults[ x ] );
}
}
/**
* Checks if object property is set, sets with a default if it isn't
*/
<|fim▁hole|> function setDefault ( obj, key, value ) {
if ( undefined === obj[ key ] )
obj[ key ] = value;
}
/**
* Raises an internal error (writes it out to console)
*/
function raiseError ( msg, error, options ) {
if ( options.showErrors )
showCurtain( msg, options.noStyles );
else {
removeCurtain();
console.log( msg );
}
}
/**
* Shows curtain over chart area
*/
function showCurtain ( msg, noStyles ) {
// remove previous curtain if there is one
removeCurtain();
// did we pass in the message?
if ( undefined === msg )
msg = AmCharts.__( 'Loading data...', chart.language );
// create and populate curtain element
var curtain =document.createElement( 'div' );
curtain.setAttribute( 'id', chart.div.id + '-curtain' );
curtain.className = 'amcharts-dataloader-curtain';
if ( true !== noStyles ) {
curtain.style.position = 'absolute';
curtain.style.top = 0;
curtain.style.left = 0;
curtain.style.width = ( undefined !== chart.realWidth ? chart.realWidth : chart.divRealWidth ) + 'px';
curtain.style.height = ( undefined !== chart.realHeight ? chart.realHeight : chart.divRealHeight ) + 'px';
curtain.style.textAlign = 'center';
curtain.style.display = 'table';
curtain.style.fontSize = '20px';
curtain.style.background = 'rgba(255, 255, 255, 0.3)';
curtain.innerHTML = '<div style="display: table-cell; vertical-align: middle;">' + msg + '</div>';
}
else {
curtain.innerHTML = msg;
}
chart.containerDiv.appendChild( curtain );
l.curtain = curtain;
}
/**
* Removes the curtain
*/
function removeCurtain () {
try {
if ( undefined !== l.curtain )
chart.containerDiv.removeChild( l.curtain );
}
catch ( e ) {
// do nothing
}
l.curtain = undefined;
}
/**
* Execute callback function
*/
function callFunction ( func, param1, param2 ) {
if ( 'function' === typeof func )
func.call( l, param1, param2 );
}
}, [ 'pie', 'serial', 'xy', 'funnel', 'radar', 'gauge', 'gantt', 'stock', 'map' ] );
/**
* Returns prompt in a chart language (set by chart.language) if it is
* available
*/
if ( undefined === AmCharts.__ ) {
AmCharts.__ = function ( msg, language ) {
if ( undefined !== language
&& undefined !== AmCharts.translations.dataLoader[ chart.language ]
&& undefined !== AmCharts.translations.dataLoader[ chart.language ][ msg ] )
return AmCharts.translations.dataLoader[ chart.language ][ msg ];
else
return msg;
}
}
/**
* Loads a file from url and calls function handler with the result
*/
AmCharts.loadFile = function ( url, options, handler ) {
// create the request
if ( window.XMLHttpRequest ) {
// IE7+, Firefox, Chrome, Opera, Safari
var request = new XMLHttpRequest();
} else {
// code for IE6, IE5
var request = new ActiveXObject( 'Microsoft.XMLHTTP' );
}
// set handler for data if async loading
request.onreadystatechange = function () {
if ( 4 == request.readyState && 404 == request.status )
handler.call( this, false );
else if ( 4 == request.readyState && 200 == request.status )
handler.call( this, request.responseText );
}
// load the file
try {
request.open( 'GET', options.timestamp ? AmCharts.timestampUrl( url ) : url, options.async );
request.send();
}
catch ( e ) {
handler.call( this, false );
}
};
/**
* Parses JSON string into an object
*/
AmCharts.parseJSON = function ( response, options ) {
try {
if ( undefined !== JSON )
return JSON.parse( response );
else
return eval( response );
}
catch ( e ) {
return false;
}
}
/**
* Prases CSV string into an object
*/
AmCharts.parseCSV = function ( response, options ) {
// parse CSV into array
var data = AmCharts.CSVToArray( response, options.delimiter );
// init resulting array
var res = [];
var cols = [];
// first row holds column names?
if ( options.useColumnNames ) {
cols = data.shift();
// normalize column names
for ( var x = 0; x < cols.length; x++ ) {
// trim
var col = cols[ x ].replace( /^\s+|\s+$/gm, '' );
// check for empty
if ( '' === col )
col = 'col' + x;
cols[ x ] = col;
}
if ( 0 < options.skip )
options.skip--;
}
// skip rows
for ( var i = 0; i < options.skip; i++ )
data.shift();
// iterate through the result set
var row;
while ( row = options.reverse ? data.pop() : data.shift() ) {
var dataPoint = {};
for ( var i = 0; i < row.length; i++ ) {
var col = undefined === cols[ i ] ? 'col' + i : cols[ i ];
dataPoint[ col ] = row[ i ];
}
res.push( dataPoint );
}
return res;
}
/**
* Parses CSV data into array
* Taken from here: (thanks!)
* http://www.bennadel.com/blog/1504-ask-ben-parsing-csv-strings-with-javascript-exec-regular-expression-command.htm
*/
AmCharts.CSVToArray = function ( strData, strDelimiter ){
// Check to see if the delimiter is defined. If not,
// then default to comma.
strDelimiter = (strDelimiter || ",");
// Create a regular expression to parse the CSV values.
var objPattern = new RegExp(
(
// Delimiters.
"(\\" + strDelimiter + "|\\r?\\n|\\r|^)" +
// Quoted fields.
"(?:\"([^\"]*(?:\"\"[^\"]*)*)\"|" +
// Standard fields.
"([^\"\\" + strDelimiter + "\\r\\n]*))"
),
"gi"
);
// Create an array to hold our data. Give the array
// a default empty first row.
var arrData = [[]];
// Create an array to hold our individual pattern
// matching groups.
var arrMatches = null;
// Keep looping over the regular expression matches
// until we can no longer find a match.
while (arrMatches = objPattern.exec( strData )){
// Get the delimiter that was found.
var strMatchedDelimiter = arrMatches[ 1 ];
// Check to see if the given delimiter has a length
// (is not the start of string) and if it matches
// field delimiter. If id does not, then we know
// that this delimiter is a row delimiter.
if (
strMatchedDelimiter.length &&
(strMatchedDelimiter != strDelimiter)
){
// Since we have reached a new row of data,
// add an empty row to our data array.
arrData.push( [] );
}
// Now that we have our delimiter out of the way,
// let's check to see which kind of value we
// captured (quoted or unquoted).
if (arrMatches[ 2 ]){
// We found a quoted value. When we capture
// this value, unescape any double quotes.
var strMatchedValue = arrMatches[ 2 ].replace(
new RegExp( "\"\"", "g" ),
"\""
);
} else {
// We found a non-quoted value.
var strMatchedValue = arrMatches[ 3 ];
}
// Now that we have our value string, let's add
// it to the data array.
arrData[ arrData.length - 1 ].push( strMatchedValue );
}
// Return the parsed data.
return( arrData );
}
/**
* Appends timestamp to the url
*/
AmCharts.timestampUrl = function ( url ) {
var p = url.split( '?' );
if ( 1 === p.length )
p[1] = new Date().getTime();
else
p[1] += '&' + new Date().getTime();
return p.join( '?' );
}<|fim▁end|>
| |
<|file_name|>relative.py<|end_file_name|><|fim▁begin|>from __future__ import absolute_import<|fim▁hole|>from . import mod<|fim▁end|>
| |
<|file_name|>kubelet_test.go<|end_file_name|><|fim▁begin|>/*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubelet
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"os"
"path"
"reflect"
"sort"
"strings"
"testing"
"time"
cadvisorApi "github.com/google/cadvisor/info/v1"
cadvisorApiv2 "github.com/google/cadvisor/info/v2"
"k8s.io/kubernetes/pkg/api"
apierrors "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/capabilities"
"k8s.io/kubernetes/pkg/client/unversioned/record"
"k8s.io/kubernetes/pkg/client/unversioned/testclient"
"k8s.io/kubernetes/pkg/kubelet/cadvisor"
"k8s.io/kubernetes/pkg/kubelet/container"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/network"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/bandwidth"
"k8s.io/kubernetes/pkg/version"
"k8s.io/kubernetes/pkg/volume"
_ "k8s.io/kubernetes/pkg/volume/host_path"
)
func init() {
api.ForTesting_ReferencesAllowBlankSelfLinks = true
util.ReallyCrash = true
}
const testKubeletHostname = "127.0.0.1"
type fakeHTTP struct {
url string
err error
}
func (f *fakeHTTP) Get(url string) (*http.Response, error) {
f.url = url
return nil, f.err
}
type TestKubelet struct {
kubelet *Kubelet
fakeRuntime *kubecontainer.FakeRuntime
fakeCadvisor *cadvisor.Mock
fakeKubeClient *testclient.Fake
fakeMirrorClient *fakeMirrorClient
}
func newTestKubelet(t *testing.T) *TestKubelet {
fakeRuntime := &kubecontainer.FakeRuntime{}
fakeRuntime.VersionInfo = "1.15"
fakeRecorder := &record.FakeRecorder{}
fakeKubeClient := &testclient.Fake{}
kubelet := &Kubelet{}
kubelet.kubeClient = fakeKubeClient
kubelet.os = kubecontainer.FakeOS{}
kubelet.hostname = testKubeletHostname
kubelet.nodeName = testKubeletHostname
kubelet.runtimeUpThreshold = maxWaitForContainerRuntime
kubelet.networkPlugin, _ = network.InitNetworkPlugin([]network.NetworkPlugin{}, "", network.NewFakeHost(nil))
if tempDir, err := ioutil.TempDir("/tmp", "kubelet_test."); err != nil {
t.Fatalf("can't make a temp rootdir: %v", err)
} else {
kubelet.rootDirectory = tempDir
}
if err := os.MkdirAll(kubelet.rootDirectory, 0750); err != nil {
t.Fatalf("can't mkdir(%q): %v", kubelet.rootDirectory, err)
}
kubelet.sourcesReady = func() bool { return true }
kubelet.masterServiceNamespace = api.NamespaceDefault
kubelet.serviceLister = testServiceLister{}
kubelet.nodeLister = testNodeLister{}
kubelet.readinessManager = kubecontainer.NewReadinessManager()
kubelet.recorder = fakeRecorder
kubelet.statusManager = newStatusManager(fakeKubeClient)
if err := kubelet.setupDataDirs(); err != nil {
t.Fatalf("can't initialize kubelet data dirs: %v", err)
}
mockCadvisor := &cadvisor.Mock{}
kubelet.cadvisor = mockCadvisor
podManager, fakeMirrorClient := newFakePodManager()
kubelet.podManager = podManager
kubelet.containerRefManager = kubecontainer.NewRefManager()
diskSpaceManager, err := newDiskSpaceManager(mockCadvisor, DiskSpacePolicy{})
if err != nil {
t.Fatalf("can't initialize disk space manager: %v", err)
}
kubelet.diskSpaceManager = diskSpaceManager
kubelet.containerRuntime = fakeRuntime
kubelet.runtimeCache = kubecontainer.NewFakeRuntimeCache(kubelet.containerRuntime)
kubelet.podWorkers = &fakePodWorkers{
syncPodFn: kubelet.syncPod,
runtimeCache: kubelet.runtimeCache,
t: t,
}
kubelet.volumeManager = newVolumeManager()
kubelet.containerManager, _ = newContainerManager(mockCadvisor, "", "", "")
kubelet.networkConfigured = true
fakeClock := &util.FakeClock{Time: time.Now()}
kubelet.backOff = util.NewBackOff(time.Second, time.Minute)
kubelet.backOff.Clock = fakeClock<|fim▁hole|>}
func newTestPods(count int) []*api.Pod {
pods := make([]*api.Pod, count)
for i := 0; i < count; i++ {
pods[i] = &api.Pod{
ObjectMeta: api.ObjectMeta{
Name: fmt.Sprintf("pod%d", i),
},
}
}
return pods
}
func TestKubeletDirs(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
root := kubelet.rootDirectory
var exp, got string
got = kubelet.getPodsDir()
exp = path.Join(root, "pods")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
got = kubelet.getPluginsDir()
exp = path.Join(root, "plugins")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
got = kubelet.getPluginDir("foobar")
exp = path.Join(root, "plugins/foobar")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
got = kubelet.getPodDir("abc123")
exp = path.Join(root, "pods/abc123")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
got = kubelet.getPodVolumesDir("abc123")
exp = path.Join(root, "pods/abc123/volumes")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
got = kubelet.getPodVolumeDir("abc123", "plugin", "foobar")
exp = path.Join(root, "pods/abc123/volumes/plugin/foobar")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
got = kubelet.getPodPluginsDir("abc123")
exp = path.Join(root, "pods/abc123/plugins")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
got = kubelet.getPodPluginDir("abc123", "foobar")
exp = path.Join(root, "pods/abc123/plugins/foobar")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
got = kubelet.getPodContainerDir("abc123", "def456")
exp = path.Join(root, "pods/abc123/containers/def456")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
}
func TestKubeletDirsCompat(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
root := kubelet.rootDirectory
if err := os.MkdirAll(root, 0750); err != nil {
t.Fatalf("can't mkdir(%q): %s", root, err)
}
var exp, got string
// Old-style pod dir.
if err := os.MkdirAll(fmt.Sprintf("%s/oldpod", root), 0750); err != nil {
t.Fatalf("can't mkdir(%q): %s", root, err)
}
// New-style pod dir.
if err := os.MkdirAll(fmt.Sprintf("%s/pods/newpod", root), 0750); err != nil {
t.Fatalf("can't mkdir(%q): %s", root, err)
}
// Both-style pod dir.
if err := os.MkdirAll(fmt.Sprintf("%s/bothpod", root), 0750); err != nil {
t.Fatalf("can't mkdir(%q): %s", root, err)
}
if err := os.MkdirAll(fmt.Sprintf("%s/pods/bothpod", root), 0750); err != nil {
t.Fatalf("can't mkdir(%q): %s", root, err)
}
got = kubelet.getPodDir("oldpod")
exp = path.Join(root, "oldpod")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
got = kubelet.getPodDir("newpod")
exp = path.Join(root, "pods/newpod")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
got = kubelet.getPodDir("bothpod")
exp = path.Join(root, "pods/bothpod")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
got = kubelet.getPodDir("neitherpod")
exp = path.Join(root, "pods/neitherpod")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
root = kubelet.getPodDir("newpod")
// Old-style container dir.
if err := os.MkdirAll(fmt.Sprintf("%s/oldctr", root), 0750); err != nil {
t.Fatalf("can't mkdir(%q): %s", root, err)
}
// New-style container dir.
if err := os.MkdirAll(fmt.Sprintf("%s/containers/newctr", root), 0750); err != nil {
t.Fatalf("can't mkdir(%q): %s", root, err)
}
// Both-style container dir.
if err := os.MkdirAll(fmt.Sprintf("%s/bothctr", root), 0750); err != nil {
t.Fatalf("can't mkdir(%q): %s", root, err)
}
if err := os.MkdirAll(fmt.Sprintf("%s/containers/bothctr", root), 0750); err != nil {
t.Fatalf("can't mkdir(%q): %s", root, err)
}
got = kubelet.getPodContainerDir("newpod", "oldctr")
exp = path.Join(root, "oldctr")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
got = kubelet.getPodContainerDir("newpod", "newctr")
exp = path.Join(root, "containers/newctr")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
got = kubelet.getPodContainerDir("newpod", "bothctr")
exp = path.Join(root, "containers/bothctr")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
got = kubelet.getPodContainerDir("newpod", "neitherctr")
exp = path.Join(root, "containers/neitherctr")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
}
var emptyPodUIDs map[types.UID]SyncPodType
func TestSyncLoopTimeUpdate(t *testing.T) {
testKubelet := newTestKubelet(t)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil)
kubelet := testKubelet.kubelet
loopTime1 := kubelet.LatestLoopEntryTime()
if !loopTime1.IsZero() {
t.Errorf("Unexpected sync loop time: %s, expected 0", loopTime1)
}
kubelet.syncLoopIteration(make(chan PodUpdate), kubelet)
loopTime2 := kubelet.LatestLoopEntryTime()
if loopTime2.IsZero() {
t.Errorf("Unexpected sync loop time: 0, expected non-zero value.")
}
kubelet.syncLoopIteration(make(chan PodUpdate), kubelet)
loopTime3 := kubelet.LatestLoopEntryTime()
if !loopTime3.After(loopTime1) {
t.Errorf("Sync Loop Time was not updated correctly. Second update timestamp should be greater than first update timestamp")
}
}
func TestSyncLoopAbort(t *testing.T) {
testKubelet := newTestKubelet(t)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil)
kubelet := testKubelet.kubelet
kubelet.lastTimestampRuntimeUp = time.Now()
kubelet.networkConfigured = true
ch := make(chan PodUpdate)
close(ch)
// sanity check (also prevent this test from hanging in the next step)
ok := kubelet.syncLoopIteration(ch, kubelet)
if ok {
t.Fatalf("expected syncLoopIteration to return !ok since update chan was closed")
}
// this should terminate immediately; if it hangs then the syncLoopIteration isn't aborting properly
kubelet.syncLoop(ch, kubelet)
}
func TestSyncPodsStartPod(t *testing.T) {
testKubelet := newTestKubelet(t)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorApiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorApiv2.FsInfo{}, nil)
kubelet := testKubelet.kubelet
fakeRuntime := testKubelet.fakeRuntime
pods := []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "bar"},
},
},
},
}
kubelet.podManager.SetPods(pods)
kubelet.HandlePodSyncs(pods)
fakeRuntime.AssertStartedPods([]string{string(pods[0].UID)})
}
func TestSyncPodsDeletesWhenSourcesAreReady(t *testing.T) {
ready := false
testKubelet := newTestKubelet(t)
fakeRuntime := testKubelet.fakeRuntime
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorApiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorApiv2.FsInfo{}, nil)
kubelet := testKubelet.kubelet
kubelet.sourcesReady = func() bool { return ready }
fakeRuntime.PodList = []*kubecontainer.Pod{
{
ID: "12345678",
Name: "foo", Namespace: "new",
Containers: []*kubecontainer.Container{
{Name: "bar"},
},
},
}
kubelet.HandlePodCleanups()
// Sources are not ready yet. Don't remove any pods.
fakeRuntime.AssertKilledPods([]string{})
ready = true
kubelet.HandlePodCleanups()
// Sources are ready. Remove unwanted pods.
fakeRuntime.AssertKilledPods([]string{"12345678"})
}
func TestMountExternalVolumes(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
kubelet.volumePluginMgr.InitPlugins([]volume.VolumePlugin{&volume.FakeVolumePlugin{PluginName: "fake", Host: nil}}, &volumeHost{kubelet})
pod := api.Pod{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "test",
},
Spec: api.PodSpec{
Volumes: []api.Volume{
{
Name: "vol1",
VolumeSource: api.VolumeSource{},
},
},
},
}
podVolumes, err := kubelet.mountExternalVolumes(&pod)
if err != nil {
t.Errorf("Expected success: %v", err)
}
expectedPodVolumes := []string{"vol1"}
if len(expectedPodVolumes) != len(podVolumes) {
t.Errorf("Unexpected volumes. Expected %#v got %#v. Manifest was: %#v", expectedPodVolumes, podVolumes, pod)
}
for _, name := range expectedPodVolumes {
if _, ok := podVolumes[name]; !ok {
t.Errorf("api.Pod volumes map is missing key: %s. %#v", name, podVolumes)
}
}
}
func TestGetPodVolumesFromDisk(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
plug := &volume.FakeVolumePlugin{PluginName: "fake", Host: nil}
kubelet.volumePluginMgr.InitPlugins([]volume.VolumePlugin{plug}, &volumeHost{kubelet})
volsOnDisk := []struct {
podUID types.UID
volName string
}{
{"pod1", "vol1"},
{"pod1", "vol2"},
{"pod2", "vol1"},
}
expectedPaths := []string{}
for i := range volsOnDisk {
fv := volume.FakeVolume{PodUID: volsOnDisk[i].podUID, VolName: volsOnDisk[i].volName, Plugin: plug}
fv.SetUp()
expectedPaths = append(expectedPaths, fv.GetPath())
}
volumesFound := kubelet.getPodVolumesFromDisk()
if len(volumesFound) != len(expectedPaths) {
t.Errorf("Expected to find %d cleaners, got %d", len(expectedPaths), len(volumesFound))
}
for _, ep := range expectedPaths {
found := false
for _, cl := range volumesFound {
if ep == cl.GetPath() {
found = true
break
}
}
if !found {
t.Errorf("Could not find a volume with path %s", ep)
}
}
}
type stubVolume struct {
path string
}
func (f *stubVolume) GetPath() string {
return f.path
}
func TestMakeVolumeMounts(t *testing.T) {
container := api.Container{
VolumeMounts: []api.VolumeMount{
{
MountPath: "/mnt/path",
Name: "disk",
ReadOnly: false,
},
{
MountPath: "/mnt/path3",
Name: "disk",
ReadOnly: true,
},
{
MountPath: "/mnt/path4",
Name: "disk4",
ReadOnly: false,
},
{
MountPath: "/mnt/path5",
Name: "disk5",
ReadOnly: false,
},
},
}
podVolumes := kubecontainer.VolumeMap{
"disk": &stubVolume{"/mnt/disk"},
"disk4": &stubVolume{"/mnt/host"},
"disk5": &stubVolume{"/var/lib/kubelet/podID/volumes/empty/disk5"},
}
mounts := makeMounts(&container, podVolumes)
expectedMounts := []kubecontainer.Mount{
{
"disk",
"/mnt/path",
"/mnt/disk",
false,
},
{
"disk",
"/mnt/path3",
"/mnt/disk",
true,
},
{
"disk4",
"/mnt/path4",
"/mnt/host",
false,
},
{
"disk5",
"/mnt/path5",
"/var/lib/kubelet/podID/volumes/empty/disk5",
false,
},
}
if !reflect.DeepEqual(mounts, expectedMounts) {
t.Errorf("Unexpected mounts: Expected %#v got %#v. Container was: %#v", expectedMounts, mounts, container)
}
}
func TestGetContainerInfo(t *testing.T) {
containerID := "ab2cdf"
containerPath := fmt.Sprintf("/docker/%v", containerID)
containerInfo := cadvisorApi.ContainerInfo{
ContainerReference: cadvisorApi.ContainerReference{
Name: containerPath,
},
}
testKubelet := newTestKubelet(t)
fakeRuntime := testKubelet.fakeRuntime
kubelet := testKubelet.kubelet
cadvisorReq := &cadvisorApi.ContainerInfoRequest{}
mockCadvisor := testKubelet.fakeCadvisor
mockCadvisor.On("DockerContainer", containerID, cadvisorReq).Return(containerInfo, nil)
fakeRuntime.PodList = []*kubecontainer.Pod{
{
ID: "12345678",
Name: "qux",
Namespace: "ns",
Containers: []*kubecontainer.Container{
{
Name: "foo",
ID: types.UID(containerID),
},
},
},
}
stats, err := kubelet.GetContainerInfo("qux_ns", "", "foo", cadvisorReq)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if stats == nil {
t.Fatalf("stats should not be nil")
}
mockCadvisor.AssertExpectations(t)
}
func TestGetRawContainerInfoRoot(t *testing.T) {
containerPath := "/"
containerInfo := &cadvisorApi.ContainerInfo{
ContainerReference: cadvisorApi.ContainerReference{
Name: containerPath,
},
}
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
mockCadvisor := testKubelet.fakeCadvisor
cadvisorReq := &cadvisorApi.ContainerInfoRequest{}
mockCadvisor.On("ContainerInfo", containerPath, cadvisorReq).Return(containerInfo, nil)
_, err := kubelet.GetRawContainerInfo(containerPath, cadvisorReq, false)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
mockCadvisor.AssertExpectations(t)
}
func TestGetRawContainerInfoSubcontainers(t *testing.T) {
containerPath := "/kubelet"
containerInfo := map[string]*cadvisorApi.ContainerInfo{
containerPath: {
ContainerReference: cadvisorApi.ContainerReference{
Name: containerPath,
},
},
"/kubelet/sub": {
ContainerReference: cadvisorApi.ContainerReference{
Name: "/kubelet/sub",
},
},
}
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
mockCadvisor := testKubelet.fakeCadvisor
cadvisorReq := &cadvisorApi.ContainerInfoRequest{}
mockCadvisor.On("SubcontainerInfo", containerPath, cadvisorReq).Return(containerInfo, nil)
result, err := kubelet.GetRawContainerInfo(containerPath, cadvisorReq, true)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(result) != 2 {
t.Errorf("Expected 2 elements, received: %+v", result)
}
mockCadvisor.AssertExpectations(t)
}
func TestGetContainerInfoWhenCadvisorFailed(t *testing.T) {
containerID := "ab2cdf"
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
mockCadvisor := testKubelet.fakeCadvisor
fakeRuntime := testKubelet.fakeRuntime
cadvisorApiFailure := fmt.Errorf("cAdvisor failure")
containerInfo := cadvisorApi.ContainerInfo{}
cadvisorReq := &cadvisorApi.ContainerInfoRequest{}
mockCadvisor.On("DockerContainer", containerID, cadvisorReq).Return(containerInfo, cadvisorApiFailure)
fakeRuntime.PodList = []*kubecontainer.Pod{
{
ID: "uuid",
Name: "qux",
Namespace: "ns",
Containers: []*kubecontainer.Container{
{Name: "foo",
ID: types.UID(containerID),
},
},
},
}
stats, err := kubelet.GetContainerInfo("qux_ns", "uuid", "foo", cadvisorReq)
if stats != nil {
t.Errorf("non-nil stats on error")
}
if err == nil {
t.Errorf("expect error but received nil error")
return
}
if err.Error() != cadvisorApiFailure.Error() {
t.Errorf("wrong error message. expect %v, got %v", cadvisorApiFailure, err)
}
mockCadvisor.AssertExpectations(t)
}
func TestGetContainerInfoOnNonExistContainer(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
mockCadvisor := testKubelet.fakeCadvisor
fakeRuntime := testKubelet.fakeRuntime
fakeRuntime.PodList = []*kubecontainer.Pod{}
stats, _ := kubelet.GetContainerInfo("qux", "", "foo", nil)
if stats != nil {
t.Errorf("non-nil stats on non exist container")
}
mockCadvisor.AssertExpectations(t)
}
func TestGetContainerInfoWhenContainerRuntimeFailed(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
mockCadvisor := testKubelet.fakeCadvisor
fakeRuntime := testKubelet.fakeRuntime
expectedErr := fmt.Errorf("List containers error")
fakeRuntime.Err = expectedErr
stats, err := kubelet.GetContainerInfo("qux", "", "foo", nil)
if err == nil {
t.Errorf("expected error from dockertools, got none")
}
if err.Error() != expectedErr.Error() {
t.Errorf("expected error %v got %v", expectedErr.Error(), err.Error())
}
if stats != nil {
t.Errorf("non-nil stats when dockertools failed")
}
mockCadvisor.AssertExpectations(t)
}
func TestGetContainerInfoWithNoContainers(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
mockCadvisor := testKubelet.fakeCadvisor
stats, err := kubelet.GetContainerInfo("qux_ns", "", "foo", nil)
if err == nil {
t.Errorf("expected error from cadvisor client, got none")
}
if err != ErrContainerNotFound {
t.Errorf("expected error %v, got %v", ErrContainerNotFound.Error(), err.Error())
}
if stats != nil {
t.Errorf("non-nil stats when dockertools returned no containers")
}
mockCadvisor.AssertExpectations(t)
}
func TestGetContainerInfoWithNoMatchingContainers(t *testing.T) {
testKubelet := newTestKubelet(t)
fakeRuntime := testKubelet.fakeRuntime
kubelet := testKubelet.kubelet
mockCadvisor := testKubelet.fakeCadvisor
fakeRuntime.PodList = []*kubecontainer.Pod{
{
ID: "12345678",
Name: "qux",
Namespace: "ns",
Containers: []*kubecontainer.Container{
{Name: "bar",
ID: types.UID("fakeID"),
},
}},
}
stats, err := kubelet.GetContainerInfo("qux_ns", "", "foo", nil)
if err == nil {
t.Errorf("Expected error from cadvisor client, got none")
}
if err != ErrContainerNotFound {
t.Errorf("Expected error %v, got %v", ErrContainerNotFound.Error(), err.Error())
}
if stats != nil {
t.Errorf("non-nil stats when dockertools returned no containers")
}
mockCadvisor.AssertExpectations(t)
}
type fakeContainerCommandRunner struct {
Cmd []string
ID string
PodID types.UID
E error
Stdin io.Reader
Stdout io.WriteCloser
Stderr io.WriteCloser
TTY bool
Port uint16
Stream io.ReadWriteCloser
}
func (f *fakeContainerCommandRunner) RunInContainer(id string, cmd []string) ([]byte, error) {
f.Cmd = cmd
f.ID = id
return []byte{}, f.E
}
func (f *fakeContainerCommandRunner) ExecInContainer(id string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool) error {
f.Cmd = cmd
f.ID = id
f.Stdin = in
f.Stdout = out
f.Stderr = err
f.TTY = tty
return f.E
}
func (f *fakeContainerCommandRunner) PortForward(pod *kubecontainer.Pod, port uint16, stream io.ReadWriteCloser) error {
f.PodID = pod.ID
f.Port = port
f.Stream = stream
return nil
}
func TestRunInContainerNoSuchPod(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
fakeRuntime := testKubelet.fakeRuntime
fakeRuntime.PodList = []*kubecontainer.Pod{}
podName := "podFoo"
podNamespace := "nsFoo"
containerName := "containerFoo"
output, err := kubelet.RunInContainer(
kubecontainer.GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{Name: podName, Namespace: podNamespace}}),
"",
containerName,
[]string{"ls"})
if output != nil {
t.Errorf("unexpected non-nil command: %v", output)
}
if err == nil {
t.Error("unexpected non-error")
}
}
func TestRunInContainer(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
fakeRuntime := testKubelet.fakeRuntime
fakeCommandRunner := fakeContainerCommandRunner{}
kubelet.runner = &fakeCommandRunner
containerID := "abc1234"
fakeRuntime.PodList = []*kubecontainer.Pod{
{
ID: "12345678",
Name: "podFoo",
Namespace: "nsFoo",
Containers: []*kubecontainer.Container{
{Name: "containerFoo",
ID: types.UID(containerID),
},
},
},
}
cmd := []string{"ls"}
_, err := kubelet.RunInContainer("podFoo_nsFoo", "", "containerFoo", cmd)
if fakeCommandRunner.ID != containerID {
t.Errorf("unexpected Name: %s", fakeCommandRunner.ID)
}
if !reflect.DeepEqual(fakeCommandRunner.Cmd, cmd) {
t.Errorf("unexpected command: %s", fakeCommandRunner.Cmd)
}
if err != nil {
t.Errorf("unexpected error: %v", err)
}
}
func TestParseResolvConf(t *testing.T) {
testCases := []struct {
data string
nameservers []string
searches []string
}{
{"", []string{}, []string{}},
{" ", []string{}, []string{}},
{"\n", []string{}, []string{}},
{"\t\n\t", []string{}, []string{}},
{"#comment\n", []string{}, []string{}},
{" #comment\n", []string{}, []string{}},
{"#comment\n#comment", []string{}, []string{}},
{"#comment\nnameserver", []string{}, []string{}},
{"#comment\nnameserver\nsearch", []string{}, []string{}},
{"nameserver 1.2.3.4", []string{"1.2.3.4"}, []string{}},
{" nameserver 1.2.3.4", []string{"1.2.3.4"}, []string{}},
{"\tnameserver 1.2.3.4", []string{"1.2.3.4"}, []string{}},
{"nameserver\t1.2.3.4", []string{"1.2.3.4"}, []string{}},
{"nameserver \t 1.2.3.4", []string{"1.2.3.4"}, []string{}},
{"nameserver 1.2.3.4\nnameserver 5.6.7.8", []string{"1.2.3.4", "5.6.7.8"}, []string{}},
{"search foo", []string{}, []string{"foo"}},
{"search foo bar", []string{}, []string{"foo", "bar"}},
{"search foo bar bat\n", []string{}, []string{"foo", "bar", "bat"}},
{"search foo\nsearch bar", []string{}, []string{"bar"}},
{"nameserver 1.2.3.4\nsearch foo bar", []string{"1.2.3.4"}, []string{"foo", "bar"}},
{"nameserver 1.2.3.4\nsearch foo\nnameserver 5.6.7.8\nsearch bar", []string{"1.2.3.4", "5.6.7.8"}, []string{"bar"}},
{"#comment\nnameserver 1.2.3.4\n#comment\nsearch foo\ncomment", []string{"1.2.3.4"}, []string{"foo"}},
}
for i, tc := range testCases {
ns, srch, err := parseResolvConf(strings.NewReader(tc.data))
if err != nil {
t.Errorf("expected success, got %v", err)
continue
}
if !reflect.DeepEqual(ns, tc.nameservers) {
t.Errorf("[%d] expected nameservers %#v, got %#v", i, tc.nameservers, ns)
}
if !reflect.DeepEqual(srch, tc.searches) {
t.Errorf("[%d] expected searches %#v, got %#v", i, tc.searches, srch)
}
}
}
func TestDNSConfigurationParams(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
clusterNS := "203.0.113.1"
kubelet.clusterDomain = "kubernetes.io"
kubelet.clusterDNS = net.ParseIP(clusterNS)
pods := newTestPods(2)
pods[0].Spec.DNSPolicy = api.DNSClusterFirst
pods[1].Spec.DNSPolicy = api.DNSDefault
options := make([]*kubecontainer.RunContainerOptions, 2)
for i, pod := range pods {
var err error
kubelet.volumeManager.SetVolumes(pod.UID, make(kubecontainer.VolumeMap, 0))
options[i], err = kubelet.GenerateRunContainerOptions(pod, &api.Container{})
if err != nil {
t.Fatalf("failed to generate container options: %v", err)
}
}
if len(options[0].DNS) != 1 || options[0].DNS[0] != clusterNS {
t.Errorf("expected nameserver %s, got %+v", clusterNS, options[0].DNS)
}
if len(options[0].DNSSearch) == 0 || options[0].DNSSearch[0] != ".svc."+kubelet.clusterDomain {
t.Errorf("expected search %s, got %+v", ".svc."+kubelet.clusterDomain, options[0].DNSSearch)
}
if len(options[1].DNS) != 1 || options[1].DNS[0] != "127.0.0.1" {
t.Errorf("expected nameserver 127.0.0.1, got %+v", options[1].DNS)
}
if len(options[1].DNSSearch) != 1 || options[1].DNSSearch[0] != "." {
t.Errorf("expected search \".\", got %+v", options[1].DNSSearch)
}
kubelet.resolverConfig = "/etc/resolv.conf"
for i, pod := range pods {
var err error
options[i], err = kubelet.GenerateRunContainerOptions(pod, &api.Container{})
if err != nil {
t.Fatalf("failed to generate container options: %v", err)
}
}
t.Logf("nameservers %+v", options[1].DNS)
if len(options[0].DNS) != len(options[1].DNS)+1 {
t.Errorf("expected prepend of cluster nameserver, got %+v", options[0].DNS)
} else if options[0].DNS[0] != clusterNS {
t.Errorf("expected nameserver %s, got %v", clusterNS, options[0].DNS[0])
}
if len(options[0].DNSSearch) != len(options[1].DNSSearch)+3 {
t.Errorf("expected prepend of cluster domain, got %+v", options[0].DNSSearch)
} else if options[0].DNSSearch[0] != ".svc."+kubelet.clusterDomain {
t.Errorf("expected domain %s, got %s", ".svc."+kubelet.clusterDomain, options[0].DNSSearch)
}
}
type testServiceLister struct {
services []api.Service
}
func (ls testServiceLister) List() (api.ServiceList, error) {
return api.ServiceList{
Items: ls.services,
}, nil
}
type testNodeLister struct {
nodes []api.Node
}
func (ls testNodeLister) GetNodeInfo(id string) (*api.Node, error) {
return nil, errors.New("not implemented")
}
func (ls testNodeLister) List() (api.NodeList, error) {
return api.NodeList{
Items: ls.nodes,
}, nil
}
type envs []kubecontainer.EnvVar
func (e envs) Len() int {
return len(e)
}
func (e envs) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
func (e envs) Less(i, j int) bool { return e[i].Name < e[j].Name }
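// TestMakeEnvironmentVariables is a table-driven test for makeEnvironmentVariables
// covering service-derived env vars, the master service namespace, downward API
// fields, and $(VAR) expansion.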
func TestMakeEnvironmentVariables(t *testing.T) {
services := []api.Service{
{
ObjectMeta: api.ObjectMeta{Name: "kubernetes", Namespace: api.NamespaceDefault},
Spec: api.ServiceSpec{
Ports: []api.ServicePort{{
Protocol: "TCP",
Port: 8081,
}},
ClusterIP: "1.2.3.1",
},
},
{
ObjectMeta: api.ObjectMeta{Name: "test", Namespace: "test1"},
Spec: api.ServiceSpec{
Ports: []api.ServicePort{{
Protocol: "TCP",
Port: 8083,
}},
ClusterIP: "1.2.3.3",
},
},
{
ObjectMeta: api.ObjectMeta{Name: "kubernetes", Namespace: "test2"},
Spec: api.ServiceSpec{
Ports: []api.ServicePort{{
Protocol: "TCP",
Port: 8084,
}},
ClusterIP: "1.2.3.4",
},
},
{
ObjectMeta: api.ObjectMeta{Name: "test", Namespace: "test2"},
Spec: api.ServiceSpec{
Ports: []api.ServicePort{{
Protocol: "TCP",
Port: 8085,
}},
ClusterIP: "1.2.3.5",
},
},
{
ObjectMeta: api.ObjectMeta{Name: "test", Namespace: "test2"},
Spec: api.ServiceSpec{
Ports: []api.ServicePort{{
Protocol: "TCP",
Port: 8085,
}},
ClusterIP: "None",
},
},
{
ObjectMeta: api.ObjectMeta{Name: "test", Namespace: "test2"},
Spec: api.ServiceSpec{
Ports: []api.ServicePort{{
Protocol: "TCP",
Port: 8085,
}},
},
},
{
ObjectMeta: api.ObjectMeta{Name: "kubernetes", Namespace: "kubernetes"},
Spec: api.ServiceSpec{
Ports: []api.ServicePort{{
Protocol: "TCP",
Port: 8086,
}},
ClusterIP: "1.2.3.6",
},
},
{
ObjectMeta: api.ObjectMeta{Name: "not-special", Namespace: "kubernetes"},
Spec: api.ServiceSpec{
Ports: []api.ServicePort{{
Protocol: "TCP",
Port: 8088,
}},
ClusterIP: "1.2.3.8",
},
},
{
ObjectMeta: api.ObjectMeta{Name: "not-special", Namespace: "kubernetes"},
Spec: api.ServiceSpec{
Ports: []api.ServicePort{{
Protocol: "TCP",
Port: 8088,
}},
ClusterIP: "None",
},
},
{
ObjectMeta: api.ObjectMeta{Name: "not-special", Namespace: "kubernetes"},
Spec: api.ServiceSpec{
Ports: []api.ServicePort{{
Protocol: "TCP",
Port: 8088,
}},
ClusterIP: "",
},
},
}
testCases := []struct {
name string // the name of the test case
ns string // the namespace to generate environment for
container *api.Container // the container to use
masterServiceNs string // the namespace to read master service info from
nilLister bool // whether the lister should be nil
expectedEnvs []kubecontainer.EnvVar // a set of expected environment vars
}{
{
name: "api server = Y, kubelet = Y",
ns: "test1",
container: &api.Container{
Env: []api.EnvVar{
{Name: "FOO", Value: "BAR"},
{Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"},
{Name: "TEST_SERVICE_PORT", Value: "8083"},
{Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"},
{Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"},
{Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"},
{Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"},
{Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"},
},
},
masterServiceNs: api.NamespaceDefault,
nilLister: false,
expectedEnvs: []kubecontainer.EnvVar{
{Name: "FOO", Value: "BAR"},
{Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"},
{Name: "TEST_SERVICE_PORT", Value: "8083"},
{Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"},
{Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"},
{Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"},
{Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"},
{Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"},
{Name: "KUBERNETES_SERVICE_PORT", Value: "8081"},
{Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.1"},
{Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.1:8081"},
{Name: "KUBERNETES_PORT_8081_TCP", Value: "tcp://1.2.3.1:8081"},
{Name: "KUBERNETES_PORT_8081_TCP_PROTO", Value: "tcp"},
{Name: "KUBERNETES_PORT_8081_TCP_PORT", Value: "8081"},
{Name: "KUBERNETES_PORT_8081_TCP_ADDR", Value: "1.2.3.1"},
},
},
{
name: "api server = Y, kubelet = N",
ns: "test1",
container: &api.Container{
Env: []api.EnvVar{
{Name: "FOO", Value: "BAR"},
{Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"},
{Name: "TEST_SERVICE_PORT", Value: "8083"},
{Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"},
{Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"},
{Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"},
{Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"},
{Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"},
},
},
masterServiceNs: api.NamespaceDefault,
nilLister: true,
expectedEnvs: []kubecontainer.EnvVar{
{Name: "FOO", Value: "BAR"},
{Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"},
{Name: "TEST_SERVICE_PORT", Value: "8083"},
{Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"},
{Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"},
{Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"},
{Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"},
{Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"},
},
},
{
name: "api server = N; kubelet = Y",
ns: "test1",
container: &api.Container{
Env: []api.EnvVar{
{Name: "FOO", Value: "BAZ"},
},
},
masterServiceNs: api.NamespaceDefault,
nilLister: false,
expectedEnvs: []kubecontainer.EnvVar{
{Name: "FOO", Value: "BAZ"},
{Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"},
{Name: "TEST_SERVICE_PORT", Value: "8083"},
{Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"},
{Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"},
{Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"},
{Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"},
{Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"},
{Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.1"},
{Name: "KUBERNETES_SERVICE_PORT", Value: "8081"},
{Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.1:8081"},
{Name: "KUBERNETES_PORT_8081_TCP", Value: "tcp://1.2.3.1:8081"},
{Name: "KUBERNETES_PORT_8081_TCP_PROTO", Value: "tcp"},
{Name: "KUBERNETES_PORT_8081_TCP_PORT", Value: "8081"},
{Name: "KUBERNETES_PORT_8081_TCP_ADDR", Value: "1.2.3.1"},
},
},
{
name: "master service in pod ns",
ns: "test2",
container: &api.Container{
Env: []api.EnvVar{
{Name: "FOO", Value: "ZAP"},
},
},
masterServiceNs: "kubernetes",
nilLister: false,
expectedEnvs: []kubecontainer.EnvVar{
{Name: "FOO", Value: "ZAP"},
{Name: "TEST_SERVICE_HOST", Value: "1.2.3.5"},
{Name: "TEST_SERVICE_PORT", Value: "8085"},
{Name: "TEST_PORT", Value: "tcp://1.2.3.5:8085"},
{Name: "TEST_PORT_8085_TCP", Value: "tcp://1.2.3.5:8085"},
{Name: "TEST_PORT_8085_TCP_PROTO", Value: "tcp"},
{Name: "TEST_PORT_8085_TCP_PORT", Value: "8085"},
{Name: "TEST_PORT_8085_TCP_ADDR", Value: "1.2.3.5"},
{Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.4"},
{Name: "KUBERNETES_SERVICE_PORT", Value: "8084"},
{Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.4:8084"},
{Name: "KUBERNETES_PORT_8084_TCP", Value: "tcp://1.2.3.4:8084"},
{Name: "KUBERNETES_PORT_8084_TCP_PROTO", Value: "tcp"},
{Name: "KUBERNETES_PORT_8084_TCP_PORT", Value: "8084"},
{Name: "KUBERNETES_PORT_8084_TCP_ADDR", Value: "1.2.3.4"},
},
},
{
name: "pod in master service ns",
ns: "kubernetes",
container: &api.Container{},
masterServiceNs: "kubernetes",
nilLister: false,
expectedEnvs: []kubecontainer.EnvVar{
{Name: "NOT_SPECIAL_SERVICE_HOST", Value: "1.2.3.8"},
{Name: "NOT_SPECIAL_SERVICE_PORT", Value: "8088"},
{Name: "NOT_SPECIAL_PORT", Value: "tcp://1.2.3.8:8088"},
{Name: "NOT_SPECIAL_PORT_8088_TCP", Value: "tcp://1.2.3.8:8088"},
{Name: "NOT_SPECIAL_PORT_8088_TCP_PROTO", Value: "tcp"},
{Name: "NOT_SPECIAL_PORT_8088_TCP_PORT", Value: "8088"},
{Name: "NOT_SPECIAL_PORT_8088_TCP_ADDR", Value: "1.2.3.8"},
{Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.6"},
{Name: "KUBERNETES_SERVICE_PORT", Value: "8086"},
{Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.6:8086"},
{Name: "KUBERNETES_PORT_8086_TCP", Value: "tcp://1.2.3.6:8086"},
{Name: "KUBERNETES_PORT_8086_TCP_PROTO", Value: "tcp"},
{Name: "KUBERNETES_PORT_8086_TCP_PORT", Value: "8086"},
{Name: "KUBERNETES_PORT_8086_TCP_ADDR", Value: "1.2.3.6"},
},
},
{
name: "downward api pod",
ns: "downward-api",
container: &api.Container{
Env: []api.EnvVar{
{
Name: "POD_NAME",
ValueFrom: &api.EnvVarSource{
FieldRef: &api.ObjectFieldSelector{
APIVersion: testapi.Version(),
FieldPath: "metadata.name",
},
},
},
{
Name: "POD_NAMESPACE",
ValueFrom: &api.EnvVarSource{
FieldRef: &api.ObjectFieldSelector{
APIVersion: testapi.Version(),
FieldPath: "metadata.namespace",
},
},
},
{
Name: "POD_IP",
ValueFrom: &api.EnvVarSource{
FieldRef: &api.ObjectFieldSelector{
APIVersion: testapi.Version(),
FieldPath: "status.podIP",
},
},
},
},
},
masterServiceNs: "nothing",
nilLister: true,
expectedEnvs: []kubecontainer.EnvVar{
{Name: "POD_NAME", Value: "dapi-test-pod-name"},
{Name: "POD_NAMESPACE", Value: "downward-api"},
{Name: "POD_IP", Value: "1.2.3.4"},
},
},
{
name: "env expansion",
ns: "test1",
container: &api.Container{
Env: []api.EnvVar{
{
Name: "TEST_LITERAL",
Value: "test-test-test",
},
{
Name: "POD_NAME",
ValueFrom: &api.EnvVarSource{
FieldRef: &api.ObjectFieldSelector{
APIVersion: testapi.Version(),
FieldPath: "metadata.name",
},
},
},
{
Name: "OUT_OF_ORDER_TEST",
Value: "$(OUT_OF_ORDER_TARGET)",
},
{
Name: "OUT_OF_ORDER_TARGET",
Value: "FOO",
},
{
Name: "EMPTY_VAR",
},
{
Name: "EMPTY_TEST",
Value: "foo-$(EMPTY_VAR)",
},
{
Name: "POD_NAME_TEST2",
Value: "test2-$(POD_NAME)",
},
{
Name: "POD_NAME_TEST3",
Value: "$(POD_NAME_TEST2)-3",
},
{
Name: "LITERAL_TEST",
Value: "literal-$(TEST_LITERAL)",
},
{
Name: "SERVICE_VAR_TEST",
Value: "$(TEST_SERVICE_HOST):$(TEST_SERVICE_PORT)",
},
{
Name: "TEST_UNDEFINED",
Value: "$(UNDEFINED_VAR)",
},
},
},
masterServiceNs: "nothing",
nilLister: false,
expectedEnvs: []kubecontainer.EnvVar{
{
Name: "TEST_LITERAL",
Value: "test-test-test",
},
{
Name: "POD_NAME",
Value: "dapi-test-pod-name",
},
{
Name: "POD_NAME_TEST2",
Value: "test2-dapi-test-pod-name",
},
{
Name: "POD_NAME_TEST3",
Value: "test2-dapi-test-pod-name-3",
},
{
Name: "LITERAL_TEST",
Value: "literal-test-test-test",
},
{
Name: "TEST_SERVICE_HOST",
Value: "1.2.3.3",
},
{
Name: "TEST_SERVICE_PORT",
Value: "8083",
},
{
Name: "TEST_PORT",
Value: "tcp://1.2.3.3:8083",
},
{
Name: "TEST_PORT_8083_TCP",
Value: "tcp://1.2.3.3:8083",
},
{
Name: "TEST_PORT_8083_TCP_PROTO",
Value: "tcp",
},
{
Name: "TEST_PORT_8083_TCP_PORT",
Value: "8083",
},
{
Name: "TEST_PORT_8083_TCP_ADDR",
Value: "1.2.3.3",
},
{
Name: "SERVICE_VAR_TEST",
Value: "1.2.3.3:8083",
},
{
Name: "OUT_OF_ORDER_TEST",
Value: "$(OUT_OF_ORDER_TARGET)",
},
{
Name: "OUT_OF_ORDER_TARGET",
Value: "FOO",
},
{
Name: "TEST_UNDEFINED",
Value: "$(UNDEFINED_VAR)",
},
{
Name: "EMPTY_VAR",
},
{
Name: "EMPTY_TEST",
Value: "foo-",
},
},
},
}
for i, tc := range testCases {
testKubelet := newTestKubelet(t)
kl := testKubelet.kubelet
kl.masterServiceNamespace = tc.masterServiceNs
if tc.nilLister {
kl.serviceLister = nil
} else {
kl.serviceLister = testServiceLister{services}
}
testPod := &api.Pod{
ObjectMeta: api.ObjectMeta{
Namespace: tc.ns,
Name: "dapi-test-pod-name",
},
}
testPod.Status.PodIP = "1.2.3.4"
result, err := kl.makeEnvironmentVariables(testPod, tc.container)
if err != nil {
t.Errorf("[%v] Unexpected error: %v", tc.name, err)
}
sort.Sort(envs(result))
sort.Sort(envs(tc.expectedEnvs))
if !reflect.DeepEqual(result, tc.expectedEnvs) {
t.Errorf("%d: [%v] Unexpected env entries; expected {%v}, got {%v}", i, tc.name, tc.expectedEnvs, result)
}
}
}
func runningState(cName string) api.ContainerStatus {
return api.ContainerStatus{
Name: cName,
State: api.ContainerState{
Running: &api.ContainerStateRunning{},
},
}
}
func stoppedState(cName string) api.ContainerStatus {
return api.ContainerStatus{
Name: cName,
State: api.ContainerState{
Terminated: &api.ContainerStateTerminated{},
},
}
}
func succeededState(cName string) api.ContainerStatus {
return api.ContainerStatus{
Name: cName,
State: api.ContainerState{
Terminated: &api.ContainerStateTerminated{
ExitCode: 0,
},
},
}
}
func failedState(cName string) api.ContainerStatus {
return api.ContainerStatus{
Name: cName,
State: api.ContainerState{
Terminated: &api.ContainerStateTerminated{
ExitCode: -1,
},
},
}
}
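// TestPodPhaseWithRestartAlways verifies GetPhase for pods with RestartPolicyAlways.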
func TestPodPhaseWithRestartAlways(t *testing.T) {
desiredState := api.PodSpec{
NodeName: "machine",
Containers: []api.Container{
{Name: "containerA"},
{Name: "containerB"},
},
RestartPolicy: api.RestartPolicyAlways,
}
tests := []struct {
pod *api.Pod
status api.PodPhase
test string
}{
{&api.Pod{Spec: desiredState, Status: api.PodStatus{}}, api.PodPending, "waiting"},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
runningState("containerA"),
runningState("containerB"),
},
},
},
api.PodRunning,
"all running",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
stoppedState("containerA"),
stoppedState("containerB"),
},
},
},
api.PodRunning,
"all stopped with restart always",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
runningState("containerA"),
stoppedState("containerB"),
},
},
},
api.PodRunning,
"mixed state #1 with restart always",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
runningState("containerA"),
},
},
},
api.PodPending,
"mixed state #2 with restart always",
},
}
for _, test := range tests {
if status := GetPhase(&test.pod.Spec, test.pod.Status.ContainerStatuses); status != test.status {
t.Errorf("In test %s, expected %v, got %v", test.test, test.status, status)
}
}
}
func TestPodPhaseWithRestartNever(t *testing.T) {
desiredState := api.PodSpec{
NodeName: "machine",
Containers: []api.Container{
{Name: "containerA"},
{Name: "containerB"},
},
RestartPolicy: api.RestartPolicyNever,
}
tests := []struct {
pod *api.Pod
status api.PodPhase
test string
}{
{&api.Pod{Spec: desiredState, Status: api.PodStatus{}}, api.PodPending, "waiting"},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
runningState("containerA"),
runningState("containerB"),
},
},
},
api.PodRunning,
"all running with restart never",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
succeededState("containerA"),
succeededState("containerB"),
},
},
},
api.PodSucceeded,
"all succeeded with restart never",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
failedState("containerA"),
failedState("containerB"),
},
},
},
api.PodFailed,
"all failed with restart never",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
runningState("containerA"),
succeededState("containerB"),
},
},
},
api.PodRunning,
"mixed state #1 with restart never",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
runningState("containerA"),
},
},
},
api.PodPending,
"mixed state #2 with restart never",
},
}
for _, test := range tests {
if status := GetPhase(&test.pod.Spec, test.pod.Status.ContainerStatuses); status != test.status {
t.Errorf("In test %s, expected %v, got %v", test.test, test.status, status)
}
}
}
func TestPodPhaseWithRestartOnFailure(t *testing.T) {
desiredState := api.PodSpec{
NodeName: "machine",
Containers: []api.Container{
{Name: "containerA"},
{Name: "containerB"},
},
RestartPolicy: api.RestartPolicyOnFailure,
}
tests := []struct {
pod *api.Pod
status api.PodPhase
test string
}{
{&api.Pod{Spec: desiredState, Status: api.PodStatus{}}, api.PodPending, "waiting"},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
runningState("containerA"),
runningState("containerB"),
},
},
},
api.PodRunning,
"all running with restart onfailure",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
succeededState("containerA"),
succeededState("containerB"),
},
},
},
api.PodSucceeded,
"all succeeded with restart onfailure",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
failedState("containerA"),
failedState("containerB"),
},
},
},
api.PodRunning,
"all failed with restart never",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
runningState("containerA"),
succeededState("containerB"),
},
},
},
api.PodRunning,
"mixed state #1 with restart onfailure",
},
{
&api.Pod{
Spec: desiredState,
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
runningState("containerA"),
},
},
},
api.PodPending,
"mixed state #2 with restart onfailure",
},
}
for _, test := range tests {
if status := GetPhase(&test.pod.Spec, test.pod.Status.ContainerStatuses); status != test.status {
t.Errorf("In test %s, expected %v, got %v", test.test, test.status, status)
}
}
}
func getReadyStatus(cName string) api.ContainerStatus {
return api.ContainerStatus{
Name: cName,
Ready: true,
}
}
func getNotReadyStatus(cName string) api.ContainerStatus {
return api.ContainerStatus{
Name: cName,
Ready: false,
}
}
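// TestGetPodReadyCondition verifies that getPodReadyCondition reports ready only
// when every container in the spec has a ready status.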
func TestGetPodReadyCondition(t *testing.T) {
ready := []api.PodCondition{{
Type: api.PodReady,
Status: api.ConditionTrue,
}}
unready := []api.PodCondition{{
Type: api.PodReady,
Status: api.ConditionFalse,
}}
tests := []struct {
spec *api.PodSpec
info []api.ContainerStatus
expected []api.PodCondition
}{
{
spec: nil,
info: nil,
expected: unready,
},
{
spec: &api.PodSpec{},
info: []api.ContainerStatus{},
expected: ready,
},
{
spec: &api.PodSpec{
Containers: []api.Container{
{Name: "1234"},
},
},
info: []api.ContainerStatus{},
expected: unready,
},
{
spec: &api.PodSpec{
Containers: []api.Container{
{Name: "1234"},
},
},
info: []api.ContainerStatus{
getReadyStatus("1234"),
},
expected: ready,
},
{
spec: &api.PodSpec{
Containers: []api.Container{
{Name: "1234"},
{Name: "5678"},
},
},
info: []api.ContainerStatus{
getReadyStatus("1234"),
getReadyStatus("5678"),
},
expected: ready,
},
{
spec: &api.PodSpec{
Containers: []api.Container{
{Name: "1234"},
{Name: "5678"},
},
},
info: []api.ContainerStatus{
getReadyStatus("1234"),
},
expected: unready,
},
{
spec: &api.PodSpec{
Containers: []api.Container{
{Name: "1234"},
{Name: "5678"},
},
},
info: []api.ContainerStatus{
getReadyStatus("1234"),
getNotReadyStatus("5678"),
},
expected: unready,
},
}
for i, test := range tests {
condition := getPodReadyCondition(test.spec, test.info)
if !reflect.DeepEqual(condition, test.expected) {
t.Errorf("On test case %v, expected:\n%+v\ngot\n%+v\n", i, test.expected, condition)
}
}
}
func TestExecInContainerNoSuchPod(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
fakeRuntime := testKubelet.fakeRuntime
fakeCommandRunner := fakeContainerCommandRunner{}
kubelet.runner = &fakeCommandRunner
fakeRuntime.PodList = []*kubecontainer.Pod{}
podName := "podFoo"
podNamespace := "nsFoo"
containerID := "containerFoo"
err := kubelet.ExecInContainer(
kubecontainer.GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{Name: podName, Namespace: podNamespace}}),
"",
containerID,
[]string{"ls"},
nil,
nil,
nil,
false,
)
if err == nil {
t.Fatal("unexpected non-error")
}
if fakeCommandRunner.ID != "" {
t.Fatal("unexpected invocation of runner.ExecInContainer")
}
}
func TestExecInContainerNoSuchContainer(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
fakeRuntime := testKubelet.fakeRuntime
fakeCommandRunner := fakeContainerCommandRunner{}
kubelet.runner = &fakeCommandRunner
podName := "podFoo"
podNamespace := "nsFoo"
containerID := "containerFoo"
fakeRuntime.PodList = []*kubecontainer.Pod{
{
ID: "12345678",
Name: podName,
Namespace: podNamespace,
Containers: []*kubecontainer.Container{
{Name: "bar",
ID: "barID"},
},
},
}
err := kubelet.ExecInContainer(
kubecontainer.GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: podName,
Namespace: podNamespace,
}}),
"",
containerID,
[]string{"ls"},
nil,
nil,
nil,
false,
)
if err == nil {
t.Fatal("unexpected non-error")
}
if fakeCommandRunner.ID != "" {
t.Fatal("unexpected invocation of runner.ExecInContainer")
}
}
type fakeReadWriteCloser struct{}
func (f *fakeReadWriteCloser) Write(data []byte) (int, error) {
return 0, nil
}
func (f *fakeReadWriteCloser) Read(data []byte) (int, error) {
return 0, nil
}
func (f *fakeReadWriteCloser) Close() error {
return nil
}
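// TestExecInContainer verifies that ExecInContainer forwards the container ID,
// command, streams, and TTY flag to the container command runner.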
func TestExecInContainer(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
fakeRuntime := testKubelet.fakeRuntime
fakeCommandRunner := fakeContainerCommandRunner{}
kubelet.runner = &fakeCommandRunner
podName := "podFoo"
podNamespace := "nsFoo"
containerID := "containerFoo"
command := []string{"ls"}
stdin := &bytes.Buffer{}
stdout := &fakeReadWriteCloser{}
stderr := &fakeReadWriteCloser{}
tty := true
fakeRuntime.PodList = []*kubecontainer.Pod{
{
ID: "12345678",
Name: podName,
Namespace: podNamespace,
Containers: []*kubecontainer.Container{
{Name: containerID,
ID: types.UID(containerID),
},
},
},
}
err := kubelet.ExecInContainer(
kubecontainer.GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: podName,
Namespace: podNamespace,
}}),
"",
containerID,
[]string{"ls"},
stdin,
stdout,
stderr,
tty,
)
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
if e, a := containerID, fakeCommandRunner.ID; e != a {
t.Fatalf("container name: expected %q, got %q", e, a)
}
if e, a := command, fakeCommandRunner.Cmd; !reflect.DeepEqual(e, a) {
t.Fatalf("command: expected '%v', got '%v'", e, a)
}
if e, a := stdin, fakeCommandRunner.Stdin; e != a {
t.Fatalf("stdin: expected %#v, got %#v", e, a)
}
if e, a := stdout, fakeCommandRunner.Stdout; e != a {
t.Fatalf("stdout: expected %#v, got %#v", e, a)
}
if e, a := stderr, fakeCommandRunner.Stderr; e != a {
t.Fatalf("stderr: expected %#v, got %#v", e, a)
}
if e, a := tty, fakeCommandRunner.TTY; e != a {
t.Fatalf("tty: expected %t, got %t", e, a)
}
}
func TestPortForwardNoSuchPod(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
fakeRuntime := testKubelet.fakeRuntime
fakeRuntime.PodList = []*kubecontainer.Pod{}
fakeCommandRunner := fakeContainerCommandRunner{}
kubelet.runner = &fakeCommandRunner
podName := "podFoo"
podNamespace := "nsFoo"
var port uint16 = 5000
err := kubelet.PortForward(
kubecontainer.GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{Name: podName, Namespace: podNamespace}}),
"",
port,
nil,
)
if err == nil {
t.Fatal("unexpected non-error")
}
if fakeCommandRunner.ID != "" {
t.Fatal("unexpected invocation of runner.PortForward")
}
}
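// TestPortForward verifies that PortForward resolves the pod and forwards the
// port and stream to the command runner.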
func TestPortForward(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
fakeRuntime := testKubelet.fakeRuntime
podName := "podFoo"
podNamespace := "nsFoo"
podID := types.UID("12345678")
fakeRuntime.PodList = []*kubecontainer.Pod{
{
ID: podID,
Name: podName,
Namespace: podNamespace,
Containers: []*kubecontainer.Container{
{
Name: "foo",
ID: "containerFoo",
},
},
},
}
fakeCommandRunner := fakeContainerCommandRunner{}
kubelet.runner = &fakeCommandRunner
var port uint16 = 5000
stream := &fakeReadWriteCloser{}
err := kubelet.PortForward(
kubecontainer.GetPodFullName(&api.Pod{ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: podName,
Namespace: podNamespace,
}}),
"",
port,
stream,
)
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
if e, a := podID, fakeCommandRunner.PodID; e != a {
t.Fatalf("container id: expected %q, got %q", e, a)
}
if e, a := port, fakeCommandRunner.Port; e != a {
t.Fatalf("port: expected %v, got %v", e, a)
}
if e, a := stream, fakeCommandRunner.Stream; e != a {
t.Fatalf("stream: expected %v, got %v", e, a)
}
}
// Tests that host port conflicts are detected correctly.
func TestGetHostPortConflicts(t *testing.T) {
pods := []*api.Pod{
{Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}}},
{Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 81}}}}}},
{Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 82}}}}}},
{Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 83}}}}}},
}
// Pods should not cause any conflict.
if hasHostPortConflicts(pods) {
t.Errorf("expected no conflicts, Got conflicts")
}
expected := &api.Pod{
Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 81}}}}},
}
// The new pod should cause a conflict and be reported.
pods = append(pods, expected)
if !hasHostPortConflicts(pods) {
t.Errorf("expected no conflict, Got no conflicts")
}
}
// Tests that we handle port conflicts correctly by setting the failed status in the status map.
func TestHandlePortConflicts(t *testing.T) {
testKubelet := newTestKubelet(t)
kl := testKubelet.kubelet
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorApiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorApiv2.FsInfo{}, nil)
spec := api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}}
pods := []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
UID: "123456789",
Name: "newpod",
Namespace: "foo",
},
Spec: spec,
},
{
ObjectMeta: api.ObjectMeta{
UID: "987654321",
Name: "oldpod",
Namespace: "foo",
},
Spec: spec,
},
}
// Make sure the Pods are in the reverse order of creation time.
pods[1].CreationTimestamp = util.NewTime(time.Now())
pods[0].CreationTimestamp = util.NewTime(time.Now().Add(1 * time.Second))
// The newer pod should be rejected.
conflictedPod := pods[0]
kl.HandlePodAdditions(pods)
// Check pod status stored in the status map.
status, found := kl.statusManager.GetPodStatus(conflictedPod.UID)
if !found {
t.Fatalf("status of pod %q is not found in the status map", conflictedPod.UID)
}
if status.Phase != api.PodFailed {
t.Fatalf("expected pod status %q. Got %q.", api.PodFailed, status.Phase)
}
}
// Tests that we handle a non-matching node selector correctly by setting the failed status in the status map.
func TestHandleNodeSelector(t *testing.T) {
testKubelet := newTestKubelet(t)
kl := testKubelet.kubelet
kl.nodeLister = testNodeLister{nodes: []api.Node{
{ObjectMeta: api.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{"key": "B"}}},
}}
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorApiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorApiv2.FsInfo{}, nil)
pods := []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
UID: "123456789",
Name: "podA",
Namespace: "foo",
},
Spec: api.PodSpec{NodeSelector: map[string]string{"key": "A"}},
},
{
ObjectMeta: api.ObjectMeta{
UID: "987654321",
Name: "podB",
Namespace: "foo",
},
Spec: api.PodSpec{NodeSelector: map[string]string{"key": "B"}},
},
}
// The first pod should be rejected.
notfittingPod := pods[0]
kl.HandlePodAdditions(pods)
// Check pod status stored in the status map.
status, found := kl.statusManager.GetPodStatus(notfittingPod.UID)
if !found {
t.Fatalf("status of pod %q is not found in the status map", notfittingPod.UID)
}
if status.Phase != api.PodFailed {
t.Fatalf("expected pod status %q. Got %q.", api.PodFailed, status.Phase)
}
}
// Tests that we handle exceeded resources correctly by setting the failed status in the status map.
func TestHandleMemExceeded(t *testing.T) {
testKubelet := newTestKubelet(t)
kl := testKubelet.kubelet
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{MemoryCapacity: 100}, nil)
testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorApiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorApiv2.FsInfo{}, nil)
spec := api.PodSpec{Containers: []api.Container{{Resources: api.ResourceRequirements{
Requests: api.ResourceList{
"memory": resource.MustParse("90"),
},
}}}}
pods := []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
UID: "123456789",
Name: "newpod",
Namespace: "foo",
},
Spec: spec,
},
{
ObjectMeta: api.ObjectMeta{
UID: "987654321",
Name: "oldpod",
Namespace: "foo",
},
Spec: spec,
},
}
// Make sure the Pods are in the reverse order of creation time.
pods[1].CreationTimestamp = util.NewTime(time.Now())
pods[0].CreationTimestamp = util.NewTime(time.Now().Add(1 * time.Second))
// The newer pod should be rejected.
notfittingPod := pods[0]
kl.HandlePodAdditions(pods)
// Check pod status stored in the status map.
status, found := kl.statusManager.GetPodStatus(notfittingPod.UID)
if !found {
t.Fatalf("status of pod %q is not found in the status map", notfittingPod.UID)
}
if status.Phase != api.PodFailed {
t.Fatalf("expected pod status %q. Got %q.", api.PodFailed, status.Phase)
}
}
// TODO(filipg): This test should be removed once StatusSyncer can do garbage collection without external signal.
func TestPurgingObsoleteStatusMapEntries(t *testing.T) {
testKubelet := newTestKubelet(t)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorApiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorApiv2.FsInfo{}, nil)
kl := testKubelet.kubelet
pods := []*api.Pod{
{ObjectMeta: api.ObjectMeta{Name: "pod1", UID: "1234"}, Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}}},
{ObjectMeta: api.ObjectMeta{Name: "pod2", UID: "4567"}, Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}}},
}
podToTest := pods[1]
// Run once to populate the status map.
kl.HandlePodAdditions(pods)
if _, found := kl.statusManager.GetPodStatus(podToTest.UID); !found {
t.Fatalf("expected to have status cached for pod2")
}
// Sync with empty pods so that the entry in status map will be removed.
kl.podManager.SetPods([]*api.Pod{})
kl.HandlePodCleanups()
if _, found := kl.statusManager.GetPodStatus(podToTest.UID); found {
t.Fatalf("expected to not have status cached for pod2")
}
}
func TestValidatePodStatus(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
testCases := []struct {
podPhase api.PodPhase
success bool
}{
{api.PodRunning, true},
{api.PodSucceeded, true},
{api.PodFailed, true},
{api.PodPending, false},
{api.PodUnknown, false},
}
for i, tc := range testCases {
err := kubelet.validatePodPhase(&api.PodStatus{Phase: tc.podPhase})
if tc.success {
if err != nil {
t.Errorf("[case %d]: unexpected failure - %v", i, err)
}
} else if err == nil {
t.Errorf("[case %d]: unexpected success", i)
}
}
}
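// TestValidateContainerStatus covers validateContainerStatus for running,
// terminated, and waiting containers, including lookups of previous (terminated)
// container instances.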
func TestValidateContainerStatus(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
containerName := "x"
testCases := []struct {
statuses []api.ContainerStatus
success bool
}{
{
statuses: []api.ContainerStatus{
{
Name: containerName,
State: api.ContainerState{
Running: &api.ContainerStateRunning{},
},
LastTerminationState: api.ContainerState{
Terminated: &api.ContainerStateTerminated{},
},
},
},
success: true,
},
{
statuses: []api.ContainerStatus{
{
Name: containerName,
State: api.ContainerState{
Terminated: &api.ContainerStateTerminated{},
},
},
},
success: true,
},
{
statuses: []api.ContainerStatus{
{
Name: containerName,
State: api.ContainerState{
Waiting: &api.ContainerStateWaiting{},
},
},
},
success: false,
},
}
for i, tc := range testCases {
_, err := kubelet.validateContainerStatus(&api.PodStatus{
ContainerStatuses: tc.statuses,
}, containerName, false)
if tc.success {
if err != nil {
t.Errorf("[case %d]: unexpected failure - %v", i, err)
}
} else if err == nil {
t.Errorf("[case %d]: unexpected success", i)
}
}
if _, err := kubelet.validateContainerStatus(&api.PodStatus{
ContainerStatuses: testCases[0].statuses,
}, "blah", false); err == nil {
t.Errorf("expected error with invalid container name")
}
if _, err := kubelet.validateContainerStatus(&api.PodStatus{
ContainerStatuses: testCases[0].statuses,
}, containerName, true); err != nil {
t.Errorf("unexpected error with for previous terminated container - %v", err)
}
if _, err := kubelet.validateContainerStatus(&api.PodStatus{
ContainerStatuses: testCases[1].statuses,
}, containerName, true); err == nil {
t.Errorf("expected error with for previous terminated container")
}
}
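// TestUpdateNewNodeStatus verifies that updateNodeStatus posts a ready condition
// and machine info for a node without an existing status.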
func TestUpdateNewNodeStatus(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
kubeClient := testKubelet.fakeKubeClient
kubeClient.ReactionChain = testclient.NewSimpleFake(&api.NodeList{Items: []api.Node{
{ObjectMeta: api.ObjectMeta{Name: testKubeletHostname}},
}}).ReactionChain
machineInfo := &cadvisorApi.MachineInfo{
MachineID: "123",
SystemUUID: "abc",
BootID: "1b3",
NumCores: 2,
MemoryCapacity: 1024,
}
mockCadvisor := testKubelet.fakeCadvisor
mockCadvisor.On("MachineInfo").Return(machineInfo, nil)
versionInfo := &cadvisorApi.VersionInfo{
KernelVersion: "3.16.0-0.bpo.4-amd64",
ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
DockerVersion: "1.5.0",
}
mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
expectedNode := &api.Node{
ObjectMeta: api.ObjectMeta{Name: testKubeletHostname},
Spec: api.NodeSpec{},
Status: api.NodeStatus{
Conditions: []api.NodeCondition{
{
Type: api.NodeReady,
Status: api.ConditionTrue,
Reason: "kubelet is posting ready status",
LastHeartbeatTime: util.Time{},
LastTransitionTime: util.Time{},
},
},
NodeInfo: api.NodeSystemInfo{
MachineID: "123",
SystemUUID: "abc",
BootID: "1b3",
KernelVersion: "3.16.0-0.bpo.4-amd64",
OsImage: "Debian GNU/Linux 7 (wheezy)",
ContainerRuntimeVersion: "docker://1.5.0",
KubeletVersion: version.Get().String(),
KubeProxyVersion: version.Get().String(),
},
Capacity: api.ResourceList{
api.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
api.ResourceMemory: *resource.NewQuantity(1024, resource.BinarySI),
api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
},
Addresses: []api.NodeAddress{
{Type: api.NodeLegacyHostIP, Address: "127.0.0.1"},
{Type: api.NodeInternalIP, Address: "127.0.0.1"},
},
},
}
kubelet.updateRuntimeUp()
if err := kubelet.updateNodeStatus(); err != nil {
t.Errorf("unexpected error: %v", err)
}
actions := kubeClient.Actions()
if len(actions) != 2 {
t.Fatalf("unexpected actions: %v", actions)
}
if !actions[1].Matches("update", "nodes") || actions[1].GetSubresource() != "status" {
t.Fatalf("unexpected actions: %v", actions)
}
updatedNode, ok := actions[1].(testclient.UpdateAction).GetObject().(*api.Node)
if !ok {
t.Errorf("unexpected object type")
}
if updatedNode.Status.Conditions[0].LastHeartbeatTime.IsZero() {
t.Errorf("unexpected zero last probe timestamp")
}
if updatedNode.Status.Conditions[0].LastTransitionTime.IsZero() {
t.Errorf("unexpected zero last transition timestamp")
}
updatedNode.Status.Conditions[0].LastHeartbeatTime = util.Time{}
updatedNode.Status.Conditions[0].LastTransitionTime = util.Time{}
if !reflect.DeepEqual(expectedNode, updatedNode) {
t.Errorf("unexpected objects: %s", util.ObjectDiff(expectedNode, updatedNode))
}
}
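// TestUpdateExistingNodeStatus verifies that updateNodeStatus refreshes
// LastHeartbeatTime but preserves LastTransitionTime when the node already has a
// ready condition.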
func TestUpdateExistingNodeStatus(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
kubeClient := testKubelet.fakeKubeClient
kubeClient.ReactionChain = testclient.NewSimpleFake(&api.NodeList{Items: []api.Node{
{
ObjectMeta: api.ObjectMeta{Name: testKubeletHostname},
Spec: api.NodeSpec{},
Status: api.NodeStatus{
Conditions: []api.NodeCondition{
{
Type: api.NodeReady,
Status: api.ConditionTrue,
Reason: "kubelet is posting ready status",
LastHeartbeatTime: util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
LastTransitionTime: util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
},
},
Capacity: api.ResourceList{
api.ResourceCPU: *resource.NewMilliQuantity(3000, resource.DecimalSI),
api.ResourceMemory: *resource.NewQuantity(2048, resource.BinarySI),
api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
},
},
},
}}).ReactionChain
mockCadvisor := testKubelet.fakeCadvisor
machineInfo := &cadvisorApi.MachineInfo{
MachineID: "123",
SystemUUID: "abc",
BootID: "1b3",
NumCores: 2,
MemoryCapacity: 1024,
}
mockCadvisor.On("MachineInfo").Return(machineInfo, nil)
versionInfo := &cadvisorApi.VersionInfo{
KernelVersion: "3.16.0-0.bpo.4-amd64",
ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
DockerVersion: "1.5.0",
}
mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
expectedNode := &api.Node{
ObjectMeta: api.ObjectMeta{Name: testKubeletHostname},
Spec: api.NodeSpec{},
Status: api.NodeStatus{
Conditions: []api.NodeCondition{
{
Type: api.NodeReady,
Status: api.ConditionTrue,
Reason: "kubelet is posting ready status",
LastHeartbeatTime: util.Time{}, // placeholder
LastTransitionTime: util.Time{}, // placeholder
},
},
NodeInfo: api.NodeSystemInfo{
MachineID: "123",
SystemUUID: "abc",
BootID: "1b3",
KernelVersion: "3.16.0-0.bpo.4-amd64",
OsImage: "Debian GNU/Linux 7 (wheezy)",
ContainerRuntimeVersion: "docker://1.5.0",
KubeletVersion: version.Get().String(),
KubeProxyVersion: version.Get().String(),
},
Capacity: api.ResourceList{
api.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
api.ResourceMemory: *resource.NewQuantity(1024, resource.BinarySI),
api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
},
Addresses: []api.NodeAddress{
{Type: api.NodeLegacyHostIP, Address: "127.0.0.1"},
{Type: api.NodeInternalIP, Address: "127.0.0.1"},
},
},
}
kubelet.updateRuntimeUp()
if err := kubelet.updateNodeStatus(); err != nil {
t.Errorf("unexpected error: %v", err)
}
actions := kubeClient.Actions()
if len(actions) != 2 {
t.Errorf("unexpected actions: %v", actions)
}
updateAction, ok := actions[1].(testclient.UpdateAction)
if !ok {
t.Errorf("unexpected action type. expected UpdateAction, got %#v", actions[1])
}
updatedNode, ok := updateAction.GetObject().(*api.Node)
if !ok {
t.Errorf("unexpected object type")
}
// Expect LastHeartbeatTime to be updated to Now, while LastTransitionTime remains the same.
if reflect.DeepEqual(updatedNode.Status.Conditions[0].LastHeartbeatTime.Rfc3339Copy().UTC(), util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC).Time) {
t.Errorf("expected \n%v\n, got \n%v", util.Now(), util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC))
}
if !reflect.DeepEqual(updatedNode.Status.Conditions[0].LastTransitionTime.Rfc3339Copy().UTC(), util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC).Time) {
t.Errorf("expected \n%#v\n, got \n%#v", updatedNode.Status.Conditions[0].LastTransitionTime.Rfc3339Copy(),
util.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC))
}
updatedNode.Status.Conditions[0].LastHeartbeatTime = util.Time{}
updatedNode.Status.Conditions[0].LastTransitionTime = util.Time{}
if !reflect.DeepEqual(expectedNode, updatedNode) {
t.Errorf("expected \n%v\n, got \n%v", expectedNode, updatedNode)
}
}
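// TestUpdateNodeStatusWithoutContainerRuntime verifies that the node is marked
// not ready when the container runtime is reported down.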
func TestUpdateNodeStatusWithoutContainerRuntime(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
kubeClient := testKubelet.fakeKubeClient
fakeRuntime := testKubelet.fakeRuntime
// This causes GetContainerRuntimeVersion() to return an error, which
// simulates that the container runtime is down.
fakeRuntime.VersionInfo = ""
kubeClient.ReactionChain = testclient.NewSimpleFake(&api.NodeList{Items: []api.Node{
{ObjectMeta: api.ObjectMeta{Name: testKubeletHostname}},
}}).ReactionChain
mockCadvisor := testKubelet.fakeCadvisor
machineInfo := &cadvisorApi.MachineInfo{
MachineID: "123",
SystemUUID: "abc",
BootID: "1b3",
NumCores: 2,
MemoryCapacity: 1024,
}
mockCadvisor.On("MachineInfo").Return(machineInfo, nil)
versionInfo := &cadvisorApi.VersionInfo{
KernelVersion: "3.16.0-0.bpo.4-amd64",
ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
DockerVersion: "1.5.0",
}
mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
expectedNode := &api.Node{
ObjectMeta: api.ObjectMeta{Name: testKubeletHostname},
Spec: api.NodeSpec{},
Status: api.NodeStatus{
Conditions: []api.NodeCondition{
{
Type: api.NodeReady,
Status: api.ConditionFalse,
Reason: "container runtime is down",
LastHeartbeatTime: util.Time{},
LastTransitionTime: util.Time{},
},
},
NodeInfo: api.NodeSystemInfo{
MachineID: "123",
SystemUUID: "abc",
BootID: "1b3",
KernelVersion: "3.16.0-0.bpo.4-amd64",
OsImage: "Debian GNU/Linux 7 (wheezy)",
ContainerRuntimeVersion: "docker://1.5.0",
KubeletVersion: version.Get().String(),
KubeProxyVersion: version.Get().String(),
},
Capacity: api.ResourceList{
api.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
api.ResourceMemory: *resource.NewQuantity(1024, resource.BinarySI),
api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
},
Addresses: []api.NodeAddress{
{Type: api.NodeLegacyHostIP, Address: "127.0.0.1"},
{Type: api.NodeInternalIP, Address: "127.0.0.1"},
},
},
}
kubelet.runtimeUpThreshold = time.Duration(0)
kubelet.updateRuntimeUp()
if err := kubelet.updateNodeStatus(); err != nil {
t.Errorf("unexpected error: %v", err)
}
actions := kubeClient.Actions()
if len(actions) != 2 {
t.Fatalf("unexpected actions: %v", actions)
}
if !actions[1].Matches("update", "nodes") || actions[1].GetSubresource() != "status" {
t.Fatalf("unexpected actions: %v", actions)
}
updatedNode, ok := actions[1].(testclient.UpdateAction).GetObject().(*api.Node)
if !ok {
t.Errorf("unexpected action type. expected UpdateAction, got %#v", actions[1])
}
if updatedNode.Status.Conditions[0].LastHeartbeatTime.IsZero() {
t.Errorf("unexpected zero last probe timestamp")
}
if updatedNode.Status.Conditions[0].LastTransitionTime.IsZero() {
t.Errorf("unexpected zero last transition timestamp")
}
updatedNode.Status.Conditions[0].LastHeartbeatTime = util.Time{}
updatedNode.Status.Conditions[0].LastTransitionTime = util.Time{}
if !reflect.DeepEqual(expectedNode, updatedNode) {
t.Errorf("unexpected objects: %s", util.ObjectDiff(expectedNode, updatedNode))
}
}
func TestUpdateNodeStatusError(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
// No matching node for the kubelet
testKubelet.fakeKubeClient.ReactionChain = testclient.NewSimpleFake(&api.NodeList{Items: []api.Node{}}).ReactionChain
if err := kubelet.updateNodeStatus(); err == nil {
t.Errorf("unexpected non error: %v", err)
}
if len(testKubelet.fakeKubeClient.Actions()) != nodeStatusUpdateRetry {
t.Errorf("unexpected actions: %v", testKubelet.fakeKubeClient.Actions())
}
}
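// TestCreateMirrorPod verifies that syncPod creates a mirror pod for a static pod
// on both create and update syncs.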
func TestCreateMirrorPod(t *testing.T) {
for _, updateType := range []SyncPodType{SyncPodCreate, SyncPodUpdate} {
testKubelet := newTestKubelet(t)
kl := testKubelet.kubelet
manager := testKubelet.fakeMirrorClient
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "bar",
Namespace: "foo",
Annotations: map[string]string{
ConfigSourceAnnotationKey: "file",
},
},
}
pods := []*api.Pod{pod}
kl.podManager.SetPods(pods)
err := kl.syncPod(pod, nil, container.Pod{}, updateType)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
podFullName := kubecontainer.GetPodFullName(pod)
if !manager.HasPod(podFullName) {
t.Errorf("expected mirror pod %q to be created", podFullName)
}
if manager.NumOfPods() != 1 || !manager.HasPod(podFullName) {
t.Errorf("expected one mirror pod %q, got %v", podFullName, manager.GetPods())
}
}
}
func TestDeleteOutdatedMirrorPod(t *testing.T) {
testKubelet := newTestKubelet(t)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorApiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorApiv2.FsInfo{}, nil)
kl := testKubelet.kubelet
manager := testKubelet.fakeMirrorClient
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "ns",
Annotations: map[string]string{
ConfigSourceAnnotationKey: "file",
},
},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "1234", Image: "foo"},
},
},
}
// Mirror pod has an outdated spec.
mirrorPod := &api.Pod{
ObjectMeta: api.ObjectMeta{
UID: "11111111",
Name: "foo",
Namespace: "ns",
Annotations: map[string]string{
ConfigSourceAnnotationKey: "api",
ConfigMirrorAnnotationKey: "mirror",
},
},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "1234", Image: "bar"},
},
},
}
pods := []*api.Pod{pod, mirrorPod}
kl.podManager.SetPods(pods)
err := kl.syncPod(pod, mirrorPod, container.Pod{}, SyncPodUpdate)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
name := kubecontainer.GetPodFullName(pod)
creates, deletes := manager.GetCounts(name)
if creates != 0 || deletes != 1 {
t.Errorf("expected 0 creation and 1 deletion of %q, got %d, %d", name, creates, deletes)
}
}
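// TestDeleteOrphanedMirrorPods verifies that HandlePodCleanups deletes mirror pods
// whose static pods no longer exist.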
func TestDeleteOrphanedMirrorPods(t *testing.T) {
testKubelet := newTestKubelet(t)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorApiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorApiv2.FsInfo{}, nil)
kl := testKubelet.kubelet
manager := testKubelet.fakeMirrorClient
orphanPods := []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "pod1",
Namespace: "ns",
Annotations: map[string]string{
ConfigSourceAnnotationKey: "api",
ConfigMirrorAnnotationKey: "mirror",
},
},
},
{
ObjectMeta: api.ObjectMeta{
UID: "12345679",
Name: "pod2",
Namespace: "ns",
Annotations: map[string]string{
ConfigSourceAnnotationKey: "api",
ConfigMirrorAnnotationKey: "mirror",
},
},
},
}
kl.podManager.SetPods(orphanPods)
// Sync with an empty pod list to delete all mirror pods.
kl.HandlePodCleanups()
if manager.NumOfPods() != 0 {
t.Errorf("expected zero mirror pods, got %v", manager.GetPods())
}
for _, pod := range orphanPods {
name := kubecontainer.GetPodFullName(pod)
creates, deletes := manager.GetCounts(name)
if creates != 0 || deletes != 1 {
t.Errorf("expected 0 creation and one deletion of %q, got %d, %d", name, creates, deletes)
}
}
}
func TestGetContainerInfoForMirrorPods(t *testing.T) {
// pods contain one static and one mirror pod with the same name but
// different UIDs.
pods := []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
UID: "1234",
Name: "qux",
Namespace: "ns",
Annotations: map[string]string{
ConfigSourceAnnotationKey: "file",
},
},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "foo"},
},
},
},
{
ObjectMeta: api.ObjectMeta{
UID: "5678",
Name: "qux",
Namespace: "ns",
Annotations: map[string]string{
ConfigSourceAnnotationKey: "api",
ConfigMirrorAnnotationKey: "mirror",
},
},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "foo"},
},
},
},
}
containerID := "ab2cdf"
containerPath := fmt.Sprintf("/docker/%v", containerID)
containerInfo := cadvisorApi.ContainerInfo{
ContainerReference: cadvisorApi.ContainerReference{
Name: containerPath,
},
}
testKubelet := newTestKubelet(t)
fakeRuntime := testKubelet.fakeRuntime
mockCadvisor := testKubelet.fakeCadvisor
cadvisorReq := &cadvisorApi.ContainerInfoRequest{}
mockCadvisor.On("DockerContainer", containerID, cadvisorReq).Return(containerInfo, nil)
kubelet := testKubelet.kubelet
fakeRuntime.PodList = []*kubecontainer.Pod{
{
ID: "1234",
Name: "qux",
Namespace: "ns",
Containers: []*kubecontainer.Container{
{
Name: "foo",
ID: types.UID(containerID),
},
},
},
}
kubelet.podManager.SetPods(pods)
// Use the mirror pod UID to retrieve the stats.
stats, err := kubelet.GetContainerInfo("qux_ns", "5678", "foo", cadvisorReq)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if stats == nil {
t.Fatalf("stats should not be nil")
}
mockCadvisor.AssertExpectations(t)
}
func TestDoNotCacheStatusForStaticPods(t *testing.T) {
testKubelet := newTestKubelet(t)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorApiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorApiv2.FsInfo{}, nil)
kubelet := testKubelet.kubelet
pods := []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "staticFoo",
Namespace: "new",
Annotations: map[string]string{
ConfigSourceAnnotationKey: "file",
},
},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "bar"},
},
},
},
}
kubelet.podManager.SetPods(pods)
kubelet.HandlePodSyncs(kubelet.podManager.GetPods())
status, ok := kubelet.statusManager.GetPodStatus(pods[0].UID)
if ok {
t.Errorf("unexpected status %#v found for static pod %q", status, pods[0].UID)
}
}
func TestHostNetworkAllowed(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
capabilities.SetForTests(capabilities.Capabilities{
PrivilegedSources: capabilities.PrivilegedSources{
HostNetworkSources: []string{ApiserverSource, FileSource},
},
})
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
Annotations: map[string]string{
ConfigSourceAnnotationKey: FileSource,
},
},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "foo"},
},
HostNetwork: true,
},
}
kubelet.podManager.SetPods([]*api.Pod{pod})
err := kubelet.syncPod(pod, nil, container.Pod{}, SyncPodUpdate)
if err != nil {
t.Errorf("expected pod infra creation to succeed: %v", err)
}
}
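// TestHostNetworkDisallowed verifies that syncPod rejects a host-network pod whose
// source is not listed in HostNetworkSources.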
func TestHostNetworkDisallowed(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
capabilities.SetForTests(capabilities.Capabilities{
PrivilegedSources: capabilities.PrivilegedSources{
HostNetworkSources: []string{},
},
})
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
Annotations: map[string]string{
ConfigSourceAnnotationKey: FileSource,
},
},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "foo"},
},
HostNetwork: true,
},
}
err := kubelet.syncPod(pod, nil, container.Pod{}, SyncPodUpdate)
if err == nil {
t.Errorf("expected pod infra creation to fail")
}
}
func TestPrivilegeContainerAllowed(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
capabilities.SetForTests(capabilities.Capabilities{
AllowPrivileged: true,
})
privileged := true
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "foo", SecurityContext: &api.SecurityContext{Privileged: &privileged}},
},
},
}
kubelet.podManager.SetPods([]*api.Pod{pod})
err := kubelet.syncPod(pod, nil, container.Pod{}, SyncPodUpdate)
if err != nil {
t.Errorf("expected pod infra creation to succeed: %v", err)
}
}
func TestPrivilegeContainerDisallowed(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
capabilities.SetForTests(capabilities.Capabilities{
AllowPrivileged: false,
})
privileged := true
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "foo", SecurityContext: &api.SecurityContext{Privileged: &privileged}},
},
},
}
err := kubelet.syncPod(pod, nil, container.Pod{}, SyncPodUpdate)
if err == nil {
t.Errorf("expected pod infra creation to fail")
}
}
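// TestFilterOutTerminatedPods verifies that filterOutTerminatedPods drops failed
// and succeeded pods and keeps the rest.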
func TestFilterOutTerminatedPods(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
pods := newTestPods(5)
pods[0].Status.Phase = api.PodFailed
pods[1].Status.Phase = api.PodSucceeded
pods[2].Status.Phase = api.PodRunning
pods[3].Status.Phase = api.PodPending
expected := []*api.Pod{pods[2], pods[3], pods[4]}
kubelet.podManager.SetPods(pods)
actual := kubelet.filterOutTerminatedPods(pods)
if !reflect.DeepEqual(expected, actual) {
t.Errorf("expected %#v, got %#v", expected, actual)
}
}
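// TestRegisterExistingNodeWithApiserver verifies that registration completes when
// the apiserver reports the node already exists.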
func TestRegisterExistingNodeWithApiserver(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
kubeClient := testKubelet.fakeKubeClient
kubeClient.AddReactor("create", "nodes", func(action testclient.Action) (bool, runtime.Object, error) {
// Return an error on create.
return true, &api.Node{}, &apierrors.StatusError{
ErrStatus: api.Status{Reason: api.StatusReasonAlreadyExists},
}
})
kubeClient.AddReactor("get", "nodes", func(action testclient.Action) (bool, runtime.Object, error) {
// Return an existing (matching) node on get.
return true, &api.Node{
ObjectMeta: api.ObjectMeta{Name: testKubeletHostname},
Spec: api.NodeSpec{ExternalID: testKubeletHostname},
}, nil
})
kubeClient.AddReactor("*", "*", func(action testclient.Action) (bool, runtime.Object, error) {
return true, nil, fmt.Errorf("no reaction implemented for %s", action)
})
machineInfo := &cadvisorApi.MachineInfo{
MachineID: "123",
SystemUUID: "abc",
BootID: "1b3",
NumCores: 2,
MemoryCapacity: 1024,
}
mockCadvisor := testKubelet.fakeCadvisor
mockCadvisor.On("MachineInfo").Return(machineInfo, nil)
versionInfo := &cadvisorApi.VersionInfo{
KernelVersion: "3.16.0-0.bpo.4-amd64",
ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
DockerVersion: "1.5.0",
}
mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
done := make(chan struct{})
go func() {
kubelet.registerWithApiserver()
done <- struct{}{}
}()
select {
case <-time.After(5 * time.Second):
t.Errorf("timed out waiting for registration")
case <-done:
return
}
}
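// TestMakePortMappings verifies that makePortMappings names the mappings and
// ignores duplicate ports.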
func TestMakePortMappings(t *testing.T) {
tests := []struct {
container *api.Container
expectedPortMappings []kubecontainer.PortMapping
}{
{
&api.Container{
Name: "fooContainer",
Ports: []api.ContainerPort{
{
Protocol: api.ProtocolTCP,
ContainerPort: 80,
HostPort: 8080,
HostIP: "127.0.0.1",
},
{
Protocol: api.ProtocolTCP,
ContainerPort: 443,
HostPort: 4343,
HostIP: "192.168.0.1",
},
{
Name: "foo",
Protocol: api.ProtocolUDP,
ContainerPort: 555,
HostPort: 5555,
},
{
Name: "foo", // Duplicated, should be ignored.
Protocol: api.ProtocolUDP,
ContainerPort: 888,
HostPort: 8888,
},
{
Protocol: api.ProtocolTCP, // Duplicated, should be ignored.
ContainerPort: 80,
HostPort: 8888,
},
},
},
[]kubecontainer.PortMapping{
{
Name: "fooContainer-TCP:80",
Protocol: api.ProtocolTCP,
ContainerPort: 80,
HostPort: 8080,
HostIP: "127.0.0.1",
},
{
Name: "fooContainer-TCP:443",
Protocol: api.ProtocolTCP,
ContainerPort: 443,
HostPort: 4343,
HostIP: "192.168.0.1",
},
{
Name: "fooContainer-foo",
Protocol: api.ProtocolUDP,
ContainerPort: 555,
HostPort: 5555,
HostIP: "",
},
},
},
}
for i, tt := range tests {
actual := makePortMappings(tt.container)
if !reflect.DeepEqual(tt.expectedPortMappings, actual) {
t.Errorf("%d: Expected: %#v, saw: %#v", i, tt.expectedPortMappings, actual)
}
}
}
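// TestIsPodPastActiveDeadline verifies that pastActiveDeadline reports only pods
// whose ActiveDeadlineSeconds have elapsed since their start time.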
func TestIsPodPastActiveDeadline(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
pods := newTestPods(5)
exceededActiveDeadlineSeconds := int64(30)
notYetActiveDeadlineSeconds := int64(120)
now := util.Now()
startTime := util.NewTime(now.Time.Add(-1 * time.Minute))
pods[0].Status.StartTime = &startTime
pods[0].Spec.ActiveDeadlineSeconds = &exceededActiveDeadlineSeconds
pods[1].Status.StartTime = &startTime
pods[1].Spec.ActiveDeadlineSeconds = ¬YetActiveDeadlineSeconds
tests := []struct {
pod *api.Pod
expected bool
}{{pods[0], true}, {pods[1], false}, {pods[2], false}, {pods[3], false}, {pods[4], false}}
kubelet.podManager.SetPods(pods)
for i, tt := range tests {
actual := kubelet.pastActiveDeadline(tt.pod)
if actual != tt.expected {
t.Errorf("[%d] expected %#v, got %#v", i, tt.expected, actual)
}
}
}
func TestSyncPodsSetStatusToFailedForPodsThatRunTooLong(t *testing.T) {
testKubelet := newTestKubelet(t)
fakeRuntime := testKubelet.fakeRuntime
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil)
kubelet := testKubelet.kubelet
now := util.Now()
startTime := util.NewTime(now.Time.Add(-1 * time.Minute))
exceededActiveDeadlineSeconds := int64(30)
pods := []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "bar",
Namespace: "new",
},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "foo"},
},
ActiveDeadlineSeconds: &exceededActiveDeadlineSeconds,
},
Status: api.PodStatus{
StartTime: &startTime,
},
},
}
fakeRuntime.PodList = []*kubecontainer.Pod{
{
ID: "12345678",
Name: "bar",
Namespace: "new",
Containers: []*kubecontainer.Container{
{Name: "foo"},
},
},
}
// Let the pod worker set the status to failed after this sync.
kubelet.HandlePodUpdates(pods)
status, found := kubelet.statusManager.GetPodStatus(pods[0].UID)
if !found {
t.Errorf("expected to found status for pod %q", pods[0].UID)
}
if status.Phase != api.PodFailed {
t.Fatalf("expected pod status %q, ot %q.", api.PodFailed, status.Phase)
}
}
func TestSyncPodsDoesNotSetPodsThatDidNotRunTooLongToFailed(t *testing.T) {
testKubelet := newTestKubelet(t)
fakeRuntime := testKubelet.fakeRuntime
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil)
kubelet := testKubelet.kubelet
now := util.Now()
startTime := util.NewTime(now.Time.Add(-1 * time.Minute))
exceededActiveDeadlineSeconds := int64(300)
pods := []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "bar",
Namespace: "new",
},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "foo"},
},
ActiveDeadlineSeconds: &exceededActiveDeadlineSeconds,
},
Status: api.PodStatus{
StartTime: &startTime,
},
},
}
fakeRuntime.PodList = []*kubecontainer.Pod{
{
ID: "12345678",
Name: "bar",
Namespace: "new",
Containers: []*kubecontainer.Container{
{Name: "foo"},
},
},
}
kubelet.podManager.SetPods(pods)
kubelet.HandlePodUpdates(pods)
status, found := kubelet.statusManager.GetPodStatus(pods[0].UID)
if !found {
t.Errorf("expected to found status for pod %q", pods[0].UID)
}
if status.Phase == api.PodFailed {
t.Fatalf("expected pod status to not be %q", status.Phase)
}
}
func TestDeletePodDirsForDeletedPods(t *testing.T) {
testKubelet := newTestKubelet(t)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorApiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorApiv2.FsInfo{}, nil)
kl := testKubelet.kubelet
pods := []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "pod1",
Namespace: "ns",
},
},
{
ObjectMeta: api.ObjectMeta{
UID: "12345679",
Name: "pod2",
Namespace: "ns",
},
},
}
kl.podManager.SetPods(pods)
// Sync to create pod directories.
kl.HandlePodSyncs(kl.podManager.GetPods())
for i := range pods {
if !dirExists(kl.getPodDir(pods[i].UID)) {
t.Errorf("expected directory to exist for pod %d", i)
}
}
// Pod 1 has been deleted and no longer exists.
kl.podManager.SetPods([]*api.Pod{pods[0]})
kl.HandlePodCleanups()
if !dirExists(kl.getPodDir(pods[0].UID)) {
t.Errorf("expected directory to exist for pod 0")
}
if dirExists(kl.getPodDir(pods[1].UID)) {
t.Errorf("expected directory to be deleted for pod 1")
}
}
func syncAndVerifyPodDir(t *testing.T, testKubelet *TestKubelet, pods []*api.Pod, podsToCheck []*api.Pod, shouldExist bool) {
kl := testKubelet.kubelet
kl.podManager.SetPods(pods)
kl.HandlePodSyncs(pods)
kl.HandlePodCleanups()
for i, pod := range podsToCheck {
exist := dirExists(kl.getPodDir(pod.UID))
if shouldExist && !exist {
t.Errorf("expected directory to exist for pod %d", i)
} else if !shouldExist && exist {
t.Errorf("expected directory to be removed for pod %d", i)
}
}
}
func TestDoesNotDeletePodDirsForTerminatedPods(t *testing.T) {
testKubelet := newTestKubelet(t)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorApiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorApiv2.FsInfo{}, nil)
kl := testKubelet.kubelet
pods := []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "pod1",
Namespace: "ns",
},
},
{
ObjectMeta: api.ObjectMeta{
UID: "12345679",
Name: "pod2",
Namespace: "ns",
},
},
{
ObjectMeta: api.ObjectMeta{
UID: "12345680",
Name: "pod3",
Namespace: "ns",
},
},
}
syncAndVerifyPodDir(t, testKubelet, pods, pods, true)
// Pod 1 failed, and pod 2 succeeded. None of the pod directories should be
// deleted.
kl.statusManager.SetPodStatus(pods[1], api.PodStatus{Phase: api.PodFailed})
kl.statusManager.SetPodStatus(pods[2], api.PodStatus{Phase: api.PodSucceeded})
syncAndVerifyPodDir(t, testKubelet, pods, pods, true)
}
func TestDoesNotDeletePodDirsIfContainerIsRunning(t *testing.T) {
testKubelet := newTestKubelet(t)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorApiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorApiv2.FsInfo{}, nil)
runningPod := &kubecontainer.Pod{
ID: "12345678",
Name: "pod1",
Namespace: "ns",
}
apiPod := &api.Pod{
ObjectMeta: api.ObjectMeta{
UID: runningPod.ID,
Name: runningPod.Name,
Namespace: runningPod.Namespace,
},
}
// Sync once to create pod directory; confirm that the pod directory has
// already been created.
pods := []*api.Pod{apiPod}
syncAndVerifyPodDir(t, testKubelet, pods, []*api.Pod{apiPod}, true)
// Pretend the pod is deleted from apiserver, but is still active on the node.
// The pod directory should not be removed.
pods = []*api.Pod{}
testKubelet.fakeRuntime.PodList = []*kubecontainer.Pod{runningPod}
syncAndVerifyPodDir(t, testKubelet, pods, []*api.Pod{apiPod}, true)
// The pod is deleted and also not active on the node. The pod directory
// should be removed.
pods = []*api.Pod{}
testKubelet.fakeRuntime.PodList = []*kubecontainer.Pod{}
syncAndVerifyPodDir(t, testKubelet, pods, []*api.Pod{apiPod}, false)
}
func TestCleanupBandwidthLimits(t *testing.T) {
tests := []struct {
status *api.PodStatus
pods []*api.Pod
inputCIDRs []string
expectResetCIDRs []string
cacheStatus bool
expectedCalls []string
name string
}{
{
status: &api.PodStatus{
PodIP: "1.2.3.4",
Phase: api.PodRunning,
},
pods: []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
Name: "foo",
Annotations: map[string]string{
"kubernetes.io/ingress-bandwidth": "10M",
},
},
},
{
ObjectMeta: api.ObjectMeta{
Name: "bar",
},
},
},
inputCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
expectResetCIDRs: []string{"2.3.4.5/32", "5.6.7.8/32"},
expectedCalls: []string{"GetPodStatus"},
name: "pod running",
},
{
status: &api.PodStatus{
PodIP: "1.2.3.4",
Phase: api.PodRunning,
},
pods: []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
Name: "foo",
Annotations: map[string]string{
"kubernetes.io/ingress-bandwidth": "10M",
},
},
},
{
ObjectMeta: api.ObjectMeta{
Name: "bar",
},
},
},
inputCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
expectResetCIDRs: []string{"2.3.4.5/32", "5.6.7.8/32"},
expectedCalls: []string{},
cacheStatus: true,
name: "pod running with cache",
},
{
status: &api.PodStatus{
PodIP: "1.2.3.4",
Phase: api.PodFailed,
},
pods: []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
Name: "foo",
Annotations: map[string]string{
"kubernetes.io/ingress-bandwidth": "10M",
},
},
},
{
ObjectMeta: api.ObjectMeta{
Name: "bar",
},
},
},
inputCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
expectResetCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
expectedCalls: []string{"GetPodStatus"},
name: "pod not running",
},
{
status: &api.PodStatus{
PodIP: "1.2.3.4",
Phase: api.PodFailed,
},
pods: []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
Name: "foo",
Annotations: map[string]string{
"kubernetes.io/ingress-bandwidth": "10M",
},
},
},
{
ObjectMeta: api.ObjectMeta{
Name: "bar",
},
},
},
inputCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
expectResetCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
expectedCalls: []string{},
cacheStatus: true,
name: "pod not running with cache",
},
{
status: &api.PodStatus{
PodIP: "1.2.3.4",
Phase: api.PodRunning,
},
pods: []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
Name: "foo",
},
},
{
ObjectMeta: api.ObjectMeta{
Name: "bar",
},
},
},
inputCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
expectResetCIDRs: []string{"1.2.3.4/32", "2.3.4.5/32", "5.6.7.8/32"},
name: "no bandwidth limits",
},
}
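// When cacheStatus is set, the status manager is pre-populated below, so cleanup
// should not call the runtime's GetPodStatus (expectedCalls stays empty).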
for _, test := range tests {
shaper := &bandwidth.FakeShaper{
CIDRs: test.inputCIDRs,
}
testKube := newTestKubelet(t)
testKube.kubelet.shaper = shaper
testKube.fakeRuntime.PodStatus = *test.status
if test.cacheStatus {
for _, pod := range test.pods {
testKube.kubelet.statusManager.SetPodStatus(pod, *test.status)
}
}
err := testKube.kubelet.cleanupBandwidthLimits(test.pods)
if err != nil {
t.Errorf("unexpected error: %v (%s)", test.name)
}
if !reflect.DeepEqual(shaper.ResetCIDRs, test.expectResetCIDRs) {
t.Errorf("[%s]\nexpected: %v, saw: %v", test.name, test.expectResetCIDRs, shaper.ResetCIDRs)
}
if test.cacheStatus {
if len(testKube.fakeRuntime.CalledFunctions) != 0 {
t.Errorf("unexpected function calls: %v", testKube.fakeRuntime.CalledFunctions)
}
} else if !reflect.DeepEqual(testKube.fakeRuntime.CalledFunctions, test.expectedCalls) {
t.Errorf("[%s], expected %v, saw %v", test.name, test.expectedCalls, testKube.fakeRuntime.CalledFunctions)
}
}
}
func TestExtractBandwidthResources(t *testing.T) {
four, _ := resource.ParseQuantity("4M")
ten, _ := resource.ParseQuantity("10M")
twenty, _ := resource.ParseQuantity("20M")
tests := []struct {
pod *api.Pod
expectedIngress *resource.Quantity
expectedEgress *resource.Quantity
expectError bool
}{
{
pod: &api.Pod{},
},
{
pod: &api.Pod{
ObjectMeta: api.ObjectMeta{
Annotations: map[string]string{
"kubernetes.io/ingress-bandwidth": "10M",
},
},
},
expectedIngress: ten,
},
{
pod: &api.Pod{
ObjectMeta: api.ObjectMeta{
Annotations: map[string]string{
"kubernetes.io/egress-bandwidth": "10M",
},
},
},
expectedEgress: ten,
},
{
pod: &api.Pod{
ObjectMeta: api.ObjectMeta{
Annotations: map[string]string{
"kubernetes.io/ingress-bandwidth": "4M",
"kubernetes.io/egress-bandwidth": "20M",
},
},
},
expectedIngress: four,
expectedEgress: twenty,
},
{
pod: &api.Pod{
ObjectMeta: api.ObjectMeta{
Annotations: map[string]string{
"kubernetes.io/ingress-bandwidth": "foo",
},
},
},
expectError: true,
},
}
for _, test := range tests {
ingress, egress, err := extractBandwidthResources(test.pod)
if test.expectError {
if err == nil {
t.Errorf("unexpected non-error")
}
continue
}
if err != nil {
t.Errorf("unexpected error: %v", err)
continue
}
if !reflect.DeepEqual(ingress, test.expectedIngress) {
t.Errorf("expected: %v, saw: %v", ingress, test.expectedIngress)
}
if !reflect.DeepEqual(egress, test.expectedEgress) {
t.Errorf("expected: %v, saw: %v", egress, test.expectedEgress)
}
}
}<|fim▁end|>
|
kubelet.podKillingCh = make(chan *kubecontainer.Pod, 20)
return &TestKubelet{kubelet, fakeRuntime, mockCadvisor, fakeKubeClient, fakeMirrorClient}
|
<|file_name|>properties_methods1.js<|end_file_name|><|fim▁begin|>/** @constructor */
function RecordSet(f) {
/** @function */
this.write = f;
}
/*
[
{
symbols: [
{
doc: { tags: [] },
returns: [],
type: "",
properties: [],
isa: "CONSTRUCTOR",
desc: "",
alias: "RecordSet",
memberof: "",
params: [
{ title: "param", desc: "", type: "", name: "f", isOptional: false }
],
methods: [ { name: "write", alias: "RecordSet.write" } ],
name: "RecordSet"
},
{
doc: { tags: [] },
returns: [],
type: "",
properties: [],
isa: "FUNCTION",
desc: "",
alias: "RecordSet.write",
memberof: "RecordSet",
params: [],
methods: [],
name: "write"
}
],
overview: {
doc: { tags: [] },
returns: [],
type: "",
properties: [],
isa: "FILE",
desc: "No overview provided.",
alias: "examples/data/properties_methods1.js",
memberof: "",
params: [],
methods: [],
name: "examples/data/properties_methods1.js"
}
}
]<|fim▁hole|><|fim▁end|>
|
*/
|
<|file_name|>authUser.js<|end_file_name|><|fim▁begin|>var mongoose = require('mongoose');
var bcrypt = require('bcrypt-nodejs');
// define the schema for our user model
var authUserSchema = mongoose.Schema({
unique_ID : String,
username : String,
password : String,
role : String,
first_name : String,
last_name : String
});
// methods ======================
// generating a hash<|fim▁hole|> return bcrypt.hashSync(password, bcrypt.genSaltSync(8), null);
};
// checking if password is valid
authUserSchema.methods.validPassword = function(password) {
return bcrypt.compareSync(password, this.password);
};
// create the model for users and expose it to our app
module.exports = mongoose.model('authUser', authUserSchema);<|fim▁end|>
|
authUserSchema.methods.generateHash = function(password) {
|
<|file_name|>parser.rs<|end_file_name|><|fim▁begin|>extern crate thunderdome;
#[cfg(test)]
mod parser_tests {
use thunderdome::parser::*;
fn validate(q: &str) -> Option<ParsedGraphQuery> {
let result = pre_parse(q);
assert!(result.is_ok());
result.ok()
}
#[test]
fn global_graph_query() {
validate("g.V()");<|fim▁hole|> }
#[test]
fn vertex_query() {
validate("g.v(1)");
validate("g.v(1,2)");
validate("g.v(1, 2)");
}
#[test]
fn simple_step_test() {
validate("g.v(1).outV()");
let result = validate("g.v(1).outV().inE()").unwrap();
assert_eq!(result.steps.len(), 3);
let step1 = result.steps.get(1).unwrap();
assert_eq!(step1.name, "outV".to_string());
let step2 = result.steps.get(2).unwrap();
assert_eq!(step2.name, "inE".to_string());
}
#[test]
fn test_args() {
let result = validate("g.v(1).outV('edge').has('age', 30)").unwrap();
let step1 = result.steps.get(1).unwrap();
assert_eq!(step1.name, "outV".to_string());
// make sure the arg is edge. should be a string and unquoted
match step1.args.get(0).unwrap() {
&Arg::String(ref x) if *x == "edge".to_string() => {},
&Arg::String(ref x) => panic!("{}", x),
x => { panic!("wrong type") }
}
}
#[test]
fn test_args_numbers() {
// maybe a weird offset command?
validate("g.V().limit(10, 20)");
let result = validate("g.V().limit(10.0)").unwrap();
match result.steps.get(1).unwrap().args.get(0).unwrap() {
&Arg::Float(ref x) if *x == 10.0 => { },
_ => { panic!("OH NOES")}
}
}
}<|fim▁end|>
| |
<|file_name|>txdb-leveldb.cpp<|end_file_name|><|fim▁begin|>// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2012 The Bitcoin developers
// Distributed under the MIT/X11 software license, see the accompanying
// file license.txt or http://www.opensource.org/licenses/mit-license.php.
#include <map>
#include <boost/version.hpp>
#include <boost/filesystem.hpp>
#include <boost/filesystem/fstream.hpp>
#include <leveldb/env.h>
#include <leveldb/cache.h>
#include <leveldb/filter_policy.h>
#include <memenv/memenv.h>
#include "kernel.h"
#include "checkpoints.h"
#include "txdb.h"
#include "util.h"
#include "main.h"
#include "chainparams.h"
using namespace std;
using namespace boost;
leveldb::DB *txdb; // global pointer for LevelDB object instance
static leveldb::Options GetOptions() {
leveldb::Options options;
int nCacheSizeMB = GetArg("-dbcache", 25);
options.block_cache = leveldb::NewLRUCache(nCacheSizeMB * 1048576);
options.filter_policy = leveldb::NewBloomFilterPolicy(10);
return options;
}
void init_blockindex(leveldb::Options& options, bool fRemoveOld = false) {
// First time init.
filesystem::path directory = GetDataDir() / "txleveldb";
if (fRemoveOld) {
filesystem::remove_all(directory); // remove directory
unsigned int nFile = 1;
while (true)
{
filesystem::path strBlockFile = GetDataDir() / strprintf("blk%04u.dat", nFile);
// Break if no such file
if( !filesystem::exists( strBlockFile ) )
break;
filesystem::remove(strBlockFile);
nFile++;
}
}
filesystem::create_directory(directory);
LogPrintf("Opening LevelDB in %s\n", directory.string());
leveldb::Status status = leveldb::DB::Open(options, directory.string(), &txdb);
if (!status.ok()) {
throw runtime_error(strprintf("init_blockindex(): error opening database environment %s", status.ToString()));
}
}
// CDB subclasses are created and destroyed VERY OFTEN. That's why
// we shouldn't treat this as a free operation.
CTxDB::CTxDB(const char* pszMode)
{
assert(pszMode);
activeBatch = NULL;
fReadOnly = (!strchr(pszMode, '+') && !strchr(pszMode, 'w'));
if (txdb) {
pdb = txdb;
return;
}
bool fCreate = strchr(pszMode, 'c');
options = GetOptions();
options.create_if_missing = fCreate;
options.filter_policy = leveldb::NewBloomFilterPolicy(10);
init_blockindex(options); // Init directory
pdb = txdb;
if (Exists(string("version")))
{
ReadVersion(nVersion);
LogPrintf("Transaction index version is %d\n", nVersion);
if (nVersion < DATABASE_VERSION)
{
LogPrintf("Required index version is %d, removing old database\n", DATABASE_VERSION);
// Leveldb instance destruction
delete txdb;
txdb = pdb = NULL;
delete activeBatch;
activeBatch = NULL;
init_blockindex(options, true); // Remove directory and create new database
pdb = txdb;
bool fTmp = fReadOnly;
fReadOnly = false;
WriteVersion(DATABASE_VERSION); // Save transaction index version
fReadOnly = fTmp;
}
}
else if (fCreate)
{
bool fTmp = fReadOnly;
fReadOnly = false;
WriteVersion(DATABASE_VERSION);
fReadOnly = fTmp;
}
LogPrintf("Opened LevelDB successfully\n");
}
void CTxDB::Close()
{
delete txdb;
txdb = pdb = NULL;
delete options.filter_policy;
options.filter_policy = NULL;
delete options.block_cache;
options.block_cache = NULL;
delete activeBatch;
activeBatch = NULL;
}
bool CTxDB::TxnBegin()
{
assert(!activeBatch);
activeBatch = new leveldb::WriteBatch();
return true;
}
bool CTxDB::TxnCommit()
{
assert(activeBatch);
leveldb::Status status = pdb->Write(leveldb::WriteOptions(), activeBatch);
delete activeBatch;
activeBatch = NULL;
if (!status.ok()) {
LogPrintf("LevelDB batch commit failure: %s\n", status.ToString());
return false;
}
return true;
}
class CBatchScanner : public leveldb::WriteBatch::Handler {
public:
std::string needle;
bool *deleted;
std::string *foundValue;
bool foundEntry;<|fim▁hole|> if (key.ToString() == needle) {
foundEntry = true;
*deleted = false;
*foundValue = value.ToString();
}
}
virtual void Delete(const leveldb::Slice& key) {
if (key.ToString() == needle) {
foundEntry = true;
*deleted = true;
}
}
};
// When performing a read, if we have an active batch we need to check it first
// before reading from the database, as the rest of the code assumes that once
// a database transaction begins reads are consistent with it. It would be good
// to change that assumption in future and avoid the performance hit, though in
// practice it does not appear to be large.
bool CTxDB::ScanBatch(const CDataStream &key, string *value, bool *deleted) const {
assert(activeBatch);
*deleted = false;
CBatchScanner scanner;
scanner.needle = key.str();
scanner.deleted = deleted;
scanner.foundValue = value;
leveldb::Status status = activeBatch->Iterate(&scanner);
if (!status.ok()) {
throw runtime_error(status.ToString());
}
return scanner.foundEntry;
}
bool CTxDB::ReadTxIndex(uint256 hash, CTxIndex& txindex)
{
txindex.SetNull();
return Read(make_pair(string("tx"), hash), txindex);
}
bool CTxDB::UpdateTxIndex(uint256 hash, const CTxIndex& txindex)
{
return Write(make_pair(string("tx"), hash), txindex);
}
bool CTxDB::AddTxIndex(const CTransaction& tx, const CDiskTxPos& pos, int nHeight)
{
// Add to tx index
uint256 hash = tx.GetHash();
CTxIndex txindex(pos, tx.vout.size());
return Write(make_pair(string("tx"), hash), txindex);
}
bool CTxDB::EraseTxIndex(const CTransaction& tx)
{
uint256 hash = tx.GetHash();
return Erase(make_pair(string("tx"), hash));
}
bool CTxDB::ContainsTx(uint256 hash)
{
return Exists(make_pair(string("tx"), hash));
}
bool CTxDB::ReadDiskTx(uint256 hash, CTransaction& tx, CTxIndex& txindex)
{
tx.SetNull();
if (!ReadTxIndex(hash, txindex))
return false;
return (tx.ReadFromDisk(txindex.pos));
}
bool CTxDB::ReadDiskTx(uint256 hash, CTransaction& tx)
{
CTxIndex txindex;
return ReadDiskTx(hash, tx, txindex);
}
bool CTxDB::ReadDiskTx(COutPoint outpoint, CTransaction& tx, CTxIndex& txindex)
{
return ReadDiskTx(outpoint.hash, tx, txindex);
}
bool CTxDB::ReadDiskTx(COutPoint outpoint, CTransaction& tx)
{
CTxIndex txindex;
return ReadDiskTx(outpoint.hash, tx, txindex);
}
bool CTxDB::WriteBlockIndex(const CDiskBlockIndex& blockindex)
{
return Write(make_pair(string("blockindex"), blockindex.GetBlockHash()), blockindex);
}
bool CTxDB::ReadHashBestChain(uint256& hashBestChain)
{
return Read(string("hashBestChain"), hashBestChain);
}
bool CTxDB::WriteHashBestChain(uint256 hashBestChain)
{
return Write(string("hashBestChain"), hashBestChain);
}
bool CTxDB::ReadBestInvalidTrust(CBigNum& bnBestInvalidTrust)
{
return Read(string("bnBestInvalidTrust"), bnBestInvalidTrust);
}
bool CTxDB::WriteBestInvalidTrust(CBigNum bnBestInvalidTrust)
{
return Write(string("bnBestInvalidTrust"), bnBestInvalidTrust);
}
bool CTxDB::ReadSyncCheckpoint(uint256& hashCheckpoint)
{
return Read(string("hashSyncCheckpoint"), hashCheckpoint);
}
bool CTxDB::WriteSyncCheckpoint(uint256 hashCheckpoint)
{
return Write(string("hashSyncCheckpoint"), hashCheckpoint);
}
bool CTxDB::ReadCheckpointPubKey(string& strPubKey)
{
return Read(string("strCheckpointPubKey"), strPubKey);
}
bool CTxDB::WriteCheckpointPubKey(const string& strPubKey)
{
return Write(string("strCheckpointPubKey"), strPubKey);
}
static CBlockIndex *InsertBlockIndex(uint256 hash)
{
if (hash == 0)
return NULL;
// Return existing
map<uint256, CBlockIndex*>::iterator mi = mapBlockIndex.find(hash);
if (mi != mapBlockIndex.end())
return (*mi).second;
// Create new
CBlockIndex* pindexNew = new CBlockIndex();
if (!pindexNew)
throw runtime_error("LoadBlockIndex() : new CBlockIndex failed");
mi = mapBlockIndex.insert(make_pair(hash, pindexNew)).first;
pindexNew->phashBlock = &((*mi).first);
return pindexNew;
}
bool CTxDB::LoadBlockIndex()
{
if (mapBlockIndex.size() > 0) {
// Already loaded once in this session. It can happen during migration
// from BDB.
return true;
}
// The block index is an in-memory structure that maps hashes to on-disk
// locations where the contents of the block can be found. Here, we scan it
// out of the DB and into mapBlockIndex.
leveldb::Iterator *iterator = pdb->NewIterator(leveldb::ReadOptions());
// Seek to start key.
CDataStream ssStartKey(SER_DISK, CLIENT_VERSION);
ssStartKey << make_pair(string("blockindex"), uint256(0));
iterator->Seek(ssStartKey.str());
// Now read each entry.
while (iterator->Valid())
{
boost::this_thread::interruption_point();
// Unpack keys and values.
CDataStream ssKey(SER_DISK, CLIENT_VERSION);
ssKey.write(iterator->key().data(), iterator->key().size());
CDataStream ssValue(SER_DISK, CLIENT_VERSION);
ssValue.write(iterator->value().data(), iterator->value().size());
string strType;
ssKey >> strType;
// Did we reach the end of the data to read?
if (strType != "blockindex")
break;
CDiskBlockIndex diskindex;
ssValue >> diskindex;
uint256 blockHash = diskindex.GetBlockHash();
// Construct block index object
CBlockIndex* pindexNew = InsertBlockIndex(blockHash);
pindexNew->pprev = InsertBlockIndex(diskindex.hashPrev);
pindexNew->pnext = InsertBlockIndex(diskindex.hashNext);
pindexNew->nFile = diskindex.nFile;
pindexNew->nBlockPos = diskindex.nBlockPos;
pindexNew->nHeight = diskindex.nHeight;
pindexNew->nMint = diskindex.nMint;
pindexNew->nMoneySupply = diskindex.nMoneySupply;
pindexNew->nFlags = diskindex.nFlags;
pindexNew->nStakeModifier = diskindex.nStakeModifier;
pindexNew->prevoutStake = diskindex.prevoutStake;
pindexNew->nStakeTime = diskindex.nStakeTime;
pindexNew->hashProof = diskindex.hashProof;
pindexNew->nVersion = diskindex.nVersion;
pindexNew->hashMerkleRoot = diskindex.hashMerkleRoot;
pindexNew->nTime = diskindex.nTime;
pindexNew->nBits = diskindex.nBits;
pindexNew->nNonce = diskindex.nNonce;
// Watch for genesis block
if (pindexGenesisBlock == NULL && blockHash == Params().HashGenesisBlock())
pindexGenesisBlock = pindexNew;
if (!pindexNew->CheckIndex()) {
delete iterator;
return error("LoadBlockIndex() : CheckIndex failed at %d", pindexNew->nHeight);
}
// xRadon: build setStakeSeen
if (pindexNew->IsProofOfStake())
setStakeSeen.insert(make_pair(pindexNew->prevoutStake, pindexNew->nStakeTime));
iterator->Next();
}
delete iterator;
boost::this_thread::interruption_point();
// Calculate nChainTrust
vector<pair<int, CBlockIndex*> > vSortedByHeight;
vSortedByHeight.reserve(mapBlockIndex.size());
BOOST_FOREACH(const PAIRTYPE(uint256, CBlockIndex*)& item, mapBlockIndex)
{
CBlockIndex* pindex = item.second;
vSortedByHeight.push_back(make_pair(pindex->nHeight, pindex));
}
sort(vSortedByHeight.begin(), vSortedByHeight.end());
BOOST_FOREACH(const PAIRTYPE(int, CBlockIndex*)& item, vSortedByHeight)
{
CBlockIndex* pindex = item.second;
pindex->nChainTrust = (pindex->pprev ? pindex->pprev->nChainTrust : 0) + pindex->GetBlockTrust();
}
// Load hashBestChain pointer to end of best chain
if (!ReadHashBestChain(hashBestChain))
{
if (pindexGenesisBlock == NULL)
return true;
return error("CTxDB::LoadBlockIndex() : hashBestChain not loaded");
}
if (!mapBlockIndex.count(hashBestChain))
return error("CTxDB::LoadBlockIndex() : hashBestChain not found in the block index");
pindexBest = mapBlockIndex[hashBestChain];
nBestHeight = pindexBest->nHeight;
nBestChainTrust = pindexBest->nChainTrust;
LogPrintf("LoadBlockIndex(): hashBestChain=%s height=%d trust=%s date=%s\n",
hashBestChain.ToString(), nBestHeight, CBigNum(nBestChainTrust).ToString(),
DateTimeStrFormat("%x %H:%M:%S", pindexBest->GetBlockTime()));
// xRadon: load hashSyncCheckpoint
if (!ReadSyncCheckpoint(Checkpoints::hashSyncCheckpoint))
return error("CTxDB::LoadBlockIndex() : hashSyncCheckpoint not loaded");
LogPrintf("LoadBlockIndex(): synchronized checkpoint %s\n", Checkpoints::hashSyncCheckpoint.ToString());
// Load bnBestInvalidTrust, OK if it doesn't exist
CBigNum bnBestInvalidTrust;
ReadBestInvalidTrust(bnBestInvalidTrust);
nBestInvalidTrust = bnBestInvalidTrust.getuint256();
// Verify blocks in the best chain
int nCheckLevel = GetArg("-checklevel", 1);
int nCheckDepth = GetArg( "-checkblocks", 500);
if (nCheckDepth == 0)
nCheckDepth = 1000000000; // suffices until the year 19000
if (nCheckDepth > nBestHeight)
nCheckDepth = nBestHeight;
LogPrintf("Verifying last %i blocks at level %i\n", nCheckDepth, nCheckLevel);
CBlockIndex* pindexFork = NULL;
map<pair<unsigned int, unsigned int>, CBlockIndex*> mapBlockPos;
for (CBlockIndex* pindex = pindexBest; pindex && pindex->pprev; pindex = pindex->pprev)
{
boost::this_thread::interruption_point();
if (pindex->nHeight < nBestHeight-nCheckDepth)
break;
CBlock block;
if (!block.ReadFromDisk(pindex))
return error("LoadBlockIndex() : block.ReadFromDisk failed");
// check level 1: verify block validity
// check level 7: verify block signature too
if (nCheckLevel>0 && !block.CheckBlock(true, true, (nCheckLevel>6)))
{
LogPrintf("LoadBlockIndex() : *** found bad block at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
pindexFork = pindex->pprev;
}
// check level 2: verify transaction index validity
if (nCheckLevel>1)
{
pair<unsigned int, unsigned int> pos = make_pair(pindex->nFile, pindex->nBlockPos);
mapBlockPos[pos] = pindex;
BOOST_FOREACH(const CTransaction &tx, block.vtx)
{
uint256 hashTx = tx.GetHash();
CTxIndex txindex;
if (ReadTxIndex(hashTx, txindex))
{
// check level 3: check transaction hashes
if (nCheckLevel>2 || pindex->nFile != txindex.pos.nFile || pindex->nBlockPos != txindex.pos.nBlockPos)
{
// either an error or a duplicate transaction
CTransaction txFound;
if (!txFound.ReadFromDisk(txindex.pos))
{
LogPrintf("LoadBlockIndex() : *** cannot read mislocated transaction %s\n", hashTx.ToString());
pindexFork = pindex->pprev;
}
else
if (txFound.GetHash() != hashTx) // not a duplicate tx
{
LogPrintf("LoadBlockIndex(): *** invalid tx position for %s\n", hashTx.ToString());
pindexFork = pindex->pprev;
}
}
// check level 4: check whether spent txouts were spent within the main chain
unsigned int nOutput = 0;
if (nCheckLevel>3)
{
BOOST_FOREACH(const CDiskTxPos &txpos, txindex.vSpent)
{
if (!txpos.IsNull())
{
pair<unsigned int, unsigned int> posFind = make_pair(txpos.nFile, txpos.nBlockPos);
if (!mapBlockPos.count(posFind))
{
LogPrintf("LoadBlockIndex(): *** found bad spend at %d, hashBlock=%s, hashTx=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString(), hashTx.ToString());
pindexFork = pindex->pprev;
}
// check level 6: check whether spent txouts were spent by a valid transaction that consumes them
if (nCheckLevel>5)
{
CTransaction txSpend;
if (!txSpend.ReadFromDisk(txpos))
{
LogPrintf("LoadBlockIndex(): *** cannot read spending transaction of %s:%i from disk\n", hashTx.ToString(), nOutput);
pindexFork = pindex->pprev;
}
else if (!txSpend.CheckTransaction())
{
LogPrintf("LoadBlockIndex(): *** spending transaction of %s:%i is invalid\n", hashTx.ToString(), nOutput);
pindexFork = pindex->pprev;
}
else
{
bool fFound = false;
BOOST_FOREACH(const CTxIn &txin, txSpend.vin)
if (txin.prevout.hash == hashTx && txin.prevout.n == nOutput)
fFound = true;
if (!fFound)
{
LogPrintf("LoadBlockIndex(): *** spending transaction of %s:%i does not spend it\n", hashTx.ToString(), nOutput);
pindexFork = pindex->pprev;
}
}
}
}
nOutput++;
}
}
}
// check level 5: check whether all prevouts are marked spent
if (nCheckLevel>4)
{
BOOST_FOREACH(const CTxIn &txin, tx.vin)
{
CTxIndex txindex;
if (ReadTxIndex(txin.prevout.hash, txindex))
if (txindex.vSpent.size()-1 < txin.prevout.n || txindex.vSpent[txin.prevout.n].IsNull())
{
LogPrintf("LoadBlockIndex(): *** found unspent prevout %s:%i in %s\n", txin.prevout.hash.ToString(), txin.prevout.n, hashTx.ToString());
pindexFork = pindex->pprev;
}
}
}
}
}
}
if (pindexFork)
{
boost::this_thread::interruption_point();
// Reorg back to the fork
LogPrintf("LoadBlockIndex() : *** moving best chain pointer back to block %d\n", pindexFork->nHeight);
CBlock block;
if (!block.ReadFromDisk(pindexFork))
return error("LoadBlockIndex() : block.ReadFromDisk failed");
CTxDB txdb;
block.SetBestChain(txdb, pindexFork);
}
return true;
}<|fim▁end|>
|
CBatchScanner() : foundEntry(false) {}
virtual void Put(const leveldb::Slice& key, const leveldb::Slice& value) {
|
<|file_name|>transform.rs<|end_file_name|><|fim▁begin|>//The forward dct's output coefficients are scaled by 8
//The inverse dct's output samples are clamped to the range [0, 255]
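//Clamp a signed sample to [-128, 127] and shift it up by 128 into the unsigned
//byte range [0, 255].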
fn level_shift_up(a: i32) -> u8 {
if a < -128 {0u8}
else if a > 127 {255u8}
else {a as u8 + 128u8}
}
/*
idct and fdct are Rust translations of jfdctint.c and jidctint.c from the
Independent JPEG Group's libjpeg version 9a
obtained from http://www.ijg.org/files/jpegsr9a.zip
They come with the following conditions of distribution and use:
In plain English:
1. We don't promise that this software works. (But if you find any bugs,
please let us know!)
2. You can use this software for whatever you want. You don't have to pay us.
3. You may not pretend that you wrote this software. If you use it in a
program, you must acknowledge somewhere in your documentation that
you've used the IJG code.
In legalese:
The authors make NO WARRANTY or representation, either express or implied,
with respect to this software, its quality, accuracy, merchantability, or
fitness for a particular purpose. This software is provided "AS IS", and you,
its user, assume the entire risk as to its quality and accuracy.
This software is copyright (C) 1991-2014, Thomas G. Lane, Guido Vollbeding.
All Rights Reserved except as specified below.
Permission is hereby granted to use, copy, modify, and distribute this
software (or portions thereof) for any purpose, without fee, subject to these
conditions:
(1) If any part of the source code for this software is distributed, then this
README file must be included, with this copyright and no-warranty notice
unaltered; and any additions, deletions, or changes to the original files
must be clearly indicated in accompanying documentation.
(2) If only executable code is distributed, then the accompanying
documentation must state that "this software is based in part on the work of
the Independent JPEG Group".
(3) Permission for use of this software is granted only if the user accepts
full responsibility for any undesirable consequences; the authors accept
NO LIABILITY for damages of any kind.
These conditions apply to any software derived from or based on the IJG code,
not just to the unmodified library. If you use our work, you ought to
acknowledge us.
Permission is NOT granted for the use of any IJG author's name or company name
in advertising or publicity relating to this software or products derived from
it. This software may be referred to only as "the Independent JPEG Group's
software".
We specifically permit and encourage the use of this software as the basis of
commercial products, provided that all warranty or liability claims are
assumed by the product vendor.
*/
static CONST_BITS: i32 = 13;
static PASS1_BITS: i32 = 2;
static FIX_0_298631336: i32 = 2446;
static FIX_0_390180644: i32 = 3196;
static FIX_0_541196100: i32 = 4433;
static FIX_0_765366865: i32 = 6270;
static FIX_0_899976223: i32 = 7373;
static FIX_1_175875602: i32 = 9633;
static FIX_1_501321110: i32 = 12299;
static FIX_1_847759065: i32 = 15137;
static FIX_1_961570560: i32 = 16069;
static FIX_2_053119869: i32 = 16819;
static FIX_2_562915447: i32 = 20995;
static FIX_3_072711026: i32 = 25172;
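//Each FIX_* constant is the corresponding DCT multiplier in CONST_BITS (13-bit)
//fixed point, i.e. FIX_K = round(K * 2^13); e.g. 0.541196100 * 8192 rounds to 4433.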
pub fn fdct(samples: &[u8], coeffs: &mut [i32]) {
//Pass 1: process rows.
//Results are scaled by sqrt(8) compared to a true DCT
//furthermore we scale the results by 2**PASS1_BITS
for y in range(0u, 8) {
let y0 = y * 8;
//Even part
let t0 = samples[y0 + 0] as i32 + samples[y0 + 7] as i32;
let t1 = samples[y0 + 1] as i32 + samples[y0 + 6] as i32;
let t2 = samples[y0 + 2] as i32 + samples[y0 + 5] as i32;
let t3 = samples[y0 + 3] as i32 + samples[y0 + 4] as i32;
let t10 = t0 + t3;
let t12 = t0 - t3;
let t11 = t1 + t2;
let t13 = t1 - t2;
let t0 = samples[y0 + 0] as i32 - samples[y0 + 7] as i32;
let t1 = samples[y0 + 1] as i32 - samples[y0 + 6] as i32;
let t2 = samples[y0 + 2] as i32 - samples[y0 + 5] as i32;
let t3 = samples[y0 + 3] as i32 - samples[y0 + 4] as i32;
//Apply unsigned -> signed conversion
coeffs[y0 + 0] = (t10 + t11 - 8 * 128) << PASS1_BITS as uint;
coeffs[y0 + 4] = (t10 - t11) << PASS1_BITS as uint;
let mut z1 = (t12 + t13) * FIX_0_541196100;
//Add fudge factor here for final descale
z1 += 1 << (CONST_BITS - PASS1_BITS - 1) as uint;
coeffs[y0 + 2] = (z1 + t12 * FIX_0_765366865) >> (CONST_BITS - PASS1_BITS) as uint;
coeffs[y0 + 6] = (z1 - t13 * FIX_1_847759065) >> (CONST_BITS - PASS1_BITS) as uint;
//Odd part
let t12 = t0 + t2;
let t13 = t1 + t3;
let mut z1 = (t12 + t13) * FIX_1_175875602;
//Add fudge factor here for final descale
z1 += 1 << (CONST_BITS - PASS1_BITS - 1) as uint;
let mut t12 = t12 * (-FIX_0_390180644);
let mut t13 = t13 * (-FIX_1_961570560);
t12 += z1;
t13 += z1;
let z1 = (t0 + t3) * (-FIX_0_899976223);
let mut t0 = t0 * FIX_1_501321110;
let mut t3 = t3 * FIX_0_298631336;
t0 += z1 + t12;
t3 += z1 + t13;
let z1 = (t1 + t2) * (-FIX_2_562915447);
let mut t1 = t1 * FIX_3_072711026;
let mut t2 = t2 * FIX_2_053119869;
t1 += z1 + t13;
t2 += z1 + t12;
coeffs[y0 + 1] = t0 >> (CONST_BITS - PASS1_BITS) as uint;
coeffs[y0 + 3] = t1 >> (CONST_BITS - PASS1_BITS) as uint;
coeffs[y0 + 5] = t2 >> (CONST_BITS - PASS1_BITS) as uint;
coeffs[y0 + 7] = t3 >> (CONST_BITS - PASS1_BITS) as uint;
}
//Pass 2: process columns
//We remove the PASS1_BITS scaling but leave the results scaled up by an
//overall factor of 8
for x in range(0u, 8).rev() {
//Even part
let t0 = coeffs[x + 8 * 0] + coeffs[x + 8 * 7];
let t1 = coeffs[x + 8 * 1] + coeffs[x + 8 * 6];
let t2 = coeffs[x + 8 * 2] + coeffs[x + 8 * 5];
let t3 = coeffs[x + 8 * 3] + coeffs[x + 8 * 4];
//Add fudge factor here for final descale
let t10 = t0 + t3 + (1 << (PASS1_BITS - 1) as uint);<|fim▁hole|> let t0 = coeffs[x + 8 * 0] - coeffs[x + 8 * 7];
let t1 = coeffs[x + 8 * 1] - coeffs[x + 8 * 6];
let t2 = coeffs[x + 8 * 2] - coeffs[x + 8 * 5];
let t3 = coeffs[x + 8 * 3] - coeffs[x + 8 * 4];
coeffs[x + 8 * 0] = (t10 + t11) >> PASS1_BITS as uint;
coeffs[x + 8 * 4] = (t10 - t11) >> PASS1_BITS as uint;
let mut z1 = (t12 + t13) * FIX_0_541196100;
//Add fudge factor here for final descale
z1 += 1 << (CONST_BITS + PASS1_BITS - 1) as uint;
coeffs[x + 8 * 2] = (z1 + t12 * FIX_0_765366865) >> (CONST_BITS + PASS1_BITS) as uint;
coeffs[x + 8 * 6] = (z1 - t13 * FIX_1_847759065) >> (CONST_BITS + PASS1_BITS) as uint;
//Odd part
let t12 = t0 + t2;
let t13 = t1 + t3;
let mut z1 = (t12 + t13) * FIX_1_175875602;
//Add fudge factor here for final descale
z1 += 1 << (CONST_BITS + PASS1_BITS - 1) as uint;
let mut t12 = t12 * (-FIX_0_390180644);
let mut t13 = t13 * (-FIX_1_961570560);
t12 += z1;
t13 += z1;
let z1 = (t0 + t3) * (-FIX_0_899976223);
let mut t0 = t0 * FIX_1_501321110;
let mut t3 = t3 * FIX_0_298631336;
t0 += z1 + t12;
t3 += z1 + t13;
let z1 = (t1 + t2) * (-FIX_2_562915447);
let mut t1 = t1 * FIX_3_072711026;
let mut t2 = t2 * FIX_2_053119869;
t1 += z1 + t13;
t2 += z1 + t12;
coeffs[x + 8 * 1] = t0 >> (CONST_BITS + PASS1_BITS) as uint;
coeffs[x + 8 * 3] = t1 >> (CONST_BITS + PASS1_BITS) as uint;
coeffs[x + 8 * 5] = t2 >> (CONST_BITS + PASS1_BITS) as uint;
coeffs[x + 8 * 7] = t3 >> (CONST_BITS + PASS1_BITS) as uint;
}
}
pub fn idct(coeffs: &[i32], samples: &mut [u8]) {
let mut tmp = [0i32, ..64];
for x in range(0u, 8).rev() {
if coeffs[x + 8 * 1] == 0 && coeffs[x + 8 * 2] == 0 && coeffs[x + 8 * 3] == 0 &&
coeffs[x + 8 * 4] == 0 && coeffs[x + 8 * 5] == 0 && coeffs[x + 8 * 6] == 0 &&
coeffs[x + 8 * 7] == 0 {
let dcval = coeffs[x + 8 * 0] << PASS1_BITS as uint;
tmp[x + 8 * 0] = dcval;
tmp[x + 8 * 1] = dcval;
tmp[x + 8 * 2] = dcval;
tmp[x + 8 * 3] = dcval;
tmp[x + 8 * 4] = dcval;
tmp[x + 8 * 5] = dcval;
tmp[x + 8 * 6] = dcval;
tmp[x + 8 * 7] = dcval;
continue
}
//Even part: reverse the even part of the forward DCT
let z2 = coeffs[x + 8 * 2];
let z3 = coeffs[x + 8 * 6];
let z1 = (z2 + z3) * FIX_0_541196100;
let t2 = z1 + z2 * FIX_0_765366865;
let t3 = z1 - z3 * FIX_1_847759065;
let mut z2 = coeffs[x + 8 * 0];
let mut z3 = coeffs[x + 8 * 4];
z2 <<= CONST_BITS as uint;
z3 <<= CONST_BITS as uint;
z2 += 1 << (CONST_BITS - PASS1_BITS - 1) as uint;
let t0 = z2 + z3;
let t1 = z2 - z3;
let t10 = t0 + t2;
let t13 = t0 - t2;
let t11 = t1 + t3;
let t12 = t1 - t3;
let t0 = coeffs[x + 8 * 7];
let t1 = coeffs[x + 8 * 5];
let t2 = coeffs[x + 8 * 3];
let t3 = coeffs[x + 8 * 1];
let z2 = t0 + t2;
let z3 = t1 + t3;
let z1 = (z2 + z3) * FIX_1_175875602;
let mut z2 = z2 * (-FIX_1_961570560);
let mut z3 = z3 * (-FIX_0_390180644);
z2 += z1;
z3 += z1;
let z1 = (t0 + t3) * (-FIX_0_899976223);
let mut t0 = t0 * FIX_0_298631336;
let mut t3 = t3 * FIX_1_501321110;
t0 += z1 + z2;
t3 += z1 + z3;
let z1 = (t1 + t2) * (-FIX_2_562915447);
let mut t1 = t1 * FIX_2_053119869;
let mut t2 = t2 * FIX_3_072711026;
t1 += z1 + z3;
t2 += z1 + z2;
tmp[x + 8 * 0] = (t10 + t3) >> (CONST_BITS - PASS1_BITS) as uint;
tmp[x + 8 * 7] = (t10 - t3) >> (CONST_BITS - PASS1_BITS) as uint;
tmp[x + 8 * 1] = (t11 + t2) >> (CONST_BITS - PASS1_BITS) as uint;
tmp[x + 8 * 6] = (t11 - t2) >> (CONST_BITS - PASS1_BITS) as uint;
tmp[x + 8 * 2] = (t12 + t1) >> (CONST_BITS - PASS1_BITS) as uint;
tmp[x + 8 * 5] = (t12 - t1) >> (CONST_BITS - PASS1_BITS) as uint;
tmp[x + 8 * 3] = (t13 + t0) >> (CONST_BITS - PASS1_BITS) as uint;
tmp[x + 8 * 4] = (t13 - t0) >> (CONST_BITS - PASS1_BITS) as uint;
}
for y in range(0u, 8) {
let y0 = y * 8;
let z2 = tmp[y0 + 2];
let z3 = tmp[y0 + 6];
let z1 = (z2 + z3) * FIX_0_541196100;
let t2 = z1 + z2 * FIX_0_765366865;
let t3 = z1 - z3 * FIX_1_847759065;
let z2 = tmp[y0 + 0] + (1 << (PASS1_BITS + 2) as uint);
let z3 = tmp[y0 + 4];
let t0 = (z2 + z3) << CONST_BITS as uint;
let t1 = (z2 - z3) << CONST_BITS as uint;
let t10 = t0 + t2;
let t13 = t0 - t2;
let t11 = t1 + t3;
let t12 = t1 - t3;
let t0 = tmp[y0 + 7];
let t1 = tmp[y0 + 5];
let t2 = tmp[y0 + 3];
let t3 = tmp[y0 + 1];
let z2 = t0 + t2;
let z3 = t1 + t3;
let z1 = (z2 + z3) * FIX_1_175875602;
let mut z2 = z2 * (-FIX_1_961570560);
let mut z3 = z3 * (-FIX_0_390180644);
z2 += z1;
z3 += z1;
let z1 = (t0 + t3) * (-FIX_0_899976223);
let mut t0 = t0 * FIX_0_298631336;
let mut t3 = t3 * FIX_1_501321110;
t0 += z1 + z2;
t3 += z1 + z3;
let z1 = (t1 + t2) * (-FIX_2_562915447);
let mut t1 = t1 * FIX_2_053119869;
let mut t2 = t2 * FIX_3_072711026;
t1 += z1 + z3;
t2 += z1 + z2;
let a = (t10 + t3) >> (CONST_BITS + PASS1_BITS + 3) as uint;
samples[y0 + 0] = level_shift_up(a);
let a = (t10 - t3) >> (CONST_BITS + PASS1_BITS + 3) as uint;
samples[y0 + 7] = level_shift_up(a);
let a = (t11 + t2) >> (CONST_BITS + PASS1_BITS + 3) as uint;
samples[y0 + 1] = level_shift_up(a);
let a = (t11 - t2) >> (CONST_BITS + PASS1_BITS + 3) as uint;
samples[y0 + 6] = level_shift_up(a);
let a = (t12 + t1) >> (CONST_BITS + PASS1_BITS + 3) as uint;
samples[y0 + 2] = level_shift_up(a);
let a = (t12 - t1) >> (CONST_BITS + PASS1_BITS + 3) as uint;
samples[y0 + 5] = level_shift_up(a);
let a = (t13 + t0) >> (CONST_BITS + PASS1_BITS + 3) as uint;
samples[y0 + 3] = level_shift_up(a);
let a = (t13 - t0) >> (CONST_BITS + PASS1_BITS + 3) as uint;
samples[y0 + 4] = level_shift_up(a);
}
}<|fim▁end|>
|
let t12 = t0 - t3;
let t11 = t1 + t2;
let t13 = t1 - t2;
|
<|file_name|>contrib.src.js<|end_file_name|><|fim▁begin|>/*! asynquence-contrib
v0.13.0 (c) Kyle Simpson
MIT License: http://getify.mit-license.org
*/
(function UMD(dependency,definition){
if (typeof module !== "undefined" && module.exports) {
// make dependency injection wrapper first
module.exports = function $$inject$dependency(dep) {
// only try to `require(..)` if dependency is a string module path
if (typeof dep == "string") {
try { dep = require(dep); }
catch (err) {
// dependency not yet fulfilled, so just return
// dependency injection wrapper again
return $$inject$dependency;
}
}
return definition(dep);
};
// if possible, immediately try to resolve wrapper
// (with peer dependency)
if (typeof dependency == "string") {
module.exports = module.exports( require("path").join("..",dependency) );
}
}
else if (typeof define == "function" && define.amd) { define([dependency],definition); }
else { definition(dependency); }
})(this.ASQ || "asynquence",function DEF(ASQ){
"use strict";
var ARRAY_SLICE = Array.prototype.slice,
ø = Object.create(null),
brand = "__ASQ__",
schedule = ASQ.__schedule,
tapSequence = ASQ.__tapSequence
;
function wrapGate(api,fns,success,failure,reset) {
fns = fns.map(function $$map(v,idx){
var def;
// tap any directly-provided sequences immediately
if (ASQ.isSequence(v)) {
def = { seq: v };
tapSequence(def);
return function $$fn(next) {
def.seq.val(function $$val(){
success(next,idx,ARRAY_SLICE.call(arguments));
})
.or(function $$or(){
failure(next,idx,ARRAY_SLICE.call(arguments));
});
};
}
else {
return function $$fn(next) {
var args = ARRAY_SLICE.call(arguments);
args[0] = function $$next() {
success(next,idx,ARRAY_SLICE.call(arguments));
};
args[0].fail = function $$fail() {
failure(next,idx,ARRAY_SLICE.call(arguments));
};
args[0].abort = function $$abort() {
reset();
};
args[0].errfcb = function $$errfcb(err) {
if (err) {
failure(next,idx,[err]);
}
else {
success(next,idx,ARRAY_SLICE.call(arguments,1));
}
};
v.apply(ø,args);
};
}
});
api.then(function $$then(){
var args = ARRAY_SLICE.call(arguments);
fns.forEach(function $$each(fn){
fn.apply(ø,args);
});
});
}
function isPromise(v) {
var val_type = typeof v;
return (
v !== null &&
(
val_type == "object" ||
val_type == "function"
) &&
!ASQ.isSequence(v) &&
// NOTE: `then` duck-typing of promises is stupid
typeof v.then == "function"
);
}
// "after"
ASQ.extend("after",function $$extend(api,internals){
return function $$after(num) {
var orig_args = arguments.length > 1 ?
ARRAY_SLICE.call(arguments,1) :
void 0
;
num = +num || 0;
api.then(function $$then(done){
var args = orig_args || ARRAY_SLICE.call(arguments,1);
setTimeout(function $$set$timeout(){
done.apply(ø,args);
},num);
});
return api;
};
});
ASQ.after = function $$after() {
return ASQ().after.apply(ø,arguments);
};
// "any"
ASQ.extend("any",function $$extend(api,internals){
return function $$any() {
if (internals("seq_error") || internals("seq_aborted") ||
arguments.length === 0
) {
return api;
}
var fns = ARRAY_SLICE.call(arguments);
api.then(function $$then(done){
function reset() {
finished = true;
error_messages.length = 0;
success_messages.length = 0;
}
function complete(trigger) {
if (success_messages.length > 0) {
// any successful segment's message(s) sent
// to main sequence to proceed as success
success_messages.length = fns.length;
trigger.apply(ø,success_messages);
}
else {
// send errors into main sequence
error_messages.length = fns.length;
trigger.fail.apply(ø,error_messages);
}
reset();
}
function success(trigger,idx,args) {
if (!finished) {
completed++;
success_messages[idx] =
args.length > 1 ?
ASQ.messages.apply(ø,args) :
args[0]
;
// all segments complete?
if (completed === fns.length) {
finished = true;
complete(trigger);
}
}
}
function failure(trigger,idx,args) {
if (!finished &&
!(idx in error_messages)
) {
completed++;
error_messages[idx] =
args.length > 1 ?
ASQ.messages.apply(ø,args) :
args[0]
;
}
// all segments complete?
if (!finished &&
completed === fns.length
) {
finished = true;
complete(trigger);
}
}
var completed = 0, error_messages = [], finished = false,
success_messages = [],
sq = ASQ.apply(ø,ARRAY_SLICE.call(arguments,1))
;
wrapGate(sq,fns,success,failure,reset);
sq.pipe(done);
});
return api;
};
});
// "errfcb"
ASQ.extend("errfcb",function $$extend(api,internals){
return function $$errfcb() {
// create a fake sequence to extract the callbacks
var sq = {
val: function $$then(cb){ sq.val_cb = cb; return sq; },
or: function $$or(cb){ sq.or_cb = cb; return sq; }
};
// trick `seq(..)`s checks for a sequence
sq[brand] = true;
// immediately register our fake sequence on the
// main sequence
api.seq(sq);
// provide the "error-first" callback
return function $$errorfirst$callback(err) {
if (err) {
sq.or_cb(err);
}
else {
sq.val_cb.apply(ø,ARRAY_SLICE.call(arguments,1));
}
};
};
});
// "failAfter"
ASQ.extend("failAfter",function $$extend(api,internals){
return function $$failAfter(num) {
var args = arguments.length > 1 ?
ARRAY_SLICE.call(arguments,1) :
void 0
;
num = +num || 0;
api.then(function $$then(done){
setTimeout(function $$set$timeout(){
done.fail.apply(ø,args);
},num);
});
return api;
};
});
ASQ.failAfter = function $$fail$after() {
return ASQ().failAfter.apply(ø,arguments);
};
// "first"
ASQ.extend("first",function $$extend(api,internals){
return function $$first() {
if (internals("seq_error") || internals("seq_aborted") ||
arguments.length === 0
) {
return api;
}
var fns = ARRAY_SLICE.call(arguments);
api.then(function $$then(done){
function reset() {
error_messages.length = 0;
}
function success(trigger,idx,args) {
if (!finished) {
finished = true;
// first successful segment triggers
// main sequence to proceed as success
trigger(
args.length > 1 ?
ASQ.messages.apply(ø,args) :
args[0]
);
reset();
}
}
function failure(trigger,idx,args) {
if (!finished &&
!(idx in error_messages)
) {
completed++;
error_messages[idx] =
args.length > 1 ?
ASQ.messages.apply(ø,args) :
args[0]
;
// all segments complete without success?
if (completed === fns.length) {
finished = true;
// send errors into main sequence
error_messages.length = fns.length;
trigger.fail.apply(ø,error_messages);
reset();
}
}
}
var completed = 0, error_messages = [], finished = false,
sq = ASQ.apply(ø,ARRAY_SLICE.call(arguments,1))
;
wrapGate(sq,fns,success,failure,reset);
sq.pipe(done);
});
return api;<|fim▁hole|>"use strict";
(function IIFE() {
// filter out already-resolved queue entries
function filterResolved(queue) {
return queue.filter(function $$filter(entry) {
return !entry.resolved;
});
}
function closeQueue(queue, finalValue) {
queue.forEach(function $$each(iter) {
if (!iter.resolved) {
iter.next();
iter.next(finalValue);
}
});
queue.length = 0;
}
function channel(bufSize) {
var ch = {
close: function $$close() {
ch.closed = true;
closeQueue(ch.put_queue, false);
closeQueue(ch.take_queue, ASQ.csp.CLOSED);
},
closed: false,
messages: [],
put_queue: [],
take_queue: [],
buffer_size: +bufSize || 0
};
return ch;
}
function unblock(iter) {
if (iter && !iter.resolved) {
iter.next(iter.next().value);
}
}
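// put(..) delivers immediately while the channel buffer has room; otherwise it
// queues a blocked put and returns a sequence that resolves once a take drains it.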
function put(channel, value) {
var ret;
if (channel.closed) {
return false;
}
// remove already-resolved entries
channel.put_queue = filterResolved(channel.put_queue);
channel.take_queue = filterResolved(channel.take_queue);
// immediate put?
if (channel.messages.length < channel.buffer_size) {
channel.messages.push(value);
unblock(channel.take_queue.shift());
return true;
}
// queued put
else {
channel.put_queue.push(
// make a notifiable iterable for 'put' blocking
ASQ.iterable().then(function $$then() {
if (!channel.closed) {
channel.messages.push(value);
return true;
} else {
return false;
}
}));
// wrap a sequence/promise around the iterable
ret = ASQ(channel.put_queue[channel.put_queue.length - 1]);
// take waiting on this queued put?
if (channel.take_queue.length > 0) {
unblock(channel.put_queue.shift());
unblock(channel.take_queue.shift());
}
return ret;
}
}
function putAsync(channel, value, cb) {
var ret = ASQ(put(channel, value));
if (cb && typeof cb == "function") {
ret.val(cb);
} else {
return ret;
}
}
function take(channel) {
var ret;
try {
ret = takem(channel);
} catch (err) {
ret = err;
}
if (ASQ.isSequence(ret)) {
ret.pCatch(function $$pcatch(err) {
return err;
});
}
return ret;
}
function takeAsync(channel, cb) {
var ret = ASQ(take(channel));
if (cb && typeof cb == "function") {
ret.val(cb);
} else {
return ret;
}
}
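// takem(..) is the error-sensitive take: an Error value pulled off the channel
// is thrown (or propagated as a failure) instead of being returned normally.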
function takem(channel) {
var msg;
if (channel.closed) {
return ASQ.csp.CLOSED;
}
// remove already-resolved entries
channel.put_queue = filterResolved(channel.put_queue);
channel.take_queue = filterResolved(channel.take_queue);
// immediate take?
if (channel.messages.length > 0) {
msg = channel.messages.shift();
unblock(channel.put_queue.shift());
if (msg instanceof Error) {
throw msg;
}
return msg;
}
// queued take
else {
channel.take_queue.push(
// make a notifiable iterable for 'take' blocking
ASQ.iterable().then(function $$then() {
if (!channel.closed) {
var v = channel.messages.shift();
if (v instanceof Error) {
throw v;
}
return v;
} else {
return ASQ.csp.CLOSED;
}
}));
// wrap a sequence/promise around the iterable
msg = ASQ(channel.take_queue[channel.take_queue.length - 1]);
// put waiting on this take?
if (channel.put_queue.length > 0) {
unblock(channel.put_queue.shift());
unblock(channel.take_queue.shift());
}
return msg;
}
}
function takemAsync(channel, cb) {
var ret = ASQ(takem(channel));
if (cb && typeof cb == "function") {
ret.pThen(cb, cb);
} else {
return ret.val(function $$val(v) {
if (v instanceof Error) {
throw v;
}
return v;
});
}
}
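// alts(..) races a list of channel actions (a bare channel means take, a
// [channel, value] pair means put) and settles with the first action that can
// complete, yielding a { value, channel } result.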
function alts(actions) {
var closed,
open,
handlers,
i,
isq,
ret,
resolved = false;
// used `alts(..)` incorrectly?
if (!Array.isArray(actions) || actions.length == 0) {
throw Error("Invalid usage");
}
closed = [];
open = [];
handlers = [];
// separate actions by open/closed channel status
actions.forEach(function $$each(action) {
var channel = Array.isArray(action) ? action[0] : action;
// remove already-resolved entries
channel.put_queue = filterResolved(channel.put_queue);
channel.take_queue = filterResolved(channel.take_queue);
if (channel.closed) {
closed.push(channel);
} else {
open.push(action);
}
});
// if no channels are still open, we're done
if (open.length == 0) {
return { value: ASQ.csp.CLOSED, channel: closed };
}
// can any channel action be executed immediately?
for (i = 0; i < open.length; i++) {
// put action
if (Array.isArray(open[i])) {
// immediate put?
if (open[i][0].messages.length < open[i][0].buffer_size) {
return { value: put(open[i][0], open[i][1]), channel: open[i][0] };
}
}
// immediate take?
else if (open[i].messages.length > 0) {
return { value: take(open[i]), channel: open[i] };
}
}
isq = ASQ.iterable();
var ret = ASQ(isq);
// setup channel action handlers
for (i = 0; i < open.length; i++) {
(function iteration(action, channel, value) {
// put action?
if (Array.isArray(action)) {
channel = action[0];
value = action[1];
// define put handler
handlers.push(ASQ.iterable().then(function $$then() {
resolved = true;
// mark all handlers across this `alts(..)` as resolved now
handlers = handlers.filter(function $$filter(handler) {
return !(handler.resolved = true);
});
// channel still open?
if (!channel.closed) {
channel.messages.push(value);
isq.next({ value: true, channel: channel });
}
// channel already closed?
else {
isq.next({ value: false, channel: channel });
}
}));
// queue up put handler
channel.put_queue.push(handlers[handlers.length - 1]);
// take waiting on this queued put?
if (channel.take_queue.length > 0) {
schedule(function handleUnblocking() {
if (!resolved) {
unblock(channel.put_queue.shift());
unblock(channel.take_queue.shift());
}
}, 0);
}
}
// take action?
else {
channel = action;
// define take handler
handlers.push(ASQ.iterable().then(function $$then() {
resolved = true;
// mark all handlers across this `alts(..)` as resolved now
handlers = handlers.filter(function $$filter(handler) {
return !(handler.resolved = true);
});
// channel still open?
if (!channel.closed) {
isq.next({ value: channel.messages.shift(), channel: channel });
}
// channel already closed?
else {
isq.next({ value: ASQ.csp.CLOSED, channel: channel });
}
}));
// queue up take handler
channel.take_queue.push(handlers[handlers.length - 1]);
// put waiting on this queued take?
if (channel.put_queue.length > 0) {
schedule(function handleUnblocking() {
if (!resolved) {
unblock(channel.put_queue.shift());
unblock(channel.take_queue.shift());
}
});
}
}
})(open[i]);
}
return ret;
}
function altsAsync(chans, cb) {
var ret = ASQ(alts(chans));
if (cb && typeof cb == "function") {
ret.pThen(cb, cb);
} else {
return ret;
}
}
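// timeout(delay) returns a channel that simply closes itself after `delay` ms,
// which lets callers race it against other channels (e.g. via alts) as a timeout.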
function timeout(delay) {
var ch = channel();
setTimeout(ch.close, delay);
return ch;
}
function go(gen, args) {
// goroutine arguments passed?
if (arguments.length > 1) {
if (!args || !Array.isArray(args)) {
args = [args];
}
} else {
args = [];
}
return regeneratorRuntime.mark(function $$go(token) {
var unblock, ret, msg, err, type, done, it;
return regeneratorRuntime.wrap(function $$go$(context$3$0) {
while (1) switch (context$3$0.prev = context$3$0.next) {
case 0:
unblock = function unblock() {
if (token.block && !token.block.marked) {
token.block.marked = true;
token.block.next();
}
};
done = false;
// keep track of how many goroutines are running
// so we can infer when we're done go'ing
token.go_count = (token.go_count || 0) + 1;
// need to initialize a set of goroutines?
if (token.go_count === 1) {
// create a default channel for these goroutines
token.channel = channel();
token.channel.messages = token.messages;
token.channel.go = function $$go() {
// unblock the goroutine handling for these
// new goroutine(s)?
unblock();
// add the goroutine(s) to the handling queue
token.add(go.apply(ø, arguments));
};
// starting out with initial channel messages?
if (token.channel.messages.length > 0) {
// fake back-pressure blocking for each
token.channel.put_queue = token.channel.messages.map(function $$map() {
// make a notifiable iterable for 'put' blocking
return ASQ.iterable().then(function $$then() {
unblock(token.channel.take_queue.shift());
return !token.channel.closed;
});
});
}
}
// initialize the generator
it = gen.apply(ø, [token.channel].concat(args));
(function iterate() {
function next() {
// keep going with next step in goroutine?
if (!done) {
iterate();
}
// unblock overall goroutine handling to
// continue with other goroutines
else {
unblock();
}
}
// has a resumption value been achieved yet?
if (!ret) {
// try to resume the goroutine
try {
// resume with injected exception?
if (err) {
ret = it["throw"](err);
err = null;
}
// resume normally
else {
ret = it.next(msg);
}
}
// resumption failed, so bail
catch (e) {
done = true;
err = e;
msg = null;
unblock();
return;
}
// keep track of the result of the resumption
done = ret.done;
ret = ret.value;
type = typeof ret;
// if this goroutine is complete, unblock the
// overall goroutine handling
if (done) {
unblock();
}
// received a thenable/promise back?
if (isPromise(ret)) {
ret = ASQ().promise(ret);
}
// wait for the value?
if (ASQ.isSequence(ret)) {
ret.val(function $$val() {
ret = null;
msg = arguments.length > 1 ? ASQ.messages.apply(ø, arguments) : arguments[0];
next();
}).or(function $$or() {
ret = null;
msg = arguments.length > 1 ? ASQ.messages.apply(ø, arguments) : arguments[0];
if (msg instanceof Error) {
err = msg;
msg = null;
}
next();
});
}
// immediate value, prepare it to go right back in
else {
msg = ret;
ret = null;
next();
}
}
})();
// keep this goroutine alive until completion
case 6:
if (done) {
context$3$0.next = 15;
break;
}
context$3$0.next = 9;
return token;
case 9:
if (!(!done && !token.block)) {
context$3$0.next = 13;
break;
}
context$3$0.next = 12;
return token.block = ASQ.iterable();
case 12:
token.block = false;
case 13:
context$3$0.next = 6;
break;
case 15:
// this goroutine is done now
token.go_count--;
// all goroutines done?
if (token.go_count === 0) {
// any lingering blocking need to be cleaned up?
unblock();
// capture any untaken messages
msg = ASQ.messages.apply(ø, token.messages);
// need to implicitly force-close channel?
if (token.channel && !token.channel.closed) {
token.channel.closed = true;
token.channel.put_queue.length = token.channel.take_queue.length = 0;
token.channel.close = token.channel.go = token.channel.messages = null;
}
token.channel = null;
}
// make sure leftover error or message are
// passed along
if (!err) {
context$3$0.next = 21;
break;
}
throw err;
case 21:
if (!(token.go_count === 0)) {
context$3$0.next = 25;
break;
}
return context$3$0.abrupt("return", msg);
case 25:
return context$3$0.abrupt("return", token);
case 26:
case "end":
return context$3$0.stop();
}
}, $$go, this);
});
}
ASQ.csp = {
chan: channel,
put: put,
putAsync: putAsync,
take: take,
takeAsync: takeAsync,
takem: takem,
takemAsync: takemAsync,
alts: alts,
altsAsync: altsAsync,
timeout: timeout,
go: go,
CLOSED: {}
};
})();
// unblock the overall goroutine handling
// transfer control to another goroutine
// need to block overall goroutine handling
// while idle?
// wait here while idle
// "ASQ.iterable()"
"use strict";
(function IIFE() {
var template;
ASQ.iterable = function $$iterable() {
function throwSequenceErrors() {
throw sequence_errors.length === 1 ? sequence_errors[0] : sequence_errors;
}
function notifyErrors() {
var fn;
seq_tick = null;
if (seq_error) {
if (or_queue.length === 0 && !error_reported) {
error_reported = true;
throwSequenceErrors();
}
while (or_queue.length > 0) {
error_reported = true;
fn = or_queue.shift();
try {
fn.apply(ø, sequence_errors);
} catch (err) {
if (checkBranding(err)) {
sequence_errors = sequence_errors.concat(err);
} else {
sequence_errors.push(err);
}
if (or_queue.length === 0) {
throwSequenceErrors();
}
}
}
}
}
function val() {
if (seq_error || seq_aborted || arguments.length === 0) {
return sequence_api;
}
var args = ARRAY_SLICE.call(arguments).map(function mapper(arg) {
if (typeof arg != "function") return function $$val() {
return arg;
};else return arg;
});
val_queue.push.apply(val_queue, args);
return sequence_api;
}
function or() {
if (seq_aborted || arguments.length === 0) {
return sequence_api;
}
or_queue.push.apply(or_queue, arguments);
if (!seq_tick) {
seq_tick = schedule(notifyErrors);
}
return sequence_api;
}
function pipe() {
if (seq_aborted || arguments.length === 0) {
return sequence_api;
}
ARRAY_SLICE.call(arguments).forEach(function $$each(fn) {
val(fn).or(fn.fail);
});
return sequence_api;
}
function next() {
if (seq_error || seq_aborted || val_queue.length === 0) {
if (val_queue.length > 0) {
$throw$("Sequence cannot be iterated");
}
return { done: true };
}
try {
return { value: val_queue.shift().apply(ø, arguments) };
} catch (err) {
if (ASQ.isMessageWrapper(err)) {
$throw$.apply(ø, err);
} else {
$throw$(err);
}
return {};
}
}
function $throw$() {
if (seq_error || seq_aborted) {
return sequence_api;
}
sequence_errors.push.apply(sequence_errors, arguments);
seq_error = true;
if (!seq_tick) {
seq_tick = schedule(notifyErrors);
}
return sequence_api;
}
function $return$(val) {
if (seq_error || seq_aborted) {
val = void 0;
}
abort();
return { done: true, value: val };
}
function abort() {
if (seq_error || seq_aborted) {
return;
}
seq_aborted = true;
clearTimeout(seq_tick);
seq_tick = null;
val_queue.length = or_queue.length = sequence_errors.length = 0;
}
function duplicate() {
var isq;
template = {
val_queue: val_queue.slice(),
or_queue: or_queue.slice()
};
isq = ASQ.iterable();
template = null;
return isq;
}
// opt-out of global error reporting for this sequence
function defer() {
or_queue.push(function $$ignored() {});
return sequence_api;
}
// ***********************************************
// Object branding utilities
// ***********************************************
function brandIt(obj) {
Object.defineProperty(obj, brand, {
enumerable: false,
value: true
});
return obj;
}
var sequence_api,
seq_error = false,
error_reported = false,
seq_aborted = false,
seq_tick,
val_queue = [],
or_queue = [],
sequence_errors = [];
// ***********************************************
// Setup the ASQ.iterable() public API
// ***********************************************
sequence_api = brandIt({
val: val,
then: val,
or: or,
pipe: pipe,
next: next,
"throw": $throw$,
"return": $return$,
abort: abort,
duplicate: duplicate,
defer: defer
});
// useful for ES6 `for..of` loops,
// add `@@iterator` to simply hand back
// our iterable sequence itself!
sequence_api[typeof Symbol == "function" && Symbol.iterator || "@@iterator"] = function $$iter() {
return sequence_api;
};
// templating the iterable-sequence setup?
if (template) {
val_queue = template.val_queue.slice(0);
or_queue = template.or_queue.slice(0);
}
// treat ASQ.iterable() constructor parameters as having been
// passed to `val()`
sequence_api.val.apply(ø, arguments);
return sequence_api;
};
})();// "last"
ASQ.extend("last",function $$extend(api,internals){
return function $$last() {
if (internals("seq_error") || internals("seq_aborted") ||
arguments.length === 0
) {
return api;
}
var fns = ARRAY_SLICE.call(arguments);
api.then(function $$then(done){
function reset() {
finished = true;
error_messages.length = 0;
success_messages = null;
}
function complete(trigger) {
if (success_messages != null) {
// last successful segment's message(s) sent
// to main sequence to proceed as success
trigger(
success_messages.length > 1 ?
ASQ.messages.apply(ø,success_messages) :
success_messages[0]
);
}
else {
// send errors into main sequence
error_messages.length = fns.length;
trigger.fail.apply(ø,error_messages);
}
reset();
}
function success(trigger,idx,args) {
if (!finished) {
completed++;
success_messages = args;
// all segments complete?
if (completed === fns.length) {
finished = true;
complete(trigger);
}
}
}
function failure(trigger,idx,args) {
if (!finished &&
!(idx in error_messages)
) {
completed++;
error_messages[idx] =
args.length > 1 ?
ASQ.messages.apply(ø,args) :
args[0]
;
}
// all segments complete?
if (!finished &&
completed === fns.length
) {
finished = true;
complete(trigger);
}
}
var completed = 0, error_messages = [], finished = false,
sq = ASQ.apply(ø,ARRAY_SLICE.call(arguments,1)),
success_messages
;
wrapGate(sq,fns,success,failure,reset);
sq.pipe(done);
});
return api;
};
});
// "map"
ASQ.extend("map",function $$extend(api,internals){
return function $$map(pArr,pEach) {
if (internals("seq_error") || internals("seq_aborted")) {
return api;
}
api.seq(function $$seq(){
var tmp, args = ARRAY_SLICE.call(arguments),
arr = pArr, each = pEach;
// if missing `map(..)` args, use value-messages (if any)
if (!each) each = args.shift();
if (!arr) arr = args.shift();
// if arg types in reverse order (each,arr), swap
if (typeof arr === "function" && Array.isArray(each)) {
tmp = arr;
arr = each;
each = tmp;
}
return ASQ.apply(ø,args)
.gate.apply(ø,arr.map(function $$map(item){
return function $$segment(){
each.apply(ø,[item].concat(ARRAY_SLICE.call(arguments)));
};
}));
})
.val(function $$val(){
// collect all gate segment output into one value-message
// Note: return a normal array here, not a message wrapper!
return ARRAY_SLICE.call(arguments);
});
return api;
};
});
// "none"
ASQ.extend("none",function $$extend(api,internals){
return function $$none() {
if (internals("seq_error") || internals("seq_aborted") ||
arguments.length === 0
) {
return api;
}
var fns = ARRAY_SLICE.call(arguments);
api.then(function $$then(done){
function reset() {
finished = true;
error_messages.length = 0;
success_messages.length = 0;
}
function complete(trigger) {
if (success_messages.length > 0) {
// any successful segment's message(s) sent
// to main sequence to proceed as **error**
success_messages.length = fns.length;
trigger.fail.apply(ø,success_messages);
}
else {
// send errors as **success** to main sequence
error_messages.length = fns.length;
trigger.apply(ø,error_messages);
}
reset();
}
function success(trigger,idx,args) {
if (!finished) {
completed++;
success_messages[idx] =
args.length > 1 ?
ASQ.messages.apply(ø,args) :
args[0]
;
// all segments complete?
if (completed === fns.length) {
finished = true;
complete(trigger);
}
}
}
function failure(trigger,idx,args) {
if (!finished &&
!(idx in error_messages)
) {
completed++;
error_messages[idx] =
args.length > 1 ?
ASQ.messages.apply(ø,args) :
args[0]
;
}
// all segments complete?
if (!finished &&
completed === fns.length
) {
finished = true;
complete(trigger);
}
}
var completed = 0, error_messages = [], finished = false,
sq = ASQ.apply(ø,ARRAY_SLICE.call(arguments,1)),
success_messages = []
;
wrapGate(sq,fns,success,failure,reset);
sq.pipe(done);
});
return api;
};
});
// "pThen"
ASQ.extend("pThen",function $$extend(api,internals){
return function $$pthen(success,failure) {
if (internals("seq_aborted")) {
return api;
}
var ignore_success_handler = false, ignore_failure_handler = false;
if (typeof success === "function") {
api.then(function $$then(done){
if (!ignore_success_handler) {
var ret, msgs = ASQ.messages.apply(ø,arguments);
msgs.shift();
if (msgs.length === 1) {
msgs = msgs[0];
}
ignore_failure_handler = true;
try {
ret = success(msgs);
}
catch (err) {
if (!ASQ.isMessageWrapper(err)) {
err = [err];
}
done.fail.apply(ø,err);
return;
}
// returned a sequence?
if (ASQ.isSequence(ret)) {
ret.pipe(done);
}
// returned a message wrapper?
else if (ASQ.isMessageWrapper(ret)) {
done.apply(ø,ret);
}
// returned a promise/thenable?
else if (isPromise(ret)) {
ret.then(done,done.fail);
}
// just a normal value to pass along
else {
done(ret);
}
}
else {
done.apply(ø,ARRAY_SLICE.call(arguments,1));
}
});
}
if (typeof failure === "function") {
api.or(function $$or(){
if (!ignore_failure_handler) {
var ret, msgs = ASQ.messages.apply(ø,arguments), smgs,
or_queue = ARRAY_SLICE.call(internals("or_queue"))
;
if (msgs.length === 1) {
msgs = msgs[0];
}
ignore_success_handler = true;
// NOTE: if this call throws, that'll automatically
// be handled by core as we'd want it to be
ret = failure(msgs);
// if we get this far:
// first, inject return value (if any) as
// next step's sequence messages
smgs = internals("sequence_messages");
smgs.length = 0;
if (typeof ret !== "undefined") {
if (!ASQ.isMessageWrapper(ret)) {
ret = [ret];
}
smgs.push.apply(smgs,ret);
}
// reset internal error state, because we've exclusively
// handled any errors up to this point of the sequence
internals("sequence_errors").length = 0;
internals("seq_error",false);
internals("then_ready",true);
// temporarily empty the or-queue
internals("or_queue").length = 0;
// make sure to schedule success-procession on the chain
api.val(function $$val(){
// pass thru messages
return ASQ.messages.apply(ø,arguments);
});
// at next cycle, reinstate the or-queue (if any)
if (or_queue.length > 0) {
schedule(function $$schedule(){
api.or.apply(ø,or_queue);
});
}
}
});
}
return api;
};
});
// "pCatch"
ASQ.extend("pCatch",function $$extend(api,internals){
return function $$pcatch(failure) {
if (internals("seq_aborted")) {
return api;
}
api.pThen(void 0,failure);
return api;
};
});
// "race"
ASQ.extend("race",function $$extend(api,internals){
return function $$race() {
if (internals("seq_error") || internals("seq_aborted") ||
arguments.length === 0
) {
return api;
}
var fns = ARRAY_SLICE.call(arguments)
.map(function $$map(v){
var def;
// tap any directly-provided sequences immediately
if (ASQ.isSequence(v)) {
def = { seq: v };
tapSequence(def);
return function $$fn(done) {
def.seq.pipe(done);
};
}
else return v;
});
api.then(function $$then(done){
var args = ARRAY_SLICE.call(arguments);
fns.forEach(function $$each(fn){
fn.apply(ø,args);
});
});
return api;
};
});
// "react" (reactive sequences)
ASQ.react = function $$react(reactor) {
function next() {
if (template) {
var sq = template.duplicate();
sq.unpause.apply(ø,arguments);
return sq;
}
return ASQ(function $$asq(){ throw "Disabled Sequence"; });
}
function registerTeardown(fn) {
if (template && typeof fn === "function") {
teardowns.push(fn);
}
}
var template = ASQ().duplicate(),
teardowns = []
;
// add reactive sequence kill switch
template.stop = function $$stop() {
if (template) {
template = null;
teardowns.forEach(Function.call,Function.call);
teardowns.length = 0;
}
};
next.onStream = function $$onStream() {
ARRAY_SLICE.call(arguments)
.forEach(function $$each(stream){
stream.on("data",next);
stream.on("error",next);
});
};
next.unStream = function $$unStream() {
ARRAY_SLICE.call(arguments)
.forEach(function $$each(stream){
stream.removeListener("data",next);
stream.removeListener("error",next);
});
};
// make sure `reactor(..)` is called async
ASQ.__schedule(function $$schedule(){
reactor.call(template,next,registerTeardown);
});
return template;
};
// "react" helpers
(function IIFE(){
function tapSequences() {
function tapSequence(seq) {
// temporary `trigger` which, if called before being replaced
// below, creates replacement proxy sequence with the
// event message(s) re-fired
function trigger() {
var args = ARRAY_SLICE.call(arguments);
def.seq = ASQ.react(function $$react(next){
next.apply(ø,args);
});
}
if (ASQ.isSequence(seq)) {
var def = { seq: seq };
// listen for events from the sequence-stream
seq.val(function $$val(){
trigger.apply(ø,arguments);
return ASQ.messages.apply(ø,arguments);
});
// make a reactive sequence to act as a proxy to the original
// sequence
def.seq = ASQ.react(function $$react(next){
// replace the temporary trigger (created above)
// with this proxy's trigger
trigger = next;
});
return def;
}
}
return ARRAY_SLICE.call(arguments)
.map(tapSequence)
.filter(Boolean);
}
function createReactOperator(buffer) {
return function $$react$operator(){
function reactor(next,registerTeardown){
function processSequence(def) {
// sequence-stream event listener
function trigger() {
var args = ASQ.messages.apply(ø,arguments);
// still observing sequence-streams?
if (seqs && seqs.length > 0) {
// store event message(s), if any
seq_events[seq_id] = [].concat(
buffer ? seq_events[seq_id] : [],
args.length > 0 ? [args] : undefined
);
// collect event message(s) across the
// sequence-stream sources
var messages = seq_events.reduce(function reducer(msgs,eventList,idx){
if (eventList.length > 0) msgs.push(eventList[0]);
return msgs;
},[]);
// did all sequence-streams get an event?
if (messages.length == seq_events.length) {
if (messages.length == 1) messages = messages[0];
// fire off reactive sequence instance
next.apply(ø,messages);
// discard stored event message(s)
seq_events.forEach(function $$each(eventList){
eventList.shift();
});
}
}
// keep sequence going
return args;
}
var seq_id = seq_events.length;
seq_events.push([]);
def.seq.val(trigger);
}
// process all sequence-streams
seqs.forEach(processSequence);
// listen for stop() of reactive sequence
registerTeardown(function $$teardown(){
seqs = seq_events = null;
});
}
var seq_events = [],
// observe all sequence-streams
seqs = tapSequences.apply(null,arguments)
;
if (seqs.length == 0) return;
return ASQ.react(reactor);
};
}
ASQ.react.all = ASQ.react.zip = createReactOperator(/*buffer=*/true);
ASQ.react.latest = ASQ.react.combine = createReactOperator(/*buffer=false*/);
ASQ.react.any = ASQ.react.merge = function $$react$any(){
function reactor(next,registerTeardown){
function processSequence(def){
function trigger(){
var args = ASQ.messages.apply(ø,arguments);
// still observing sequence-streams?
if (seqs && seqs.length > 0) {
// fire off reactive sequence instance
next.apply(ø,args);
}
// keep sequence going
return args;
}
// sequence-stream event listener
def.seq.val(trigger);
}
// observe all sequence-streams
seqs.forEach(processSequence);
// listen for stop() of reactive sequence
registerTeardown(function $$teardown(){
seqs = null;
});
}
// observe all sequence-streams
var seqs = tapSequences.apply(null,arguments);
if (seqs.length == 0) return;
return ASQ.react(reactor);
};
ASQ.react.distinct = function $$react$distinct(seq){
function filterer() {
function isDuplicate(msgs) {
return (
msgs.length == messages.length &&
msgs.every(function $$every(val,idx){
return val === messages[idx];
})
);
}
var messages = ASQ.messages.apply(ø,arguments);
// any messages to check against?
if (messages.length > 0) {
// messages already sent before?
if (prev_messages.some(isDuplicate)) {
// bail on duplicate messages
return false;
}
// save messages for future distinct checking
prev_messages.push(messages);
}
// allow distinct non-duplicate value through
return true;
}
var prev_messages = [];
return ASQ.react.filter(seq,filterer);
};
ASQ.react.filter = function $$react$filter(seq,filterer){
function reactor(next,registerTeardown) {
function trigger(){
var messages = ASQ.messages.apply(ø,arguments);
if (filterer && filterer.apply(ø,messages)) {
// fire off reactive sequence instance
next.apply(ø,messages);
}
// keep sequence going
return messages;
}
// sequence-stream event listener
def.seq.val(trigger);
// listen for stop() of reactive sequence
registerTeardown(function $$teardown(){
def = filterer = null;
});
}
// observe sequence-stream
var def = tapSequences(seq)[0];
if (!def) return;
return ASQ.react(reactor);
};
ASQ.react.fromObservable = function $$react$from$observable(obsv){
function reactor(next,registerTeardown){
// process buffer (if any)
buffer.forEach(next);
buffer.length = 0;
// start non-buffered notifications?
if (!buffer.complete) {
notify = next;
}
registerTeardown(function $$teardown(){
obsv.dispose();
});
}
function notify(v) {
buffer.push(v);
}
var buffer = [];
obsv.subscribe(
function $$on$next(v){
notify(v);
},
function $$on$error(){},
function $$on$complete(){
buffer.complete = true;
obsv.dispose();
}
);
return ASQ.react(reactor);
};
ASQ.extend("toObservable",function $$extend(api,internals){
return function $$to$observable(){
function init(observer) {
function define(pair){
function listen(){
var args = ASQ.messages.apply(ø,arguments);
observer[pair[1]].apply(observer,
args.length == 1 ? [args[0]] : args
);
return args;
}
api[pair[0]](listen);
}
[["val","onNext"],["or","onError"]]
.forEach(define);
}
return Rx.Observable.create(init);
};
});
})();
// "runner"
ASQ.extend("runner",function $$extend(api,internals){
return function $$runner() {
if (internals("seq_error") || internals("seq_aborted") ||
arguments.length === 0
) {
return api;
}
var args = ARRAY_SLICE.call(arguments);
api
.then(function $$then(mainDone){
function wrap(v) {
// function? expected to produce an iterator
// (like a generator) or a promise
if (typeof v === "function") {
// call function passing in the control token
// note: neutralize `this` in call to prevent
// unexpected behavior
v = v.call(ø,next_val);
// promise returned (ie, from async function)?
if (isPromise(v)) {
// wrap it in iterable sequence
v = ASQ.iterable(v);
}
}
// an iterable sequence? duplicate it (in case of multiple runs)
else if (ASQ.isSequence(v) && "next" in v) {
v = v.duplicate();
}
// wrap anything else in iterable sequence
else {
v = ASQ.iterable(v);
}
// a sequence to tap for errors?
if (ASQ.isSequence(v)) {
// listen for any sequence failures
v.or(function $$or(){
// signal iteration-error
mainDone.fail.apply(ø,arguments);
});
}
return v;
}
function addWrapped() {
iterators.push.apply(
iterators,
ARRAY_SLICE.call(arguments).map(wrap)
);
}
var iterators = args,
token = {
messages: ARRAY_SLICE.call(arguments,1),
add: addWrapped
},
iter, ret, next_val = token
;
// map co-routines to round-robin list of iterators
iterators = iterators.map(wrap);
// async iteration of round-robin list
(function iterate(){
// get next co-routine in list
iter = iterators.shift();
// process the iteration
try {
// multiple messages to send to an iterable
// sequence?
if (ASQ.isMessageWrapper(next_val) &&
ASQ.isSequence(iter)
) {
ret = iter.next.apply(iter,next_val);
}
else {
ret = iter.next(next_val);
}
}
catch (err) {
return mainDone.fail(err);
}
// bail on run in aborted sequence
if (internals("seq_aborted")) return;
// was the control token yielded?
if (ret.value === token) {
// round-robin: put co-routine back into the list
// at the end where it was so it can be processed
// again on next loop-iteration
iterators.push(iter);
next_val = token;
schedule(iterate); // async recurse
}
else {
// not a recognized ASQ instance returned?
if (!ASQ.isSequence(ret.value)) {
// received a thenable/promise back?
if (isPromise(ret.value)) {
// wrap in a sequence
ret.value = ASQ().promise(ret.value);
}
// thunk yielded?
else if (typeof ret.value === "function") {
// wrap thunk call in a sequence
var fn = ret.value;
ret.value = ASQ(function $$ASQ(done){
fn(done.errfcb);
});
}
// message wrapper returned?
else if (ASQ.isMessageWrapper(ret.value)) {
// wrap message(s) in a sequence
ret.value = ASQ.apply(ø,
// don't let `apply(..)` discard an empty message
// wrapper! instead, pass it along as its own value
// itself.
ret.value.length > 0 ? ret.value : ASQ.messages(undefined)
);
}
// non-undefined value returned?
else if (typeof ret.value !== "undefined") {
// wrap the value in a sequence
ret.value = ASQ(ret.value);
}
else {
// make an empty sequence
ret.value = ASQ();
}
}
ret.value
.val(function $$val(){
// bail on run in aborted sequence
if (internals("seq_aborted")) return;
if (arguments.length > 0) {
// save any return messages for input
// to next iteration
next_val = arguments.length > 1 ?
ASQ.messages.apply(ø,arguments) :
arguments[0]
;
}
// still more to iterate?
if (!ret.done) {
// was the control token passed along?
if (next_val === token) {
// round-robin: put co-routine back into the list
// at the end, so that the next iterator can be
// processed on next loop-iteration
iterators.push(iter);
}
else {
// put co-routine back in where it just
// was so it can be processed again on
// next loop-iteration
iterators.unshift(iter);
}
}
// still have some co-routine runs to process?
if (iterators.length > 0) {
iterate(); // async recurse
}
// all done!
else {
// previous value message?
if (typeof next_val !== "undefined") {
// not a message wrapper array?
if (!ASQ.isMessageWrapper(next_val)) {
// wrap value for the subsequent `apply(..)`
next_val = [next_val];
}
}
else {
// nothing to affirmatively pass along
next_val = [];
}
// signal done with all co-routine runs
mainDone.apply(ø,next_val);
}
})
.or(function $$or(){
// bail on run in aborted sequence
if (internals("seq_aborted")) return;
try {
// if an error occurs in the step-continuation
// promise or sequence, throw it back into the
// generator or iterable-sequence
iter["throw"].apply(iter,arguments);
}
catch (err) {
// if an error comes back out after the throw,
// pass it out to the main sequence, as iteration
// must now be complete
mainDone.fail(err);
}
});
}
})();
});
return api;
};
});
// "toPromise"
ASQ.extend("toPromise",function $$extend(api,internals){
return function $$to$promise() {
return new Promise(function $$executor(resolve,reject){
api
.val(function $$val(){
var args = ARRAY_SLICE.call(arguments);
resolve.call(ø,args.length > 1 ? args : args[0]);
return ASQ.messages.apply(ø,args);
})
.or(function $$or(){
var args = ARRAY_SLICE.call(arguments);
reject.call(ø,args.length > 1 ? args : args[0]);
});
});
};
});
// "try"
ASQ.extend("try",function $$extend(api,internals){
return function $$try() {
if (internals("seq_error") || internals("seq_aborted") ||
arguments.length === 0
) {
return api;
}
var fns = ARRAY_SLICE.call(arguments)
.map(function $$map(fn){
return function $$then(mainDone) {
var main_args = ARRAY_SLICE.call(arguments),
sq = ASQ.apply(ø,main_args.slice(1))
;
sq
.then(function $$inner$then(){
fn.apply(ø,arguments);
})
.val(function $$val(){
mainDone.apply(ø,arguments);
})
.or(function $$inner$or(){
var msgs = ASQ.messages.apply(ø,arguments);
// failed, so map error(s) as `catch`
mainDone({
"catch": msgs.length > 1 ? msgs : msgs[0]
});
});
};
});
api.then.apply(ø,fns);
return api;
};
});
// "until"
ASQ.extend("until",function $$extend(api,internals){
return function $$until() {
if (internals("seq_error") || internals("seq_aborted") ||
arguments.length === 0
) {
return api;
}
var fns = ARRAY_SLICE.call(arguments)
.map(function $$map(fn){
return function $$then(mainDone) {
var main_args = ARRAY_SLICE.call(arguments),
sq = ASQ.apply(ø,main_args.slice(1))
;
sq
.then(function $$inner$then(){
var args = ARRAY_SLICE.call(arguments);
args[0]["break"] = function $$break(){
mainDone.fail.apply(ø,arguments);
sq.abort();
};
fn.apply(ø,args);
})
.val(function $$val(){
mainDone.apply(ø,arguments);
})
.or(function $$inner$or(){
// failed, retry
$$then.apply(ø,main_args);
});
};
});
api.then.apply(ø,fns);
return api;
};
});
// "waterfall"
ASQ.extend("waterfall",function $$extend(api,internals){
return function $$waterfall() {
if (internals("seq_error") || internals("seq_aborted") ||
arguments.length === 0
) {
return api;
}
var fns = ARRAY_SLICE.call(arguments);
api.then(function $$then(done){
var msgs = ASQ.messages(),
sq = ASQ.apply(ø,ARRAY_SLICE.call(arguments,1))
;
fns.forEach(function $$each(fn){
sq.then(fn)
.val(function $$val(){
var args = ASQ.messages.apply(ø,arguments);
msgs.push(args.length > 1 ? args : args[0]);
return msgs;
});
});
sq.pipe(done);
});
return api;
};
});
// "wrap"
ASQ.wrap = function $$wrap(fn,opts) {
function checkThis(t,o) {
return (!t ||
(typeof window != "undefined" && t === window) ||
(typeof global != "undefined" && t === global)
) ? o : t;
}
var errfcb, params_first, act, this_obj;
opts = (opts && typeof opts == "object") ? opts : {};
if (
(opts.errfcb && opts.splitcb) ||
(opts.errfcb && opts.simplecb) ||
(opts.splitcb && opts.simplecb) ||
("errfcb" in opts && !opts.errfcb && !opts.splitcb && !opts.simplecb) ||
(opts.params_first && opts.params_last)
) {
throw Error("Invalid options");
}
// initialize default flags
this_obj = (opts["this"] && typeof opts["this"] == "object") ? opts["this"] : ø;
errfcb = opts.errfcb || !(opts.splitcb || opts.simplecb);
params_first = !!opts.params_first ||
(!opts.params_last && !("params_first" in opts || opts.params_first)) ||
("params_last" in opts && !opts.params_first && !opts.params_last)
;
if (params_first) {
act = "push";
}
else {
act = "unshift";
}
if (opts.gen) {
return function $$wrapped$gen() {
return ASQ.apply(ø,arguments).runner(fn);
};
}
if (errfcb) {
return function $$wrapped$errfcb() {
var args = ARRAY_SLICE.call(arguments),
_this = checkThis(this,this_obj)
;
return ASQ(function $$asq(done){
args[act](done.errfcb);
fn.apply(_this,args);
});
};
}
if (opts.splitcb) {
return function $$wrapped$splitcb() {
var args = ARRAY_SLICE.call(arguments),
_this = checkThis(this,this_obj)
;
return ASQ(function $$asq(done){
args[act](done,done.fail);
fn.apply(_this,args);
});
};
}
if (opts.simplecb) {
return function $$wrapped$simplecb() {
var args = ARRAY_SLICE.call(arguments),
_this = checkThis(this,this_obj)
;
return ASQ(function $$asq(done){
args[act](done);
fn.apply(_this,args);
});
};
}
};
// just return `ASQ` itself for convenience sake
return ASQ;
});<|fim▁end|>
|
};
});
// "go-style CSP"
|
<|file_name|>models.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
"""
Models for Credit Eligibility for courses.
Credit courses allow students to receive university credit for
successful completion of a course on EdX
"""
from __future__ import absolute_import
import datetime
import logging
from collections import defaultdict
import pytz
import six
from config_models.models import ConfigurationModel
from django.conf import settings
from django.core.cache import cache
from django.core.validators import RegexValidator
from django.db import IntegrityError, models, transaction
from django.dispatch import receiver
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy
from edx_django_utils.cache import RequestCache
from jsonfield.fields import JSONField
from model_utils.models import TimeStampedModel
from opaque_keys.edx.django.models import CourseKeyField
from openedx.core.lib.cache_utils import request_cached
CREDIT_PROVIDER_ID_REGEX = r"[a-z,A-Z,0-9,\-]+"
log = logging.getLogger(__name__)
@python_2_unicode_compatible
class CreditProvider(TimeStampedModel):
"""
This model represents an institution that can grant credit for a course.
Each provider is identified by a unique ID (e.g., 'ASU'). CreditProvider also
includes a `url` where the student will be sent when he/she tries to
get credit for the course. Eligibility duration will be used to set the duration
for which the credit eligible message appears on the dashboard.
.. no_pii:
"""
provider_id = models.CharField(
max_length=255,
unique=True,
validators=[
RegexValidator(
regex=CREDIT_PROVIDER_ID_REGEX,
message="Only alphanumeric characters and hyphens (-) are allowed",
code="invalid_provider_id",
)
],
help_text=ugettext_lazy(
"Unique identifier for this credit provider. "
"Only alphanumeric characters and hyphens (-) are allowed. "
"The identifier is case-sensitive."
)
)
active = models.BooleanField(
default=True,
help_text=ugettext_lazy("Whether the credit provider is currently enabled.")
)
display_name = models.CharField(
max_length=255,
help_text=ugettext_lazy("Name of the credit provider displayed to users")
)
enable_integration = models.BooleanField(
default=False,
help_text=ugettext_lazy(
"When true, automatically notify the credit provider "
"when a user requests credit. "
"In order for this to work, a shared secret key MUST be configured "
"for the credit provider in secure auth settings."
)
)
provider_url = models.URLField(
default="",
help_text=ugettext_lazy(
"URL of the credit provider. If automatic integration is "
"enabled, this will the the end-point that we POST to "
"to notify the provider of a credit request. Otherwise, the "
"user will be shown a link to this URL, so the user can "
"request credit from the provider directly."
)
)
provider_status_url = models.URLField(
default="",
help_text=ugettext_lazy(
"URL from the credit provider where the user can check the status "
"of his or her request for credit. This is displayed to students "
"*after* they have requested credit."
)
)
provider_description = models.TextField(
default="",
help_text=ugettext_lazy(
"Description for the credit provider displayed to users."
)
)
fulfillment_instructions = models.TextField(
null=True,
blank=True,
help_text=ugettext_lazy(
"Plain text or html content for displaying further steps on "
"receipt page *after* paying for the credit to get credit for a "
"credit course against a credit provider."
)
)
eligibility_email_message = models.TextField(
default="",
help_text=ugettext_lazy(
"Plain text or html content for displaying custom message inside "
"credit eligibility email content which is sent when user has met "
"all credit eligibility requirements."
)
)
receipt_email_message = models.TextField(
default="",
help_text=ugettext_lazy(
"Plain text or html content for displaying custom message inside "
"credit receipt email content which is sent *after* paying to get "
"credit for a credit course."
)
)
thumbnail_url = models.URLField(
default="",
max_length=255,
help_text=ugettext_lazy(
"Thumbnail image url of the credit provider."
)
)
CREDIT_PROVIDERS_CACHE_KEY = "credit.providers.list"
@classmethod
def get_credit_providers(cls, providers_list=None):
"""
Retrieve a list of all credit providers or filter on providers_list, represented
as dictionaries.
Arguments:
providers_list (list of strings or None): contains list of ids if required results
to be filtered, None for all providers.
Returns:
list of providers represented as dictionaries.
"""
# Attempt to retrieve the credit provider list from the cache if provider_list is None
# The cache key is invalidated when the provider list is updated
# (a post-save signal handler on the CreditProvider model)
# This doesn't happen very often, so we would expect a *very* high
# cache hit rate.
credit_providers = cache.get(cls.CREDIT_PROVIDERS_CACHE_KEY)
if credit_providers is None:
# Cache miss: construct the provider list and save it in the cache
credit_providers = CreditProvider.objects.filter(active=True)
credit_providers = [
{
"id": provider.provider_id,
"display_name": provider.display_name,
"url": provider.provider_url,
"status_url": provider.provider_status_url,
"description": provider.provider_description,
"enable_integration": provider.enable_integration,
"fulfillment_instructions": provider.fulfillment_instructions,
"thumbnail_url": provider.thumbnail_url,
}
for provider in credit_providers
]
cache.set(cls.CREDIT_PROVIDERS_CACHE_KEY, credit_providers)
if providers_list:
credit_providers = [provider for provider in credit_providers if provider['id'] in providers_list]
return credit_providers
@classmethod
def get_credit_provider(cls, provider_id):
"""
Retrieve a credit provider with provided 'provider_id'.
"""
try:
return CreditProvider.objects.get(active=True, provider_id=provider_id)
except cls.DoesNotExist:
return None
def __str__(self):
"""Unicode representation of the credit provider. """
return self.provider_id
@receiver(models.signals.post_save, sender=CreditProvider)
@receiver(models.signals.post_delete, sender=CreditProvider)
def invalidate_provider_cache(sender, **kwargs): # pylint: disable=unused-argument
"""Invalidate the cache of credit providers. """
cache.delete(CreditProvider.CREDIT_PROVIDERS_CACHE_KEY)
@python_2_unicode_compatible
class CreditCourse(models.Model):
"""
Model for tracking a credit course.
.. no_pii:
"""
course_key = CourseKeyField(max_length=255, db_index=True, unique=True)
enabled = models.BooleanField(default=False)
CREDIT_COURSES_CACHE_KEY = "credit.courses.set"
@classmethod
def is_credit_course(cls, course_key):
"""
Check whether the course has been configured for credit.
Args:
course_key (CourseKey): Identifier of the course.
Returns:
bool: True iff this is a credit course.
"""
credit_courses = cache.get(cls.CREDIT_COURSES_CACHE_KEY)
if credit_courses is None:
credit_courses = set(
six.text_type(course.course_key)
for course in cls.objects.filter(enabled=True)
)
cache.set(cls.CREDIT_COURSES_CACHE_KEY, credit_courses)
return six.text_type(course_key) in credit_courses
@classmethod
def get_credit_course(cls, course_key):
"""
Get the credit course if exists for the given 'course_key'.
Args:
course_key(CourseKey): The course identifier
Raises:
DoesNotExist if no CreditCourse exists for the given course key.
Returns:
CreditCourse if one exists for the given course key.
"""
return cls.objects.get(course_key=course_key, enabled=True)
def __str__(self):
"""Unicode representation of the credit course. """
return six.text_type(self.course_key)
@receiver(models.signals.post_save, sender=CreditCourse)
@receiver(models.signals.post_delete, sender=CreditCourse)
def invalidate_credit_courses_cache(sender, **kwargs): # pylint: disable=unused-argument
"""Invalidate the cache of credit courses. """
cache.delete(CreditCourse.CREDIT_COURSES_CACHE_KEY)
@python_2_unicode_compatible
class CreditRequirement(TimeStampedModel):
"""
This model represents a credit requirement.
Each requirement is uniquely identified by its 'namespace' and
'name' fields.
The 'name' field stores the unique name or location (in case of XBlock)
for a requirement, which serves as the unique identifier for that
requirement.
The 'display_name' field stores the display name of the requirement.
The 'criteria' field dictionary provides additional information that clients
may need to determine whether a user has satisfied the requirement.
.. no_pii:
"""
course = models.ForeignKey(CreditCourse, related_name="credit_requirements", on_delete=models.CASCADE)
namespace = models.CharField(max_length=255)
name = models.CharField(max_length=255)
display_name = models.CharField(max_length=255, default="")
order = models.PositiveIntegerField(default=0)
criteria = JSONField()
active = models.BooleanField(default=True)
CACHE_NAMESPACE = u"credit.CreditRequirement.cache."
class Meta(object):
unique_together = ('namespace', 'name', 'course')
ordering = ["order"]
def __str__(self):
return u'{course_id} - {name}'.format(course_id=self.course.course_key, name=self.display_name)
@classmethod
def add_or_update_course_requirement(cls, credit_course, requirement, order):
"""
Add requirement to a given course.
Args:
credit_course(CreditCourse): The identifier for credit course
requirement(dict): Requirement dict to be added
Returns:
(CreditRequirement, created) tuple
"""
credit_requirement, created = cls.objects.get_or_create(
course=credit_course,
namespace=requirement["namespace"],
name=requirement["name"],
defaults={
"display_name": requirement["display_name"],
"criteria": requirement["criteria"],
"order": order,
"active": True
}
)
if not created:
credit_requirement.criteria = requirement["criteria"]
credit_requirement.active = True
credit_requirement.order = order
credit_requirement.display_name = requirement["display_name"]
credit_requirement.save()
return credit_requirement, created
@classmethod
@request_cached(namespace=CACHE_NAMESPACE)
def get_course_requirements(cls, course_key, namespace=None, name=None):
"""
Get credit requirements of a given course.
Args:
course_key (CourseKey): The identifier for a course
Keyword Arguments
namespace (str): Optionally filter credit requirements by namespace.
name (str): Optionally filter credit requirements by name.
Returns:
QuerySet of CreditRequirement model
"""
# order credit requirements according to their appearance in courseware
requirements = CreditRequirement.objects.filter(course__course_key=course_key, active=True)
if namespace is not None:
requirements = requirements.filter(namespace=namespace)
if name is not None:
requirements = requirements.filter(name=name)
return requirements
@classmethod
def disable_credit_requirements(cls, requirement_ids):
"""
Mark the given requirements inactive.
Args:
requirement_ids(list): List of ids
Returns:
None
"""
cls.objects.filter(id__in=requirement_ids).update(active=False)
@classmethod
def get_course_requirement(cls, course_key, namespace, name):
"""
Get credit requirement of a given course.
Args:
course_key(CourseKey): The identifier for a course
namespace(str): Namespace of credit course requirements
name(str): Name of credit course requirement
Returns:
CreditRequirement object if exists, None otherwise.
"""
try:
return cls.objects.get(
course__course_key=course_key, active=True, namespace=namespace, name=name
)
except cls.DoesNotExist:
return None
@receiver(models.signals.post_save, sender=CreditRequirement)
@receiver(models.signals.post_delete, sender=CreditRequirement)
def invalidate_credit_requirement_cache(sender, **kwargs): # pylint: disable=unused-argument
"""Invalidate the cache of credit requirements. """
RequestCache(namespace=CreditRequirement.CACHE_NAMESPACE).clear()
class CreditRequirementStatus(TimeStampedModel):
"""
This model represents the status of each requirement.
For a particular credit requirement, a user can either:
1) Have satisfied the requirement (example: approved in-course reverification)
2) Have failed the requirement (example: denied in-course reverification)
3) Neither satisfied nor failed (example: the user hasn't yet attempted in-course reverification).
Cases (1) and (2) are represented by having a CreditRequirementStatus with
the status set to "satisfied" or "failed", respectively.
In case (3), no CreditRequirementStatus record will exist for the requirement and user.
.. no_pii:
"""
REQUIREMENT_STATUS_CHOICES = (
("satisfied", "satisfied"),
("failed", "failed"),
("declined", "declined"),
)
username = models.CharField(max_length=255, db_index=True)
requirement = models.ForeignKey(CreditRequirement, related_name="statuses", on_delete=models.CASCADE)
status = models.CharField(max_length=32, choices=REQUIREMENT_STATUS_CHOICES)
# Include additional information about why the user satisfied or failed
# the requirement. This is specific to the type of requirement.
# For example, the minimum grade requirement might record the user's
# final grade when the user completes the course. This allows us to display
# the grade to users later and to send the information to credit providers.
reason = JSONField(default={})
class Meta(object):
unique_together = ('username', 'requirement')
verbose_name_plural = ugettext_lazy('Credit requirement statuses')
@classmethod
def get_statuses(cls, requirements, username):
"""
Get credit requirement statuses of given requirement and username
Args:
requirements(list of CreditRequirements): The identifier for a requirement
username(str): username of the user
Returns:
Queryset 'CreditRequirementStatus' objects
"""
return cls.objects.filter(requirement__in=requirements, username=username)
@classmethod
@transaction.atomic
def add_or_update_requirement_status(cls, username, requirement, status="satisfied", reason=None):
"""
Add credit requirement status for given username.
Args:
username(str): Username of the user
requirement(CreditRequirement): 'CreditRequirement' object
status(str): Status of the requirement
reason(dict): Reason of the status
"""
requirement_status, created = cls.objects.get_or_create(
username=username,
requirement=requirement,
defaults={"reason": reason, "status": status}
)
if not created:
# do not update status to `failed` if user has `satisfied` the requirement
if status == 'failed' and requirement_status.status == 'satisfied':
log.info(
u'Can not change status of credit requirement "%s" from satisfied to failed ',
requirement_status.requirement_id
)
return
requirement_status.status = status
requirement_status.reason = reason
requirement_status.save()
@classmethod
@transaction.atomic
def remove_requirement_status(cls, username, requirement):
"""
Remove credit requirement status for given username.
Args:
username(str): Username of the user
requirement(CreditRequirement): 'CreditRequirement' object
"""
try:
requirement_status = cls.objects.get(username=username, requirement=requirement)
requirement_status.delete()
except cls.DoesNotExist:
log_msg = (
u'The requirement status {requirement} does not exist for username {username}.'.format(
requirement=requirement,
username=username
)
)
log.error(log_msg)
return
@classmethod
def retire_user(cls, retirement):
"""
Retire a user by anonymizing
Args:
retirement: UserRetirementStatus of the user being retired
"""
requirement_statuses = cls.objects.filter(
username=retirement.original_username
).update(
username=retirement.retired_username,
reason={},
)
return requirement_statuses > 0
def default_deadline_for_credit_eligibility():
"""
The default deadline to use when creating a new CreditEligibility model.
"""
return datetime.datetime.now(pytz.UTC) + datetime.timedelta(
days=getattr(settings, "CREDIT_ELIGIBILITY_EXPIRATION_DAYS", 365)
)
@python_2_unicode_compatible
class CreditEligibility(TimeStampedModel):
"""
A record of a user's eligibility for credit for a specific course.
.. no_pii:
"""
username = models.CharField(max_length=255, db_index=True)
course = models.ForeignKey(CreditCourse, related_name="eligibilities", on_delete=models.CASCADE)
# Deadline for when credit eligibility will expire.
# Once eligibility expires, users will no longer be able to purchase
# or request credit.
# We save the deadline as a database field just in case
# we need to override the deadline for particular students.
deadline = models.DateTimeField(
default=default_deadline_for_credit_eligibility,
help_text=ugettext_lazy("Deadline for purchasing and requesting credit.")
)
<|fim▁hole|> class Meta(object):
unique_together = ('username', 'course')
verbose_name_plural = "Credit eligibilities"
@classmethod
def update_eligibility(cls, requirements, username, course_key):
"""
Update the user's credit eligibility for a course.
A user is eligible for credit when the user has satisfied
all requirements for credit in the course.
Arguments:
requirements (Queryset): Queryset of `CreditRequirement`s to check.
username (str): Identifier of the user being updated.
course_key (CourseKey): Identifier of the course.
Returns: tuple
"""
# Check all requirements for the course to determine if the user
# is eligible. We need to check all the *requirements*
# (not just the *statuses*) in case the user doesn't yet have
# a status for a particular requirement.
status_by_req = defaultdict(lambda: False)
for status in CreditRequirementStatus.get_statuses(requirements, username):
status_by_req[status.requirement.id] = status.status
is_eligible = all(status_by_req[req.id] == "satisfied" for req in requirements)
# If we're eligible, then mark the user as being eligible for credit.
if is_eligible:
try:
CreditEligibility.objects.create(
username=username,
course=CreditCourse.objects.get(course_key=course_key),
)
return is_eligible, True
except IntegrityError:
return is_eligible, False
else:
return is_eligible, False
@classmethod
def get_user_eligibilities(cls, username):
"""
Returns the eligibilities of given user.
Args:
username(str): Username of the user
Returns:
CreditEligibility queryset for the user
"""
return cls.objects.filter(
username=username,
course__enabled=True,
deadline__gt=datetime.datetime.now(pytz.UTC)
).select_related('course')
@classmethod
def is_user_eligible_for_credit(cls, course_key, username):
"""
Check if the given user is eligible for the provided credit course
Args:
course_key(CourseKey): The course identifier
username(str): The username of the user
Returns:
Bool True if the user eligible for credit course else False
"""
return cls.objects.filter(
course__course_key=course_key,
course__enabled=True,
username=username,
deadline__gt=datetime.datetime.now(pytz.UTC),
).exists()
def __str__(self):
"""Unicode representation of the credit eligibility. """
return u"{user}, {course}".format(
user=self.username,
course=self.course.course_key,
)
@python_2_unicode_compatible
class CreditRequest(TimeStampedModel):
"""
A request for credit from a particular credit provider.
When a user initiates a request for credit, a CreditRequest record will be created.
Each CreditRequest is assigned a unique identifier so we can find it when the request
is approved by the provider. The CreditRequest record stores the parameters to be sent
at the time the request is made. If the user re-issues the request
(perhaps because the user did not finish filling in forms on the credit provider's site),
the request record will be updated, but the UUID will remain the same.
.. no_pii:
"""
uuid = models.CharField(max_length=32, unique=True, db_index=True)
username = models.CharField(max_length=255, db_index=True)
course = models.ForeignKey(CreditCourse, related_name="credit_requests", on_delete=models.CASCADE)
provider = models.ForeignKey(CreditProvider, related_name="credit_requests", on_delete=models.CASCADE)
parameters = JSONField()
REQUEST_STATUS_PENDING = "pending"
REQUEST_STATUS_APPROVED = "approved"
REQUEST_STATUS_REJECTED = "rejected"
REQUEST_STATUS_CHOICES = (
(REQUEST_STATUS_PENDING, "Pending"),
(REQUEST_STATUS_APPROVED, "Approved"),
(REQUEST_STATUS_REJECTED, "Rejected"),
)
status = models.CharField(
max_length=255,
choices=REQUEST_STATUS_CHOICES,
default=REQUEST_STATUS_PENDING
)
class Meta(object):
# Enforce the constraint that each user can have exactly one outstanding
# request to a given provider. Multiple requests use the same UUID.
unique_together = ('username', 'course', 'provider')
get_latest_by = 'created'
@classmethod
def retire_user(cls, retirement):
"""
Obfuscates CreditRequest instances associated with `original_username`.
Empties the records' `parameters` field and replaces username with its
anonymized value, `retired_username`.
"""
num_updated_credit_requests = cls.objects.filter(
username=retirement.original_username
).update(
username=retirement.retired_username,
parameters={},
)
return num_updated_credit_requests > 0
@classmethod
def credit_requests_for_user(cls, username):
"""
Retrieve all credit requests for a user.
Arguments:
username (unicode): The username of the user.
Returns: list
Example Usage:
>>> CreditRequest.credit_requests_for_user("bob")
[
{
"uuid": "557168d0f7664fe59097106c67c3f847",
"timestamp": 1434631630,
"course_key": "course-v1:HogwartsX+Potions101+1T2015",
"provider": {
"id": "HogwartsX",
"display_name": "Hogwarts School of Witchcraft and Wizardry",
},
"status": "pending" # or "approved" or "rejected"
}
]
"""
return [
{
"uuid": request.uuid,
"timestamp": request.parameters.get("timestamp"),
"course_key": request.course.course_key,
"provider": {
"id": request.provider.provider_id,
"display_name": request.provider.display_name
},
"status": request.status
}
for request in cls.objects.select_related('course', 'provider').filter(username=username)
]
@classmethod
def get_user_request_status(cls, username, course_key):
"""
Returns the latest credit request of user against the given course.
Args:
username(str): The username of requesting user
course_key(CourseKey): The course identifier
Returns:
CreditRequest if any otherwise None
"""
try:
return cls.objects.filter(
username=username, course__course_key=course_key
).select_related('course', 'provider').latest()
except cls.DoesNotExist:
return None
def __str__(self):
"""Unicode representation of a credit request."""
return u"{course}, {provider}, {status}".format(
course=self.course.course_key,
provider=self.provider.provider_id,
status=self.status,
)
@python_2_unicode_compatible
class CreditConfig(ConfigurationModel):
"""
Manage credit configuration
.. no_pii:
"""
CACHE_KEY = 'credit.providers.api.data'
cache_ttl = models.PositiveIntegerField(
verbose_name=ugettext_lazy("Cache Time To Live"),
default=0,
help_text=ugettext_lazy(
"Specified in seconds. Enable caching by setting this to a value greater than 0."
)
)
@property
def is_cache_enabled(self):
"""Whether responses from the commerce API will be cached."""
return self.enabled and self.cache_ttl > 0
def __str__(self):
"""Unicode representation of the config. """
return 'Credit Configuration'<|fim▁end|>
| |
<|file_name|>application-insights.ts<|end_file_name|><|fim▁begin|>export interface AIMonthlySummary {
successCount: number;
failedCount: number;
}
export interface AIInvocationTrace {
timestamp: string;
timestampFriendly: string;
id: string;
name: string;
success: boolean;
resultCode: string;
duration: number;
operationId: string;
invocationId: string;
}
export interface AIInvocationTraceHistory {
rowId: number;
timestamp: string;
timestampFriendly: string;
message: string;
logLevel: string;
}
export interface AIQueryResult {
tables: AIQueryResultTable[];
}
export interface AIQueryResultTable {
name: string;
columns: AIQueryResultTableColumn[];
rows: any[][];
}
export interface AIQueryResultTableColumn {
columnName: string;
dataType: string;
columnType: string;
}
export interface ApplicationInsight {
AppId: string;
<|fim▁hole|> ApplicationId: string;
CreationDate: Date;
InstrumentationKey: string;
Name: string;
}<|fim▁end|>
|
Application_Type: string;
|
<|file_name|>event.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::EventBinding;
use dom::bindings::codegen::EventBinding::EventConstants;
use dom::bindings::js::JS;
use dom::bindings::utils::{Reflectable, Reflector, reflect_dom_object};
use dom::bindings::error::Fallible;
use dom::eventtarget::EventTarget;
use dom::window::Window;
use servo_util::str::DOMString;
use geom::point::Point2D;
pub enum Event_ {
ResizeEvent(uint, uint),
ReflowEvent,
ClickEvent(uint, Point2D<f32>),
MouseDownEvent(uint, Point2D<f32>),
MouseUpEvent(uint, Point2D<f32>),
MouseMoveEvent(Point2D<f32>)
}
#[deriving(Encodable)]
pub enum EventPhase {
PhaseNone = EventConstants::NONE,
PhaseCapturing = EventConstants::CAPTURING_PHASE,
PhaseAtTarget = EventConstants::AT_TARGET,
PhaseBubbling = EventConstants::BUBBLING_PHASE,
}
#[deriving(Eq, Encodable)]
pub enum EventTypeId {
HTMLEventTypeId,
UIEventTypeId,
MouseEventTypeId,
KeyEventTypeId
}
#[deriving(Encodable)]
pub struct Event {
type_id: EventTypeId,
reflector_: Reflector,
current_target: Option<JS<EventTarget>>,
target: Option<JS<EventTarget>>,
type_: DOMString,
phase: EventPhase,
default_prevented: bool,
stop_propagation: bool,
stop_immediate: bool,
cancelable: bool,
bubbles: bool,
trusted: bool,
dispatching: bool,
initialized: bool,
}
impl Event {
pub fn new_inherited(type_id: EventTypeId) -> Event {
Event {
type_id: type_id,
reflector_: Reflector::new(),
current_target: None,
target: None,
phase: PhaseNone,
type_: ~"",
default_prevented: false,
cancelable: true,
bubbles: true,
trusted: false,
dispatching: false,
stop_propagation: false,
stop_immediate: false,
initialized: false,
}
}
pub fn new(window: &JS<Window>) -> JS<Event> {
reflect_dom_object(~Event::new_inherited(HTMLEventTypeId),
window,
EventBinding::Wrap)
}
pub fn EventPhase(&self) -> u16 {
self.phase as u16
}
pub fn Type(&self) -> DOMString {
self.type_.clone()
}
pub fn GetTarget(&self) -> Option<JS<EventTarget>> {
self.target.clone()
}
pub fn GetCurrentTarget(&self) -> Option<JS<EventTarget>> {
self.current_target.clone()
}
pub fn DefaultPrevented(&self) -> bool {
self.default_prevented
}
pub fn PreventDefault(&mut self) {
if self.cancelable {
self.default_prevented = true
}
}
pub fn StopPropagation(&mut self) {
self.stop_propagation = true;
}
pub fn StopImmediatePropagation(&mut self) {
self.stop_immediate = true;
self.stop_propagation = true;
}
pub fn Bubbles(&self) -> bool {
self.bubbles
}
pub fn Cancelable(&self) -> bool {
self.cancelable
}
pub fn TimeStamp(&self) -> u64 {
0
}
pub fn InitEvent(&mut self,
type_: DOMString,
bubbles: bool,
cancelable: bool) {
self.type_ = type_;
self.cancelable = cancelable;
self.bubbles = bubbles;
self.initialized = true;
}
<|fim▁hole|>
pub fn Constructor(global: &JS<Window>,
type_: DOMString,
init: &EventBinding::EventInit) -> Fallible<JS<Event>> {
let mut ev = Event::new(global);
ev.get_mut().InitEvent(type_, init.bubbles, init.cancelable);
Ok(ev)
}
}
impl Reflectable for Event {
fn reflector<'a>(&'a self) -> &'a Reflector {
&self.reflector_
}
fn mut_reflector<'a>(&'a mut self) -> &'a mut Reflector {
&mut self.reflector_
}
}<|fim▁end|>
|
pub fn IsTrusted(&self) -> bool {
self.trusted
}
|
<|file_name|>tokenizer.py<|end_file_name|><|fim▁begin|># coding: utf-8
#
# Copyright 2012 NAMD-EMAP-FGV
#
# This file is part of PyPLN. You can get more information at: http://pypln.org/.
#
# PyPLN is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#<|fim▁hole|># PyPLN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyPLN. If not, see <http://www.gnu.org/licenses/>.
from mongodict import MongoDict
from nltk import word_tokenize, sent_tokenize
from pypln.backend.celery_task import PyPLNTask
class Tokenizer(PyPLNTask):
def process(self, document):
text = document['text']
tokens = word_tokenize(text)
sentences = [word_tokenize(sent) for sent in sent_tokenize(text)]
return {'tokens': tokens, 'sentences': sentences}<|fim▁end|>
| |
<|file_name|>main.rs<|end_file_name|><|fim▁begin|>fn main() {
let mut highest: u32 = 0;
for x in 1..1000 {
for y in 1..1000 {
let multiple: u32 = x * y;
if (multiple > highest) && is_palindrome(&multiple.to_string()) {
highest = multiple;
}
}
}
println!("The answer is {}", highest);
}
fn is_palindrome(word: &str) -> bool {
let reverse = word.chars().rev().collect::<String>();
if word == reverse {
return true;
}<|fim▁hole|><|fim▁end|>
|
return false;
}
|
<|file_name|>CreateIterResultObject.d.ts<|end_file_name|><|fim▁begin|>import CreateIterResultObject = require('../2016/CreateIterResultObject');<|fim▁hole|><|fim▁end|>
|
export = CreateIterResultObject;
|
<|file_name|>asha-confirm-paymnet-model.ts<|end_file_name|><|fim▁begin|>export class AshaConfirmPaymentModel{
p_101: number ;
p_102: number ;
p_103: number ;
p_104: number ;
p_105: number ;
p_106: number ;
p_107: number ;
p_108: number ;
p_109: number ;
p_110: number ;
p_111: number ;
p_112: number ;
p_113: number ;
p_114: number ;<|fim▁hole|> p_116: number ;
p_117: number ;
p_118: number ;
p_119: number ;
p_120: number ;
p_121: number ;
p_122: number ;
p_123: number ;
p_124: number ;
p_125: number ;
p_126: number ;
p_127: number ;
p_128: number ;
p_129: number ;
p_130: number ;
p_131: number ;
p_132: number ;
p_133: number ;
p_134: number ;
p_135: number ;
p_136: number ;
p_137: number ;
p_138: number ;
p_139: number ;
p_140: number ;
p_141: number ;
total :number;
constructor(){
}
}<|fim▁end|>
|
p_115: number ;
|
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>from jx_elasticsearch.es52.painless._utils import Painless, LIST_TO_PIPE
from jx_elasticsearch.es52.painless.add_op import AddOp
from jx_elasticsearch.es52.painless.and_op import AndOp
from jx_elasticsearch.es52.painless.basic_add_op import BasicAddOp
from jx_elasticsearch.es52.painless.basic_eq_op import BasicEqOp<|fim▁hole|>from jx_elasticsearch.es52.painless.basic_mul_op import BasicMulOp
from jx_elasticsearch.es52.painless.basic_starts_with_op import BasicStartsWithOp
from jx_elasticsearch.es52.painless.basic_substring_op import BasicSubstringOp
from jx_elasticsearch.es52.painless.boolean_op import BooleanOp
from jx_elasticsearch.es52.painless.case_op import CaseOp
from jx_elasticsearch.es52.painless.coalesce_op import CoalesceOp
from jx_elasticsearch.es52.painless.concat_op import ConcatOp
from jx_elasticsearch.es52.painless.count_op import CountOp
from jx_elasticsearch.es52.painless.date_op import DateOp
from jx_elasticsearch.es52.painless.div_op import DivOp
from jx_elasticsearch.es52.painless.eq_op import EqOp
from jx_elasticsearch.es52.painless.es_script import EsScript
from jx_elasticsearch.es52.painless.exists_op import ExistsOp
from jx_elasticsearch.es52.painless.exp_op import ExpOp
from jx_elasticsearch.es52.painless.find_op import FindOp
from jx_elasticsearch.es52.painless.first_op import FirstOp
from jx_elasticsearch.es52.painless.floor_op import FloorOp
from jx_elasticsearch.es52.painless.gt_op import GtOp
from jx_elasticsearch.es52.painless.gte_op import GteOp
from jx_elasticsearch.es52.painless.in_op import InOp
from jx_elasticsearch.es52.painless.integer_op import IntegerOp
from jx_elasticsearch.es52.painless.is_number_op import IsNumberOp
from jx_elasticsearch.es52.painless.leaves_op import LeavesOp
from jx_elasticsearch.es52.painless.length_op import LengthOp
from jx_elasticsearch.es52.painless.literal import Literal
from jx_elasticsearch.es52.painless.lt_op import LtOp
from jx_elasticsearch.es52.painless.lte_op import LteOp
from jx_elasticsearch.es52.painless.max_op import MaxOp
from jx_elasticsearch.es52.painless.min_op import MinOp
from jx_elasticsearch.es52.painless.missing_op import MissingOp
from jx_elasticsearch.es52.painless.mod_op import ModOp
from jx_elasticsearch.es52.painless.mul_op import MulOp
from jx_elasticsearch.es52.painless.ne_op import NeOp
from jx_elasticsearch.es52.painless.not_left_op import NotLeftOp
from jx_elasticsearch.es52.painless.not_op import NotOp
from jx_elasticsearch.es52.painless.number_op import NumberOp
from jx_elasticsearch.es52.painless.or_op import OrOp
from jx_elasticsearch.es52.painless.prefix_op import PrefixOp
from jx_elasticsearch.es52.painless.string_op import StringOp
from jx_elasticsearch.es52.painless.sub_op import SubOp
from jx_elasticsearch.es52.painless.suffix_op import SuffixOp
from jx_elasticsearch.es52.painless.tuple_op import TupleOp
from jx_elasticsearch.es52.painless.union_op import UnionOp
from jx_elasticsearch.es52.painless.variable import Variable
from jx_elasticsearch.es52.painless.when_op import WhenOp
from jx_elasticsearch.es52.painless.false_op import FalseOp, false_script
from jx_elasticsearch.es52.painless.true_op import TrueOp, true_script
from jx_elasticsearch.es52.painless.null_op import NullOp, null_script
Painless.register_ops(vars())<|fim▁end|>
|
from jx_elasticsearch.es52.painless.basic_index_of_op import BasicIndexOfOp
|
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public<|fim▁hole|>//!
//! Attributes this crate provides:
//!
//! - `#[privatize]` : Forces all fields in a struct/enum to be private
//! - `#[derive(JSTraceable)]` : Auto-derives an implementation of `JSTraceable` for a struct in the script crate
//! - `#[must_root]` : Prevents data of the marked type from being used on the stack.
//! See the lints module for more details
//! - `#[dom_struct]` : Implies `#[privatize]`,`#[derive(JSTraceable)]`, and `#[must_root]`.
//! Use this for structs that correspond to a DOM type
#![feature(plugin_registrar, quote, plugin, box_syntax, rustc_private)]
#[macro_use]
extern crate syntax;
#[macro_use]
extern crate rustc;
extern crate tenacious;
use rustc::lint::LintPassObject;
use rustc::plugin::Registry;
use syntax::ext::base::*;
use syntax::parse::token::intern;
// Public for documentation to show up
/// Handles the auto-deriving for `#[derive(JSTraceable)]`
pub mod jstraceable;
/// Handles the auto-deriving for `#[derive(HeapSizeOf)]`
pub mod heap_size;
/// Autogenerates implementations of Reflectable on DOM structs
pub mod reflector;
pub mod lints;
/// Utilities for writing plugins
pub mod utils;
pub mod casing;
#[plugin_registrar]
pub fn plugin_registrar(reg: &mut Registry) {
reg.register_syntax_extension(intern("dom_struct"), MultiModifier(box jstraceable::expand_dom_struct));
reg.register_syntax_extension(intern("derive_JSTraceable"), MultiDecorator(box jstraceable::expand_jstraceable));
reg.register_syntax_extension(intern("_generate_reflector"), MultiDecorator(box reflector::expand_reflector));
reg.register_syntax_extension(intern("derive_HeapSizeOf"), MultiDecorator(box heap_size::expand_heap_size));
reg.register_macro("to_lower", casing::expand_lower);
reg.register_macro("to_upper", casing::expand_upper);
reg.register_lint_pass(box lints::transmute_type::TransmutePass as LintPassObject);
reg.register_lint_pass(box lints::unrooted_must_root::UnrootedPass as LintPassObject);
reg.register_lint_pass(box lints::privatize::PrivatizePass as LintPassObject);
reg.register_lint_pass(box lints::inheritance_integrity::InheritancePass as LintPassObject);
reg.register_lint_pass(box lints::str_to_string::StrToStringPass as LintPassObject);
reg.register_lint_pass(box lints::ban::BanPass as LintPassObject);
reg.register_lint_pass(box tenacious::TenaciousPass as LintPassObject);
}<|fim▁end|>
|
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Servo's compiler plugin/macro crate
|
<|file_name|>test_pdb_chain.py<|end_file_name|><|fim▁begin|><|fim▁hole|>#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit Tests for `pdb_chain`.
"""
import os
import sys
import unittest
from config import data_dir
from utils import OutputCapture
class TestTool(unittest.TestCase):
"""
Generic class for testing tools.
"""
def setUp(self):
# Dynamically import the module
name = 'pdbtools.pdb_chain'
self.module = __import__(name, fromlist=[''])
def exec_module(self):
"""
Execs module.
"""
with OutputCapture() as output:
try:
self.module.main()
except SystemExit as e:
self.retcode = e.code
self.stdout = output.stdout
self.stderr = output.stderr
return
def test_default(self):
"""$ pdb_chain data/dummy.pdb"""
# Simulate input
sys.argv = ['', os.path.join(data_dir, 'dummy.pdb')]
# Execute the script
self.exec_module()
# Validate results
self.assertEqual(self.retcode, 0) # ensure the program exited OK.
self.assertEqual(len(self.stdout), 204) # no lines deleted
self.assertEqual(len(self.stderr), 0) # no errors
records = (('ATOM', 'HETATM'))
chain_ids = [l[21] for l in self.stdout if l.startswith(records)]
unique_chain_ids = list(set(chain_ids))
self.assertEqual(unique_chain_ids, [' '])
def test_two_options(self):
"""$ pdb_chain -X data/dummy.pdb"""
sys.argv = ['', '-X', os.path.join(data_dir, 'dummy.pdb')]
self.exec_module()
self.assertEqual(self.retcode, 0)
self.assertEqual(len(self.stdout), 204)
self.assertEqual(len(self.stderr), 0)
records = (('ATOM', 'HETATM'))
chain_ids = [l[21] for l in self.stdout if l.startswith(records)]
unique_chain_ids = list(set(chain_ids))
self.assertEqual(unique_chain_ids, ['X'])
def test_file_not_found(self):
"""$ pdb_chain -A not_existing.pdb"""
afile = os.path.join(data_dir, 'not_existing.pdb')
sys.argv = ['', '-A', afile]
self.exec_module()
self.assertEqual(self.retcode, 1) # exit code is 1 (error)
self.assertEqual(len(self.stdout), 0) # nothing written to stdout
self.assertEqual(self.stderr[0][:22],
"ERROR!! File not found") # proper error message
def test_file_missing(self):
"""$ pdb_chain -A"""
sys.argv = ['', '-A']
self.exec_module()
self.assertEqual(self.retcode, 1)
self.assertEqual(len(self.stdout), 0) # no output
self.assertEqual(self.stderr[0],
"ERROR!! No data to process!")
def test_helptext(self):
"""$ pdb_chain"""
sys.argv = ['']
self.exec_module()
self.assertEqual(self.retcode, 1) # ensure the program exited gracefully.
self.assertEqual(len(self.stdout), 0) # no output
self.assertEqual(self.stderr, self.module.__doc__.split("\n")[:-1])
def test_invalid_option(self):
"""$ pdb_chain -AH data/dummy.pdb"""
sys.argv = ['', '-AH', os.path.join(data_dir, 'dummy.pdb')]
self.exec_module()
self.assertEqual(self.retcode, 1)
self.assertEqual(len(self.stdout), 0)
self.assertEqual(self.stderr[0][:47],
"ERROR!! Chain identifiers must be a single char")
def test_not_an_option(self):
"""$ pdb_chain A data/dummy.pdb"""
sys.argv = ['', 'A', os.path.join(data_dir, 'dummy.pdb')]
self.exec_module()
self.assertEqual(self.retcode, 1)
self.assertEqual(len(self.stdout), 0)
self.assertEqual(self.stderr[0],
"ERROR! First argument is not an option: 'A'")
if __name__ == '__main__':
from config import test_dir
mpath = os.path.abspath(os.path.join(test_dir, '..'))
sys.path.insert(0, mpath) # so we load dev files before any installation
unittest.main()<|fim▁end|>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2018 João Pedro Rodrigues
|
<|file_name|>signup_page.py<|end_file_name|><|fim▁begin|>from givabit.backend.errors import AlreadyExistsException
from givabit.backend.user import User
from givabit.backend.user_repository import UserRepository
from givabit.webapp.base_page import BasePage
from givabit.webapp.url import Url
class SignupPage(BasePage):
def __init__(self, request, response, user_repository=None):
BasePage.__init__(self, request, response)<|fim▁hole|> self.write_template('signup', {'title': 'Givabit - Sign up'})
def post(self):
POST = self.request.POST
email = POST['email']
user = User(email=email)
try:
self.user_repo.create_unconfirmed_user(user=user, send_email=True)
response = self.redirect(Url().for_page('signedup'))
return response
except AlreadyExistsException:
self.write_template('signup', {'title': 'Givabit - Sign up', 'success': False, 'error': 'User already exists'})<|fim▁end|>
|
self.user_repo = user_repository if user_repository is not None else UserRepository()
def get(self):
|
<|file_name|>issue-69446-fnmut-capture.rs<|end_file_name|><|fim▁begin|>// Regression test for issue #69446 - we should display
// which variable is captured
// edition:2018
use core::future::Future;
struct Foo;
impl Foo {
fn foo(&mut self) {}
}
async fn bar<T>(_: impl FnMut() -> T)
where
T: Future<Output = ()>,
{}<|fim▁hole|>
fn main() {
let mut x = Foo;
bar(move || async { //~ ERROR captured
x.foo();
});
}<|fim▁end|>
| |
<|file_name|>regions-ret-borrowed.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your<|fim▁hole|>// except according to those terms.
// Ensure that you cannot use generic types to return a region outside
// of its bound. Here, in the `return_it()` fn, we call with() but
// with R bound to &int from the return_it. Meanwhile, with()
// provides a value that is only good within its own stack frame. This
// used to successfully compile because we failed to account for the
// fact that fn(x: &int) rebound the region &.
fn with<R, F>(f: F) -> R where F: FnOnce(&int) -> R {
f(&3)
}
fn return_it<'a>() -> &'a int {
with(|o| o)
//~^ ERROR cannot infer
}
fn main() {
let x = return_it();
println!("foo={}", *x);
}<|fim▁end|>
|
// option. This file may not be copied, modified, or distributed
|
<|file_name|>QueueArray.java<|end_file_name|><|fim▁begin|>package code.template;
/**
*
* @author
*/
public class QueueArray {
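    // Array-backed queue: 'front' tracks the oldest element and 'back' the most recently added one.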
int QUEUE_SIZE;
int front;
int back;
Integer[] queueArray;
public QueueArray(int size) {
queueArray = new Integer[size];
QUEUE_SIZE = size;
}
public void equeue(int putInBackArray) {
if (isFull()) {
System.out.println("Sorry the quere is full");
} else if (isEmpty()) {
front = 0;
back = 0;
} else {
back = (back + 1) % QUEUE_SIZE;
}
queueArray[back] = putInBackArray;
}
public int dequeue() {
if (isEmpty()) {
System.out.println("The queue is empty");
} else if (front == back) {
front = back - 1;
} else {
back = back + 1 % QUEUE_SIZE;
}
return queueArray[front];
}
public boolean isEmpty() {
if (front == -1 && back == -1) {
return true;
} else {
return false;
}
}
public boolean isFull() {
if ((back + 1) % QUEUE_SIZE == front) {
return true;
} else {
return false;
}
}
public int size() {
return front;
}
public void print() {
for (int i = 0; i < queueArray.length; i++) {
<|fim▁hole|>
}<|fim▁end|>
|
System.out.println(i+"/t" + queueArray[i]);
}
}
|
<|file_name|>array_ops.rs<|end_file_name|><|fim▁begin|>use crate::ndarray;
use crate::ndarray_ext;
#[cfg(feature = "mkl")]
use crate::ndarray_ext::NdArrayViewMut;
use crate::ndarray_ext::{NdArray, NdArrayView};
use crate::op;
use crate::tensor::Tensor;
use crate::tensor_ops::*;
use crate::Float;
use std::iter::FromIterator;
pub struct ExpandDims;
pub struct Squeeze;
pub struct Slice {
pub indices: Vec<ndarray::SliceOrIndex>,
}
pub struct SliceGrad {
pub indices: Vec<ndarray::SliceOrIndex>,
}
pub struct Split {
pub axis: isize,
pub start_index: isize,
pub end_index: isize,
}
pub struct SplitGrad {
pub axis: isize,
pub start_index: isize,
pub end_index: isize,
}
pub struct Tile {
pub axis: isize,
pub num: usize,
}
pub struct Concat {
pub axis: isize,
}
pub struct ConcatGrad {
pub axis: isize,
pub index: usize,
}
pub struct Clip<T: Float> {
pub min: T,
pub max: T,
}
pub struct ClipGrad<T: Float> {
pub min: T,
pub max: T,
}
pub struct AddN;
pub struct Gather {
pub axis: isize,
pub should_normalize_negative_indices: bool,
}
pub struct GatherGrad {
pub axis: isize,
}
pub struct IndexOp {
pub index: isize,
}
pub struct IndexOpGrad {
pub index: isize,
}
pub struct SetDiff1D;
pub struct Shape;
pub struct Rank;
pub struct Size;
pub struct Reshape;
pub struct InferBinOpShape;
pub struct Assign;
impl<T: Float> op::Op<T> for Assign {
fn compute(&self, ctx: &mut op::ComputeContext<T>) -> Result<(), crate::op::OpError> {
ctx.input_mut(0).assign(&ctx.input(1));
ctx.append_empty_output();
Ok(())
}
fn grad(&self, ctx: &mut op::GradientContext<T>) {
ctx.append_input_grad(None);
ctx.append_input_grad(None);
}
}
impl<T: Float> op::Op<T> for InferBinOpShape {
fn compute(&self, ctx: &mut crate::op::ComputeContext<T>) -> Result<(), crate::op::OpError> {
let a_shape_float = ctx.input(0);
let b_shape_float = ctx.input(1);
let a_shape = a_shape_float.map(|x| x.to_usize().unwrap()).into_raw_vec();
let b_shape = b_shape_float.map(|x| x.to_usize().unwrap()).into_raw_vec();
let a_is_scalar = ndarray_ext::is_scalar_shape(a_shape.as_slice());
let b_is_scalar = ndarray_ext::is_scalar_shape(b_shape.as_slice());
if !a_is_scalar && !b_is_scalar {
let a_rank = a_shape.len();
let b_rank = b_shape.len();
if a_rank != b_rank {
return Err(op::OpError::IncompatibleShape(
"InferBinOpShape: rank of lhs and rhs must match.".to_string(),
));
}
let max = a_shape
.iter()
.zip(b_shape)
.map(|(a, b)| T::from(a.clone().max(b)).unwrap())
.collect::<Vec<T>>();
ctx.append_output(NdArray::from_shape_vec(ndarray::IxDyn(&[a_rank]), max).unwrap())
} else if !a_is_scalar {
ctx.append_output_view(a_shape_float);
} else {
ctx.append_output_view(b_shape_float);
}
Ok(())
}
fn grad(&self, ctx: &mut crate::op::GradientContext<T>) {
ctx.append_input_grad(None);
ctx.append_input_grad(None);
}
}
impl<T: Float> op::Op<T> for Shape {
fn compute(&self, ctx: &mut crate::op::ComputeContext<T>) -> Result<(), crate::op::OpError> {
let x = &ctx.input(0);
let ret = ndarray_ext::shape_of_view(x);
ctx.append_output(ret);
Ok(())
}
fn grad(&self, ctx: &mut crate::op::GradientContext<T>) {
ctx.append_input_grad(None);
}
}
impl<T: Float> op::Op<T> for Rank {
fn compute(&self, ctx: &mut crate::op::ComputeContext<T>) -> Result<(), crate::op::OpError> {
let x = ctx.input(0);
let ret = NdArray::from_elem(ndarray::IxDyn(&[]), T::from(x.ndim()).unwrap());
ctx.append_output(ret);
Ok(())
}
fn grad(&self, ctx: &mut crate::op::GradientContext<T>) {
ctx.append_input_grad(None);
}
}
impl<T: Float> op::Op<T> for Size {
fn compute(&self, ctx: &mut crate::op::ComputeContext<T>) -> Result<(), crate::op::OpError> {
let x = ctx.input(0);
let ret = NdArray::from_elem(ndarray::IxDyn(&[]), T::from(x.len()).unwrap());
ctx.append_output(ret);
Ok(())
}
fn grad(&self, ctx: &mut crate::op::GradientContext<T>) {
ctx.append_input_grad(None);
}
}
impl<T: Float> op::Op<T> for Reshape {
fn compute(&self, ctx: &mut crate::op::ComputeContext<T>) -> Result<(), crate::op::OpError> {
let x = &ctx.input(0);
let shape_arr = &ctx.input(1);
let target = shape_arr
.iter()
.map(|&dim_size| {
if dim_size != -T::one() {
dim_size.to_usize().unwrap()
} else {
let product: T = shape_arr.iter().fold(T::one(), |acc, &x| acc * x);
x.len() / product.neg().to_usize().unwrap()
}
})
.collect::<Vec<_>>();
        // If x is *not* c-contiguous, just copy it for now
// due to current state of ndarray: https://github.com/rust-ndarray/ndarray/issues/390
if x.is_standard_layout() {
if let Ok(a) = x.clone().into_shape(ndarray::IxDyn(target.as_slice())) {
ctx.append_output_view(a);
} else {
let copy = crate::ndarray_ext::deep_copy(x);
if let Ok(a) = copy.into_shape(ndarray::IxDyn(target.as_slice())) {
ctx.append_output(a);
} else {
return Err(op::OpError::IncompatibleShape(format!(
"reshape failed: {:?} vs {:?}",
x.shape(),
target
)));
}
}
} else if let Ok(a) =
ndarray_ext::deep_copy(x).into_shape(ndarray::IxDyn(target.as_slice()))
{
ctx.append_output(a)
} else {
return Err(op::OpError::IncompatibleShape(format!(
"reshape failed: {:?} vs {:?}",
x.shape(),
target
)));
}
Ok(())
}
fn grad(&self, ctx: &mut crate::op::GradientContext<T>) {
let gy = ctx.output_grad();
let x = ctx.input(0);
let gx = Tensor::builder(ctx.graph())
.append_input(gy, false)
.append_input(shape(&x), false)
.build(Reshape);
ctx.append_input_grad(Some(gx));
ctx.append_input_grad(None);
}
}
impl<T: Float> op::Op<T> for SetDiff1D {
fn compute(&self, ctx: &mut crate::op::ComputeContext<T>) -> Result<(), crate::op::OpError> {
let x0 = ctx.input(0);
let x1 = &ctx.input(1);
let set_a: crate::FxHashSet<isize> = crate::FxHashSet::from_iter(
x0.as_slice()
.unwrap()
.iter()
.map(|&a| a.to_isize().unwrap()),
);
let set_b: crate::FxHashSet<isize> = crate::FxHashSet::from_iter(
x1.as_slice()
.unwrap()
.iter()
.map(|&a| a.to_isize().unwrap()),
);
let diff = set_a.difference(&set_b);
let mut vec = diff.collect::<Vec<&isize>>();
vec.sort();
let vec = vec
.into_iter()
.map(|&a| T::from(a).unwrap())
.collect::<Vec<T>>();
let len = vec.len();
// safe unwrap
let ret = NdArray::from_shape_vec(ndarray::IxDyn(&[len]), vec).unwrap();
ctx.append_output(ret);
Ok(())
}
fn grad(&self, ctx: &mut crate::op::GradientContext<T>) {
ctx.append_input_grad(None);
ctx.append_input_grad(None);
}
}
impl<T: Float> op::Op<T> for IndexOp {
fn compute(&self, ctx: &mut crate::op::ComputeContext<T>) -> Result<(), crate::op::OpError> {
let x = ctx.input(0);
let i = if self.index < 0 {
((x.len() as isize) + self.index) as usize
} else {
self.index as usize
};
// unwrap is safe
let flat_x = x.view().into_shape(x.len()).unwrap();
if let Some(ret) = flat_x.get(i) {
ctx.append_output(ndarray::arr0(*ret).into_dyn());
Ok(())
} else {
Err(op::OpError::OutOfBounds(format!(
"access_elem: tried to access index {} in tensor of length {} (shape: {:?})",
i,
x.len(),
x.shape(),
)))
}
}
fn grad(&self, ctx: &mut crate::op::GradientContext<T>) {
let op = IndexOpGrad { index: self.index };
let x = ctx.input(0);
let gy = ctx.output_grad();
let gx = Tensor::builder(ctx.graph())
.set_shape(&shape(x))
.append_input(&x, false)
.append_input(&gy, false)
.build(op);
ctx.append_input_grad(Some(gx));
}
}
impl<T: Float> op::Op<T> for IndexOpGrad {
fn compute(&self, ctx: &mut crate::op::ComputeContext<T>) -> Result<(), crate::op::OpError> {
let x = ctx.input(0);
let gy = &ctx.input(1);
let mut result = NdArray::zeros(x.shape());
let i = if self.index < 0 {
((x.len() as isize) + self.index) as usize
} else {
self.index as usize
};
// unwrap is safe
let len = result.len();
if let Some(a) = result
.view_mut()
.into_shape(len)
.unwrap() // safe unwrap
.get_mut(i)
{
*a = gy[ndarray::IxDyn(&[])];
} else {
return Err(op::OpError::OutOfBounds(format!(
"access_elem: tried to access index {} in tensor of length {} (shape: {:?})",
i,
x.len(),
x.shape(),
)));
}
ctx.append_output(result);
Ok(())
}
fn grad(&self, ctx: &mut crate::op::GradientContext<T>) {
ctx.append_input_grad(None);
}
}
impl<T: Float> op::Op<T> for Gather {
fn compute(&self, ctx: &mut crate::op::ComputeContext<T>) -> Result<(), crate::op::OpError> {
let param = &ctx.input(1);
let indices = &ctx.input(0);
let indices_shape = indices.shape();
let param_shape = param.shape();
let axis = ndarray_ext::normalize_negative_axis(self.axis, param.ndim());
let output_shape: Vec<usize> = {
let former: &[usize] = ¶m_shape[..axis];
let latter: &[usize] = ¶m_shape[axis + 1..];
// doing former + indices.shape() + latter
former
.iter()
.chain(indices_shape)
.chain(latter)
.cloned()
.collect()
};
let flat_indices = if self.should_normalize_negative_indices {
ndarray_ext::normalize_negative_axes(indices, param_shape[axis])
} else {
indices
.map(|a| a.to_usize().expect("Invalid index value"))
.into_raw_vec()
};
let selected = param.select(ndarray::Axis(axis), flat_indices.as_slice());
let ret = selected.into_shape(output_shape.as_slice()).unwrap();
ctx.append_output(ret);
Ok(())
}
fn grad(&self, ctx: &mut crate::op::GradientContext<T>) {
let x = ctx.input(0);
let x1 = ctx.input(1);
let gy = ctx.output_grad();
let gx = Tensor::builder(ctx.graph())
.append_input(&x, false)
.append_input(&x1, false)
.append_input(&gy, false)
.set_shape(&shape(x))
.build(GatherGrad { axis: self.axis });
ctx.append_input_grad(None);
ctx.append_input_grad(Some(gx));
}
}
impl<T: Float> op::Op<T> for GatherGrad {
fn compute(&self, ctx: &mut crate::op::ComputeContext<T>) -> Result<(), crate::op::OpError> {
let indices = ctx.input(0);
let param = &ctx.input(1);
let param_shape = param.shape();
let gy = &ctx.input(2);
let axis = if self.axis == -1 {
param.ndim()
} else {
self.axis as usize
};
// get read-only view of gy and reshape it
let gy = {
let former = ¶m_shape[..axis];
let latter = ¶m_shape[axis + 1..];
let shape: Vec<usize> = former
.iter()
.chain(&[indices.len()])
.chain(latter)
.cloned()
.collect();
gy.view().into_shape(shape).unwrap()
};
let mut gx = NdArray::zeros(param.shape());
for (gy_sub, &i) in gy.axis_iter(ndarray::Axis(axis)).zip(indices) {
let i = i.to_isize().unwrap();
// get gx's sub view
let gx_sliced = gx.slice_mut(
ndarray::SliceInfo::<_, ndarray::IxDyn>::new(
(0..param.ndim())
.map(|dim| {
if dim == axis {
ndarray::SliceOrIndex::Slice {
start: i,
end: Some(i + 1),
step: 1,
}
} else {
ndarray::SliceOrIndex::Slice {
start: 0,
end: None,
step: 1,
}
}
})
.collect::<Vec<_>>(),
)
.unwrap()
.as_ref(),
);
// squeeze
let mut gx_sliced = gx_sliced.index_axis_move(ndarray::Axis(axis), 0);
// assign gy to sliced view
gx_sliced.zip_mut_with(&gy_sub, |gx, &gy| {
*gx += gy;
});
}
ctx.append_output(gx);
Ok(())
}
fn grad(&self, ctx: &mut crate::op::GradientContext<T>) {
ctx.append_input_grad(None);
ctx.append_input_grad(None);
ctx.append_input_grad(None);
}
}
#[cfg(feature = "mkl")]
pub(crate) fn inplace_add_impl<F: Float>(mut a: NdArrayViewMut<F>, b: &NdArrayView<F>) {
use crate::same_type;
use crate::tensor_ops::blas_ffi::{vdAdd, vsAdd, MklInt};
unsafe {
if same_type::<F, f32>() {
vsAdd(
a.len() as MklInt,
a.as_ptr() as *const f32,
b.as_ptr() as *const f32,
a.as_mut_ptr() as *mut f32,
);
return;
} else if same_type::<F, f64>() {
vdAdd(
a.len() as MklInt,
a.as_ptr() as *const f64,
b.as_ptr() as *const f64,
a.as_mut_ptr() as *mut f64,
);
return;
} else {
a += b;
}
}
}
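// AddN computes the element-wise sum of all of its inputs.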
impl<T: Float> op::Op<T> for AddN {
fn compute(&self, ctx: &mut crate::op::ComputeContext<T>) -> Result<(), crate::op::OpError> {
if 0 == ctx.num_inputs() {
unreachable!()
} else if 1 == ctx.num_inputs() {
let ret = ctx.input(0);
ctx.append_output_view(ret);
} else if 2 == ctx.num_inputs() {
let ret = &ctx.input(0) + &ctx.input(1);
ctx.append_output(ret);
} else {
let mut base = &ctx.input(0) + &ctx.input(1);
for i in 2..ctx.num_inputs() {
#[cfg(feature = "mkl")]
{
inplace_add_impl(base.view_mut(), &ctx.input(i));
}
#[cfg(not(feature = "mkl"))]
{
base += &ctx.input(i);
}
}
ctx.append_output(base);
}
Ok(())
}
fn grad(&self, ctx: &mut crate::op::GradientContext<T>) {
for _ in 0..ctx.num_inputs() {
ctx.append_input_grad(Some(ctx.output_grad()));
}
}
}
impl<T: Float> op::Op<T> for Clip<T> {
fn compute(&self, ctx: &mut crate::op::ComputeContext<T>) -> Result<(), crate::op::OpError> {
let ret = ctx.input(0).map(move |a| a.min(self.max).max(self.min));
ctx.append_output(ret);
Ok(())
}
fn grad(&self, ctx: &mut crate::op::GradientContext<T>) {
let gy = ctx.output_grad();
let x0 = ctx.input(0);
let gx = Tensor::builder(ctx.graph())
.set_shape(&shape(gy))
.append_input(&x0, false)
.append_input(&gy, false)
.build(ClipGrad {
min: self.min,
max: self.max,
});
ctx.append_input_grad(Some(gx));
}
}
impl<T: Float> op::Op<T> for ClipGrad<T> {
fn compute(&self, ctx: &mut crate::op::ComputeContext<T>) -> Result<(), crate::op::OpError> {
let mut ret = ctx.input(0).mapv(move |x| {
// x > min && x < max
T::from((((x > self.min) as i32) as f32) * (((x < self.max) as i32) as f32)).unwrap()
});
ret *= &ctx.input(1);
ctx.append_output(ret);
Ok(())
}
fn grad(&self, ctx: &mut crate::op::GradientContext<T>) {
ctx.append_input_grad(None);
ctx.append_input_grad(None);
}
}
impl<T: Float> op::Op<T> for Concat {
fn compute(&self, ctx: &mut crate::op::ComputeContext<T>) -> Result<(), crate::op::OpError> {
let mut views = Vec::with_capacity(ctx.num_inputs());
for i in 0..ctx.num_inputs() {
views.push(ctx.input(i));
}
let axis = if self.axis < 0 {
(ctx.input(0).ndim() as isize + self.axis) as usize
} else {
self.axis as usize
};
match ndarray::concatenate(ndarray::Axis(axis), views.as_slice()) {
Ok(y) => {
ctx.append_output(y);
Ok(())
}
Err(e) => Err(op::OpError::NdArrayError("concat".to_string(), e)),
}
}
fn grad(&self, ctx: &mut crate::op::GradientContext<T>) {
// [x1, x2, x3, ..., gy]
let num_inputs = ctx.num_inputs();
let inputs = ctx.inputs();
for i in 0..num_inputs {
let mut builder = Tensor::builder(ctx.graph())
.set_shape(&shape(ctx.input(0)))
.append_input(&ctx.output_grad(), false);
for input in inputs.iter() {
builder = builder.append_input(input, false);
}
let gx = builder.build(ConcatGrad {
index: i,
axis: self.axis,
});
ctx.append_input_grad(Some(gx));
}
}
}
impl<T: Float> op::Op<T> for ConcatGrad {
fn compute(&self, ctx: &mut crate::op::ComputeContext<T>) -> Result<(), crate::op::OpError> {
let gy = ctx.input(0);
let axis = if self.axis < 0 {
(ctx.input(0).ndim() as isize + self.axis) as usize
} else {
self.axis as usize
};
// make slice indices
let mut start_idx = 0;
for i in 1..self.index {
start_idx += ctx.input(i).shape()[axis];
}
let region_len = ctx.input(self.index + 1).shape()[axis] as isize;
let indices = (0..gy.ndim())
.map(move |_axis| {
if _axis == axis {
// partial region
ndarray::SliceOrIndex::Slice {
start: start_idx as isize,
end: Some(region_len),
step: 1,<|fim▁hole|> // full slice
ndarray::SliceOrIndex::Slice {
start: 0,
end: None,
step: 1,
}
}
})
.collect::<Vec<_>>();
// Clone the *view*
match ndarray::SliceInfo::new(indices) {
Ok(ok) => {
// do slice
let ret = gy.clone().slice_move(ok.as_ref());
ctx.append_output_view(ret);
Ok(())
}
Err(e) => Err(op::OpError::NdArrayError("ConcatGrad: ".to_string(), e)),
}
}
fn grad(&self, ctx: &mut crate::op::GradientContext<T>) {
for _ in 0..ctx.num_inputs() {
ctx.append_input_grad(None);
}
}
}
impl<T: Float> op::Op<T> for Tile {
fn compute(&self, ctx: &mut crate::op::ComputeContext<T>) -> Result<(), crate::op::OpError> {
let x = ctx.input(0);
let axis = ndarray_ext::normalize_negative_axis(self.axis, x.ndim());
let views = vec![x.clone(); self.num];
match ndarray::concatenate(ndarray::Axis(axis), views.as_slice()) {
Ok(ret) => {
ctx.append_output(ret);
Ok(())
}
Err(e) => Err(op::OpError::NdArrayError("tile: ".to_string(), e)),
}
}
fn grad(&self, ctx: &mut crate::op::GradientContext<T>) {
ctx.append_input_grad(Some(reduce_sum(ctx.output_grad(), &[self.axis], true)));
}
}
impl<T: Float> op::Op<T> for Split {
fn compute(&self, ctx: &mut crate::op::ComputeContext<T>) -> Result<(), crate::op::OpError> {
let x = &ctx.input(0);
let axis = ndarray_ext::normalize_negative_axis(self.axis, x.ndim());
let mut ret = x.clone();
let indices = make_indices_for_split(x, self.start_index, self.end_index, axis);
ret.slice_collapse(&indices);
ctx.append_output_view(ret);
Ok(())
}
fn grad(&self, ctx: &mut crate::op::GradientContext<T>) {
let op = SplitGrad {
axis: self.axis,
start_index: self.start_index,
end_index: self.end_index,
};
let x = ctx.input(0);
let gy = ctx.output_grad();
let gx = Tensor::builder(ctx.graph())
.append_input(&x, false)
.append_input(&gy, false)
.set_shape(&shape(x))
.build(op);
ctx.append_input_grad(Some(gx));
}
}
impl<T: Float> op::Op<T> for SplitGrad {
fn compute(&self, ctx: &mut crate::op::ComputeContext<T>) -> Result<(), crate::op::OpError> {
let x = ctx.input(0);
let mut gx = NdArray::zeros(x.shape());
let axis = ndarray_ext::normalize_negative_axis(self.axis, x.ndim());
let indices = make_indices_for_split(&x, self.start_index, self.end_index, axis);
gx.slice_mut(
ndarray::SliceInfo::<_, ndarray::IxDyn>::new(indices)
.unwrap()
.as_ref(),
)
.zip_mut_with(&ctx.input(1), |a, &g| *a = g);
ctx.append_output(gx);
Ok(())
}
fn grad(&self, ctx: &mut crate::op::GradientContext<T>) {
ctx.append_input_grad(None);
}
}
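// Builds slice specs that select [start_index, end_index) along `axis` and the full range on every other axis.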
#[inline]
fn make_indices_for_split<T: Float>(
x: &NdArrayView<T>,
start_index: isize,
end_index: isize,
axis: usize,
) -> Vec<ndarray::SliceOrIndex> {
let ndim = x.ndim();
assert!(ndim > axis, "Wrong split axis");
(0..ndim)
.map(|i| {
if i == axis {
ndarray::SliceOrIndex::Slice {
start: start_index,
end: Some(end_index),
step: 1,
}
} else {
// full slice
ndarray::SliceOrIndex::Slice {
start: 0,
end: None,
step: 1,
}
}
})
.collect::<Vec<_>>()
}
impl<T: Float> op::Op<T> for Slice {
fn compute(&self, ctx: &mut crate::op::ComputeContext<T>) -> Result<(), crate::op::OpError> {
let mut y = ctx.input(0);
y.slice_collapse(&self.indices);
ctx.append_output_view(y);
Ok(())
}
fn grad(&self, ctx: &mut crate::op::GradientContext<T>) {
let op = SliceGrad {
indices: self.indices.clone(),
};
let x = ctx.input(0);
let gy = ctx.output_grad();
let gx = Tensor::builder(ctx.graph())
.append_input(&x, false)
.append_input(&gy, false)
.set_shape(&shape(x))
.build(op);
ctx.append_input_grad(Some(gx));
}
}
impl<T: Float> op::Op<T> for SliceGrad {
fn compute(&self, ctx: &mut crate::op::ComputeContext<T>) -> Result<(), crate::op::OpError> {
let x = ctx.input(0);
let mut gx = NdArray::zeros(x.shape());
// sliced view
gx.slice_mut(
ndarray::SliceInfo::<_, ndarray::IxDyn>::new(&self.indices)
.unwrap()
.as_ref(),
)
.zip_mut_with(&ctx.input(1), |a, &g| *a = g);
ctx.append_output(gx);
Ok(())
}
fn grad(&self, ctx: &mut crate::op::GradientContext<T>) {
// is this ok?
ctx.append_input_grad(None);
ctx.append_input_grad(None);
}
}
impl<T: Float> op::Op<T> for Squeeze {
fn compute(&self, ctx: &mut crate::op::ComputeContext<T>) -> Result<(), crate::op::OpError> {
let mut x = ctx.input(0).clone();
let mut axes = ctx
.input(1)
.iter()
.map(|a| a.to_isize().unwrap())
.collect::<Vec<_>>();
axes.sort();
for (adjust, &i) in axes.iter().enumerate() {
let axis = if i < 0 {
(x.ndim() as isize + i as isize) as usize
} else {
i as usize
};
let axis = axis - adjust;
assert_eq!(1, x.shape()[axis], "Can't squeeze a dim whose size != 1");
// axis making ok
x = x.index_axis_move(ndarray::Axis(axis), 0);
}
ctx.append_output_view(x);
Ok(())
}
fn grad(&self, ctx: &mut crate::op::GradientContext<T>) {
ctx.append_input_grad(Some(expand_dims(ctx.output_grad(), &ctx.input(1))));
ctx.append_input_grad(None);
}
}
impl<T: Float> op::Op<T> for ExpandDims {
fn compute(&self, ctx: &mut crate::op::ComputeContext<T>) -> Result<(), crate::op::OpError> {
let ret = ctx.input(0);
let mut axes = ctx
.input(1)
.iter()
.map(|a| a.to_isize().unwrap())
.collect::<Vec<_>>();
axes.sort();
let mut output_shape = ret.shape().to_vec();
for &i in axes.iter() {
let axis = if i < 0 {
(ret.ndim() as isize + i as isize) as usize
} else {
i as usize
};
output_shape.insert(axis, 1);
}
ctx.append_output_view(ret.into_shape(output_shape).unwrap());
Ok(())
}
fn grad(&self, ctx: &mut crate::op::GradientContext<T>) {
ctx.append_input_grad(Some(squeeze(ctx.output_grad(), &ctx.input(1))));
ctx.append_input_grad(None);
}
}<|fim▁end|>
|
}
} else {
|
<|file_name|>conf.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Backend.AI Library documentation build configuration file, created by
# sphinx-quickstart on Tue Mar 1 21:26:20 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
on_rtd = os.environ.get('READTHEDOCS') == 'True'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = ['.rst']
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Backend.AI API Documentation'
copyright = '2015-2020, Lablup Inc.'
author = 'Lablup Inc.'<|fim▁hole|># The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'v5.20191215'
# The full version, including alpha/beta/rc tags.
release = '20.03'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'tango'
highlight_language = 'python3'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
numfig = True
intersphinx_mapping = {
'client-py':
('https://client-py.docs.backend.ai/en/latest/', None),
}
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'BackendAIAPIDoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'BackendAIDoc.tex', 'Backend.AI API Documentation',
author, 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'backend.ai', 'Backend.AI API Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Backend.AI', 'Backend.AI API Documentation',
author, 'Backend.AI', 'Backend.AI is a hassle-free backend for AI programming and service.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False<|fim▁end|>
| |
<|file_name|>decompose.js<|end_file_name|><|fim▁begin|>define(["./_base", "dojo/_base/lang", "./matrix"], <|fim▁hole|> // compare two FP numbers for equality
return Math.abs(a - b) <= 1e-6 * (Math.abs(a) + Math.abs(b)); // Boolean
}
function calcFromValues(/* Number */ r1, /* Number */ m1, /* Number */ r2, /* Number */ m2){
// summary:
// uses two close FP ration and their original magnitudes to approximate the result
if(!isFinite(r1)){
return r2; // Number
}else if(!isFinite(r2)){
return r1; // Number
}
m1 = Math.abs(m1); m2 = Math.abs(m2);
return (m1 * r1 + m2 * r2) / (m1 + m2); // Number
}
function transpose(matrix){
// matrix: dojox/gfx/matrix.Matrix2D
// a 2D matrix-like object
var M = new m.Matrix2D(matrix);
return lang.mixin(M, {dx: 0, dy: 0, xy: M.yx, yx: M.xy}); // dojox/gfx/matrix.Matrix2D
}
function scaleSign(/* dojox/gfx/matrix.Matrix2D */ matrix){
return (matrix.xx * matrix.yy < 0 || matrix.xy * matrix.yx > 0) ? -1 : 1; // Number
}
function eigenvalueDecomposition(matrix){
// matrix: dojox/gfx/matrix.Matrix2D
// a 2D matrix-like object
var M = m.normalize(matrix),
b = -M.xx - M.yy,
c = M.xx * M.yy - M.xy * M.yx,
d = Math.sqrt(b * b - 4 * c),
l1 = -(b + (b < 0 ? -d : d)) / 2,
l2 = c / l1,
vx1 = M.xy / (l1 - M.xx), vy1 = 1,
vx2 = M.xy / (l2 - M.xx), vy2 = 1;
if(eq(l1, l2)){
vx1 = 1, vy1 = 0, vx2 = 0, vy2 = 1;
}
if(!isFinite(vx1)){
vx1 = 1, vy1 = (l1 - M.xx) / M.xy;
if(!isFinite(vy1)){
vx1 = (l1 - M.yy) / M.yx, vy1 = 1;
if(!isFinite(vx1)){
vx1 = 1, vy1 = M.yx / (l1 - M.yy);
}
}
}
if(!isFinite(vx2)){
vx2 = 1, vy2 = (l2 - M.xx) / M.xy;
if(!isFinite(vy2)){
vx2 = (l2 - M.yy) / M.yx, vy2 = 1;
if(!isFinite(vx2)){
vx2 = 1, vy2 = M.yx / (l2 - M.yy);
}
}
}
var d1 = Math.sqrt(vx1 * vx1 + vy1 * vy1),
d2 = Math.sqrt(vx2 * vx2 + vy2 * vy2);
if(!isFinite(vx1 /= d1)){ vx1 = 0; }
if(!isFinite(vy1 /= d1)){ vy1 = 0; }
if(!isFinite(vx2 /= d2)){ vx2 = 0; }
if(!isFinite(vy2 /= d2)){ vy2 = 0; }
return { // Object
value1: l1,
value2: l2,
vector1: {x: vx1, y: vy1},
vector2: {x: vx2, y: vy2}
};
}
function decomposeSR(/* dojox/gfx/matrix.Matrix2D */ M, /* Object */ result){
// summary:
// decomposes a matrix into [scale, rotate]; no checks are done.
var sign = scaleSign(M),
a = result.angle1 = (Math.atan2(M.yx, M.yy) + Math.atan2(-sign * M.xy, sign * M.xx)) / 2,
cos = Math.cos(a), sin = Math.sin(a);
result.sx = calcFromValues(M.xx / cos, cos, -M.xy / sin, sin);
result.sy = calcFromValues(M.yy / cos, cos, M.yx / sin, sin);
return result; // Object
}
function decomposeRS(/* dojox/gfx/matrix.Matrix2D */ M, /* Object */ result){
// summary:
// decomposes a matrix into [rotate, scale]; no checks are done
var sign = scaleSign(M),
a = result.angle2 = (Math.atan2(sign * M.yx, sign * M.xx) + Math.atan2(-M.xy, M.yy)) / 2,
cos = Math.cos(a), sin = Math.sin(a);
result.sx = calcFromValues(M.xx / cos, cos, M.yx / sin, sin);
result.sy = calcFromValues(M.yy / cos, cos, -M.xy / sin, sin);
return result; // Object
}
return g.decompose = function(matrix){
// summary:
// Decompose a 2D matrix into translation, scaling, and rotation components.
// description:
// This function decompose a matrix into four logical components:
// translation, rotation, scaling, and one more rotation using SVD.
// The components should be applied in following order:
// | [translate, rotate(angle2), scale, rotate(angle1)]
// matrix: dojox/gfx/matrix.Matrix2D
// a 2D matrix-like object
var M = m.normalize(matrix),
result = {dx: M.dx, dy: M.dy, sx: 1, sy: 1, angle1: 0, angle2: 0};
// detect case: [scale]
if(eq(M.xy, 0) && eq(M.yx, 0)){
return lang.mixin(result, {sx: M.xx, sy: M.yy}); // Object
}
// detect case: [scale, rotate]
if(eq(M.xx * M.yx, -M.xy * M.yy)){
return decomposeSR(M, result); // Object
}
// detect case: [rotate, scale]
if(eq(M.xx * M.xy, -M.yx * M.yy)){
return decomposeRS(M, result); // Object
}
// do SVD
var MT = transpose(M),
u = eigenvalueDecomposition([M, MT]),
v = eigenvalueDecomposition([MT, M]),
U = new m.Matrix2D({xx: u.vector1.x, xy: u.vector2.x, yx: u.vector1.y, yy: u.vector2.y}),
VT = new m.Matrix2D({xx: v.vector1.x, xy: v.vector1.y, yx: v.vector2.x, yy: v.vector2.y}),
S = new m.Matrix2D([m.invert(U), M, m.invert(VT)]);
decomposeSR(VT, result);
S.xx *= result.sx;
S.yy *= result.sy;
decomposeRS(U, result);
S.xx *= result.sx;
S.yy *= result.sy;
return lang.mixin(result, {sx: S.xx, sy: S.yy}); // Object
};
});<|fim▁end|>
|
function (g, lang, m){
function eq(/* Number */ a, /* Number */ b){
// summary:
|
<|file_name|>build.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
extern crate peg;
use std::io;
use std::io::{Read, Write};
use std::fs;
use std::fs::{DirEntry, File};
use std::path::PathBuf;
use std::env;
fn main() {
peg::cargo_build("src/grammar.rustpeg");
if cfg!(feature = "file-tests") {
generate_tests().unwrap();
}
}
fn generate_tests() -> io::Result<()> {
let out_dir: PathBuf = env::var_os("OUT_DIR").unwrap().into();
let output_path = out_dir.join("file_tests").with_extension("rs");
let mut output_file = File::create(&output_path).unwrap();
output_file.write_all(
b"
use parser;
use runtime::Interpreter;
use ast_walk_interpreter::AstWalkInterpreter;
use typechecker::TypeChecker;
",
)?;
let mut tests = Vec::new();
tests.append(&mut generate_run_pass_tests()?);
tests.append(&mut generate_run_fail_tests()?);
tests.append(&mut generate_typecheck_pass_tests()?);
tests.append(&mut generate_typecheck_fail_tests()?);
let test_fns_str = tests.concat();
output_file.write_all(test_fns_str.as_bytes())?;
Ok(())
}
fn generate_run_pass_tests() -> io::Result<Vec<String>> {
let mut tests = Vec::new();
for entry in fs::read_dir("tests/run-pass")? {
let entry = entry?;
let test_name = test_name_from_entry(&entry, "run_pass");
let mut file = File::open(entry.path()).unwrap();
let mut content = String::new();
file.read_to_string(&mut content)?;
tests.push(make_run_pass_test_fn(&test_name, &content));
}
Ok(tests)
}
fn make_run_pass_test_fn(name: &str, code: &str) -> String {
format!(
"
#[test]
fn {name}() {{
let code = r#\"{code}\"#;
let ast = parser::program(code).unwrap();
let mut ast_walk_interpreter = AstWalkInterpreter::new();
ast_walk_interpreter
.run_ast_as_program(&ast)
.unwrap();
}}
",
name = name,
code = code
)
}
fn generate_run_fail_tests() -> io::Result<Vec<String>> {
let mut tests = Vec::new();
for entry in fs::read_dir("tests/run-fail")? {
let entry = entry?;
if entry.path().extension().unwrap() != "bl" {
continue;
}
let content = read_file(entry.path());
let expected_err_to_str = read_file(entry.path().with_extension("err"));
let test_name = test_name_from_entry(&entry, "run_fail");
tests.push(make_run_fail_test_fn(
&test_name,
&content,
&expected_err_to_str.trim(),
));
}
Ok(tests)
}
fn make_run_fail_test_fn(name: &str, code: &str, expected_err_str: &str) -> String {
format!(
"
#[test]
fn {name}() {{
let code = r#\"{code}\"#;
let ast = parser::program(code).unwrap();
let mut ast_walk_interpreter = AstWalkInterpreter::new();
let err = ast_walk_interpreter
.run_ast_as_program(&ast)
.unwrap_err();
assert_eq!(
format!(\"{{:?}}\", err),
r#\"{expected_err_str}\"#
);
}}
",
name = name,
code = code,
expected_err_str = expected_err_str
)
}
fn generate_typecheck_fail_tests() -> io::Result<Vec<String>> {
let mut tests = Vec::new();
for entry in fs::read_dir("tests/typecheck-fail")? {
let entry = entry?;
if entry.path().extension().unwrap() != "bl" {
continue;
}
let content = read_file(entry.path());
let expected_err_to_str = read_file(entry.path().with_extension("err"));
let test_name = test_name_from_entry(&entry, "typecheck_fail");
tests.push(make_typecheck_fail_test_fn(
&test_name,
&content,
&expected_err_to_str.trim(),
));
}
Ok(tests)
}
fn make_typecheck_fail_test_fn(name: &str, code: &str, expected_err_str: &str) -> String {
format!(
"
#[test]
fn {name}() {{
let code = r#\"{code}\"#;
let ast = parser::program(code).unwrap();
let mut checker = TypeChecker::new();
checker.check_program(&ast);
let issues = checker.get_issues();
assert!(!issues.is_empty());
assert_eq!(
format!(\"{{:?}}\", issues),
r#\"{expected_err_str}\"#
);
}}
",
name = name,
code = code,
expected_err_str = expected_err_str
)
}
fn generate_typecheck_pass_tests() -> io::Result<Vec<String>> {
let mut tests = Vec::new();
for entry in fs::read_dir("tests/typecheck-pass")? {
let entry = entry?;
if entry.path().extension().unwrap() != "bl" {
continue;
}
let content = read_file(entry.path());
let test_name = test_name_from_entry(&entry, "typecheck_pass");
tests.push(make_typecheck_pass_test_fn(&test_name, &content));
}
Ok(tests)
}
fn make_typecheck_pass_test_fn(name: &str, code: &str) -> String {
format!(
"
#[test]
fn {name}() {{
let code = r#\"{code}\"#;
let ast = parser::program(code).unwrap();
let mut checker = TypeChecker::new();
checker.check_program(&ast);
let issues = checker.get_issues();<|fim▁hole|> println!(\"{{:?}}\", issues);
assert_eq!(issues, []);
}}
",
name = name,
code = code
)
}
fn read_file(path: PathBuf) -> String {
let mut file = File::open(path).unwrap();
let mut content = String::new();
file.read_to_string(&mut content).unwrap();
content
}
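// Derives a test function name from the file stem: `prefix` + "_" + stem, with '-' replaced by '_'.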
fn test_name_from_entry(entry: &DirEntry, prefix: &str) -> String {
let path = entry.path();
let file_stem = path.file_stem();
let partial_test_name = file_stem
.unwrap()
.to_str()
.unwrap()
.to_owned()
.replace("-", "_");
prefix.to_owned() + "_" + &partial_test_name
}<|fim▁end|>
| |
<|file_name|>test_record_test.py<|end_file_name|><|fim▁begin|># Lint as: python3
"""Unit tests for test_record module."""
import sys
import unittest
from openhtf.core import test_record
def _get_obj_size(obj):
size = 0
for attr in obj.__slots__: # pytype: disable=attribute-error
size += sys.getsizeof(attr)
size += sys.getsizeof(getattr(obj, attr))
return size
class TestRecordTest(unittest.TestCase):
def test_attachment_data(self):
expected_data = b'test attachment data'
attachment = test_record.Attachment(expected_data, 'text')<|fim▁hole|> self.assertEqual(data, expected_data)
def test_attachment_memory_safety(self):
empty_attachment = test_record.Attachment(b'', 'text')
expected_obj_size = _get_obj_size(empty_attachment)
large_data = b'test attachment data' * 1000
attachment = test_record.Attachment(large_data, 'text')
obj_size = _get_obj_size(attachment)
self.assertEqual(obj_size, expected_obj_size)<|fim▁end|>
|
data = attachment.data
|
<|file_name|>contacts.client.service.js<|end_file_name|><|fim▁begin|>'use strict';
//Contacts service used to communicate Contacts REST endpoints
angular.module('contacts').factory('Contacts', ['$resource',
function ($resource) {
return $resource('api/v1/contacts/:contactId', {
contactId: '@_id'
}, {
update: {
method: 'PUT'<|fim▁hole|>]);<|fim▁end|>
|
}
});
}
|
<|file_name|>xgen_pwr.go<|end_file_name|><|fim▁begin|>package pwr
// DO NOT EDIT THIS FILE. GENERATED BY xgen.
import (
"bits"
"mmio"
"unsafe"
"stm32/o/f411xe/mmap"
)
type PWR_Periph struct {
CR RCR
CSR RCSR
}
func (p *PWR_Periph) BaseAddr() uintptr {
return uintptr(unsafe.Pointer(p))
}
//emgo:const
var PWR = (*PWR_Periph)(unsafe.Pointer(uintptr(mmap.PWR_BASE)))
type CR uint32
func (b CR) Field(mask CR) int {
return bits.Field32(uint32(b), uint32(mask))
}
func (mask CR) J(v int) CR {
return CR(bits.MakeField32(v, uint32(mask)))
}
type RCR struct{ mmio.U32 }
func (r *RCR) Bits(mask CR) CR { return CR(r.U32.Bits(uint32(mask))) }
func (r *RCR) StoreBits(mask, b CR) { r.U32.StoreBits(uint32(mask), uint32(b)) }
func (r *RCR) SetBits(mask CR) { r.U32.SetBits(uint32(mask)) }
func (r *RCR) ClearBits(mask CR) { r.U32.ClearBits(uint32(mask)) }
func (r *RCR) Load() CR { return CR(r.U32.Load()) }
func (r *RCR) Store(b CR) { r.U32.Store(uint32(b)) }
func (r *RCR) AtomicStoreBits(mask, b CR) { r.U32.AtomicStoreBits(uint32(mask), uint32(b)) }
func (r *RCR) AtomicSetBits(mask CR) { r.U32.AtomicSetBits(uint32(mask)) }
func (r *RCR) AtomicClearBits(mask CR) { r.U32.AtomicClearBits(uint32(mask)) }
type RMCR struct{ mmio.UM32 }
func (rm RMCR) Load() CR { return CR(rm.UM32.Load()) }
func (rm RMCR) Store(b CR) { rm.UM32.Store(uint32(b)) }
func (p *PWR_Periph) LPDS() RMCR {
return RMCR{mmio.UM32{&p.CR.U32, uint32(LPDS)}}
}
func (p *PWR_Periph) PDDS() RMCR {
return RMCR{mmio.UM32{&p.CR.U32, uint32(PDDS)}}
}
func (p *PWR_Periph) CWUF() RMCR {
return RMCR{mmio.UM32{&p.CR.U32, uint32(CWUF)}}
}
func (p *PWR_Periph) CSBF() RMCR {
return RMCR{mmio.UM32{&p.CR.U32, uint32(CSBF)}}
}
func (p *PWR_Periph) PVDE() RMCR {
return RMCR{mmio.UM32{&p.CR.U32, uint32(PVDE)}}
}
func (p *PWR_Periph) PLS() RMCR {
return RMCR{mmio.UM32{&p.CR.U32, uint32(PLS)}}
}
func (p *PWR_Periph) DBP() RMCR {
return RMCR{mmio.UM32{&p.CR.U32, uint32(DBP)}}
}
func (p *PWR_Periph) FPDS() RMCR {
return RMCR{mmio.UM32{&p.CR.U32, uint32(FPDS)}}
}
func (p *PWR_Periph) LPLVDS() RMCR {
return RMCR{mmio.UM32{&p.CR.U32, uint32(LPLVDS)}}
}
func (p *PWR_Periph) MRLVDS() RMCR {
return RMCR{mmio.UM32{&p.CR.U32, uint32(MRLVDS)}}
}
func (p *PWR_Periph) ADCDC1() RMCR {
return RMCR{mmio.UM32{&p.CR.U32, uint32(ADCDC1)}}
}
func (p *PWR_Periph) VOS() RMCR {
return RMCR{mmio.UM32{&p.CR.U32, uint32(VOS)}}
}
func (p *PWR_Periph) FMSSR() RMCR {
return RMCR{mmio.UM32{&p.CR.U32, uint32(FMSSR)}}
}
func (p *PWR_Periph) FISSR() RMCR {
return RMCR{mmio.UM32{&p.CR.U32, uint32(FISSR)}}
}
type CSR uint32
func (b CSR) Field(mask CSR) int {
return bits.Field32(uint32(b), uint32(mask))
}
func (mask CSR) J(v int) CSR {
return CSR(bits.MakeField32(v, uint32(mask)))
}
type RCSR struct{ mmio.U32 }
func (r *RCSR) Bits(mask CSR) CSR { return CSR(r.U32.Bits(uint32(mask))) }
func (r *RCSR) StoreBits(mask, b CSR) { r.U32.StoreBits(uint32(mask), uint32(b)) }
func (r *RCSR) SetBits(mask CSR) { r.U32.SetBits(uint32(mask)) }
func (r *RCSR) ClearBits(mask CSR) { r.U32.ClearBits(uint32(mask)) }
func (r *RCSR) Load() CSR { return CSR(r.U32.Load()) }
func (r *RCSR) Store(b CSR) { r.U32.Store(uint32(b)) }
func (r *RCSR) AtomicStoreBits(mask, b CSR) { r.U32.AtomicStoreBits(uint32(mask), uint32(b)) }
func (r *RCSR) AtomicSetBits(mask CSR) { r.U32.AtomicSetBits(uint32(mask)) }
func (r *RCSR) AtomicClearBits(mask CSR) { r.U32.AtomicClearBits(uint32(mask)) }
type RMCSR struct{ mmio.UM32 }
func (rm RMCSR) Load() CSR { return CSR(rm.UM32.Load()) }
func (rm RMCSR) Store(b CSR) { rm.UM32.Store(uint32(b)) }
func (p *PWR_Periph) WUF() RMCSR {
return RMCSR{mmio.UM32{&p.CSR.U32, uint32(WUF)}}
}
func (p *PWR_Periph) SBF() RMCSR {
return RMCSR{mmio.UM32{&p.CSR.U32, uint32(SBF)}}
}
<|fim▁hole|>func (p *PWR_Periph) BRR() RMCSR {
return RMCSR{mmio.UM32{&p.CSR.U32, uint32(BRR)}}
}
func (p *PWR_Periph) EWUP() RMCSR {
return RMCSR{mmio.UM32{&p.CSR.U32, uint32(EWUP)}}
}
func (p *PWR_Periph) BRE() RMCSR {
return RMCSR{mmio.UM32{&p.CSR.U32, uint32(BRE)}}
}
func (p *PWR_Periph) VOSRDY() RMCSR {
return RMCSR{mmio.UM32{&p.CSR.U32, uint32(VOSRDY)}}
}<|fim▁end|>
|
func (p *PWR_Periph) PVDO() RMCSR {
return RMCSR{mmio.UM32{&p.CSR.U32, uint32(PVDO)}}
}
|
<|file_name|>OpId.java<|end_file_name|><|fim▁begin|>/**
* CArtAgO - DEIS, University of Bologna
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
package cartago;<|fim▁hole|>/**
* Unique identifier of an operation (instance)
* executed by an artifact
*
* @author aricci
*
*/
public class OpId implements java.io.Serializable {
private int id;
private ArtifactId aid;
private AgentId agentId;
private String opName;
OpId(ArtifactId aid, String opName, int id, AgentId ctxId){
this.id = id;
this.aid = aid;
this.agentId = ctxId;
this.opName = opName;
}
/**
* Get the numeric identifier of the operation id
*
* @return
*/
public int getId(){
return id;
}
/**
* Get the operation name.
*
* @return
*/
public String getOpName(){
return opName;
}
/**
* Get the id of the artifact where the operation has been executed
*
* @return
*/
public ArtifactId getArtifactId(){
return aid;
}
/**
	 * Get the identifier of the agent performing the operation
*
* @return
*/
public AgentId getAgentBodyId(){
return agentId;
}
AgentId getContextId(){
return agentId;
}
public boolean equals(Object obj){
return aid.equals(((OpId)obj).aid) && ((OpId)obj).id==id;
}
public String toString(){
return "opId("+id+","+opName+","+aid+","+agentId+")";
}
}<|fim▁end|>
| |
<|file_name|>EventType.java<|end_file_name|><|fim▁begin|>package org.asciicerebrum.neocortexengine.domain.events;
/**
*
* @author species8472
*/
public enum EventType {
/**
* Event thrown directly after the initialization of a new combat round.
*/
COMBATROUND_POSTINIT,
/**
* Event thrown before the initialization of a new combat round.
*/
COMBATROUND_PREINIT,
/**
* The event of gaining a new condition.
*/
CONDITION_GAIN,
/**
* The event of losing a condition.
*/
CONDITION_LOSE,
/**
* The event of applying the inflicted damage.
*/
DAMAGE_APPLICATION,
/**
* The event of inflicting damage.
*/<|fim▁hole|> END_TURN_END,
/**
* The event of some character starting its turn after the end turn of the
* previous character.
*/
END_TURN_START,
/**
* The event thrown when the single attack hits normally.
*/
SINGLE_ATTACK_HIT,
/**
* The event thrown when the single attack hits critically.
*/
SINGLE_ATTACK_HIT_CRITICAL,
/**
* The event thrown when the single attack misses.
*/
SINGLE_ATTACK_MISS,
/**
* The event thrown before a single attack is performed.
*/
SINGLE_ATTACK_PRE,
}<|fim▁end|>
|
DAMAGE_INFLICTED,
/**
* The event of some character ending its turn.
*/
|
<|file_name|>maps.ts<|end_file_name|><|fim▁begin|>//need the following ///reference line so that ambient @types .d.ts declarations get loaded.
/// <reference types="googlemaps" />
import * as xlib from "xlib";
import _ = xlib.lodash;
const log = new xlib.logging.Logger( __filename );
import Promise = xlib.promise.bluebird;
import __ = xlib.lolo;
import jquery = require( "jquery" );
let _initializeCompletePromise: Promise<string>;
export function initialize(/**
 * find/generate a key in the Google Cloud console; we generated ours from this link: https://developers.google.com/maps/documentation/javascript/get-api-key
* see this stackoverflow question for more details: http://stackoverflow.com/questions/35700182/apinotactivatedmaperror-for-simple-html-page-using-google-places-api/41898012#41898012
*/
mapsApiKey: string ): Promise<string> {
//log.warn("blib.maps.initialize()");
if ( _initializeCompletePromise != null ) {
//init already started, so no op;
//log.warn("blib.maps.initialize() init already started, so no op;");
return _initializeCompletePromise;
}
_initializeCompletePromise = new Promise<string>(( resolve, reject ) => {
const finalUrl = `https://maps.googleapis.com/maps/api/js?key=${ mapsApiKey }&libraries=places`;
jquery.getScript( finalUrl, ( script: string, textStatus: string, jqXHR: JQueryXHR ) => {
//log.warn( "done loading maps script", { script, textStatus, jqXHR } );
//_directionsService = new google.maps.DirectionsService();
// _directionsService.route({
// origin: "chicago, il", //chicago //document.getElementById('start').value,
// destination: "oklahoma city, ok", //oklahoma city //document.getElementById('end').value,
// travelMode: 'DRIVING' as any
// }, function (response, status) {
// console.error("whut whut");
// if (status === google.maps.DirectionsStatus.OK) {
// console.log("directionsService call complete", { response, status });
// } else {
// window.alert('Directions request failed due to ' + status);
// }
// });
resolve( textStatus );
} );
// jquery.ajax( {
// url: finalUrl,
// type: "GET",
// dataType: "jsonp",
// cache: false,
// success: ( response ) => {
// //log.warn("done loading maps script",response);
// resolve( response );
// }
// } );
//resolve(undefined);
} );
if ( __.isDevCodeEnabled === true ) {
_initializeCompletePromise = _initializeCompletePromise.timeout( 10 * 1000, new Error( "mapsApi: maps api script load timeout" ) );
}
return _initializeCompletePromise;
}
let _autocompleteService: google.maps.places.AutocompleteService;
/**
* wraps the AutoCompleteService class, described here: https://developers.google.com/maps/documentation/javascript/reference#AutocompleteService
* @param request
*/
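/*
 * Usage sketch (illustrative only, not part of the original module). Assumes this module
 * is imported as `maps` and that initialize() has already been called with a valid key:
 *
 *   maps.initialize( "YOUR_MAPS_API_KEY" )
 *     .then(() => maps.getAutocompletePlacePredictions( { input: "1600 Amphitheatre" } ))
 *     .then(( predictions ) => predictions.forEach(( p ) => console.log( p.description ) ));
 */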
export function getAutocompletePlacePredictions( requestOptions: google.maps.places.AutocompletionRequest, retryAttempt = 0 ): Promise<google.maps.places.AutocompletePrediction[]> {
log.errorAndThrowIfFalse( _initializeCompletePromise != null, "need to call maps.initialize() first" );
if ( xlib.stringHelper.isNullOrEmpty( requestOptions.input ) === true ) {
//no input, so no op
return Promise.resolve( [] );
}
return new Promise<google.maps.places.AutocompletePrediction[]>(( resolve, reject ) => {
if ( _initializeCompletePromise.isRejected() !== false ) {
log.assert( false, "script load failed! investigate", { error: _initializeCompletePromise.reason() } );
}
//make sure our scripts api is loaded first
return _initializeCompletePromise.then(() => {
let _callback = function ( predictions: google.maps.places.AutocompletePrediction[], status: google.maps.places.PlacesServiceStatus ) {
//switch PlacesServiceStatus from https://developers.google.com/maps/documentation/javascript/reference#AutocompleteService
switch ( status ) {
case google.maps.places.PlacesServiceStatus.OK:
return resolve( predictions );
case google.maps.places.PlacesServiceStatus.ZERO_RESULTS:
return resolve( [] );
case google.maps.places.PlacesServiceStatus.UNKNOWN_ERROR:
//try again
if ( retryAttempt > 2 ) {
return reject( new Error( `${ status }:The PlacesService request could not be processed due to a server error. We retried 3 times and now give up.` ) );
} else {
						//resolve with the retried call so its eventual result reaches the caller
						return resolve( Promise.delay( retryAttempt * 1000 ).then(() => { return getAutocompletePlacePredictions( requestOptions, retryAttempt + 1 ); } ) );
}
case google.maps.places.PlacesServiceStatus.OVER_QUERY_LIMIT:
return reject( new Error( `${ status }:The application has gone over its request quota.` ) );
case google.maps.places.PlacesServiceStatus.REQUEST_DENIED:
return reject( new Error( `${ status }:The application is not allowed to use the PlacesService.` ) );
case google.maps.places.PlacesServiceStatus.INVALID_REQUEST:
return reject( new Error( `${ status }:This request was invalid.` ) );
default:
return reject( new Error( `${ status }:Unhandled status type. please contact devs to investigate blib.mapsApi.getAutocompletePlacePredictions() and provide them this error message.` ) );
}
}
if ( _autocompleteService == null ) {
_autocompleteService = new google.maps.places.AutocompleteService();
}
_autocompleteService.getPlacePredictions( requestOptions, _callback );
//_autocompleteService.getQueryPredictions(requestOptions, _callback);
} );
} ).catch(( err ) => {
throw log.error( "error in mapsApi.getAutocompletePlacePredictions()", { err, requestOptions, retryAttempt } );
} );
}
let _placesService: google.maps.places.PlacesService;
let _mapDiv: HTMLDivElement;
/**
* https://developers.google.com/maps/documentation/javascript/places#place_details
* @param request
* @param retryAttempt
*/
export function getPlaceDetails( request: google.maps.places.PlaceDetailsRequest, retryAttempt = 0 ): Promise<google.maps.places.PlaceResult | null> {
log.errorAndThrowIfFalse( _initializeCompletePromise != null, "need to call maps.initialize() first" );
if ( xlib.stringHelper.isNullOrEmpty( request.placeId ) === true ) {
//no input, so reject
return Promise.reject( log.error( "request.placeId not found", { request } ) );
}
return new Promise<google.maps.places.PlaceResult | null>(( resolve, reject ) => {
if ( _initializeCompletePromise.isRejected() !== false ) {
log.assert( false, "script load failed! investigate", { error: _initializeCompletePromise.reason() } );
}
//make sure our scripts api is loaded first
return _initializeCompletePromise.then(() => {
let _callback = function ( place: google.maps.places.PlaceResult, status: google.maps.places.PlacesServiceStatus ) {
//switch PlacesServiceStatus from https://developers.google.com/maps/documentation/javascript/reference#AutocompleteService
switch ( status ) {
case google.maps.places.PlacesServiceStatus.OK:
return resolve( place );
case google.maps.places.PlacesServiceStatus.ZERO_RESULTS:
return resolve( null );
case google.maps.places.PlacesServiceStatus.UNKNOWN_ERROR:
//try again
if ( retryAttempt > 2 ) {
return reject( new Error( `${ status }:The PlacesService request could not be processed due to a server error. We retried 3 times and now give up.` ) );
} else {
//retry with backoff
						return resolve( Promise.delay( retryAttempt * 1000 ).then(() => { return getPlaceDetails( request, retryAttempt + 1 ); } ) );
}
case google.maps.places.PlacesServiceStatus.OVER_QUERY_LIMIT:
return reject( new Error( `${ status }:The application has gone over its request quota.` ) );
case google.maps.places.PlacesServiceStatus.REQUEST_DENIED:
return reject( new Error( `${ status }:The application is not allowed to use the PlacesService.` ) );
case google.maps.places.PlacesServiceStatus.INVALID_REQUEST:
return reject( new Error( `${ status }:This request was invalid.` ) );
default:
return reject( new Error( `${ status }:Unhandled status type. please contact devs to investigate blib.mapsApi.getPlaceDetails() and provide them this error message.` ) );
}
}
if ( _mapDiv == null ) {
_mapDiv = document.createElement( "div" );
_mapDiv.id = "map";
document.body.appendChild( _mapDiv );
//var map = new google.maps.Map(_mapDiv, {
// center: { lat: -33.866, lng: 151.196 },
// zoom: 15
//});
//var infowindow = new google.maps.InfoWindow();
}
if ( _placesService == null ) {
_placesService = new google.maps.places.PlacesService( _mapDiv );
}
_placesService.getDetails( request, _callback );
//_autocompleteService.getQueryPredictions(requestOptions, _callback);
} );
} );
}
let _directionsService: google.maps.DirectionsService;
let _directionsDisplay: HTMLDivElement;
/**
* https://developers.google.com/maps/documentation/javascript/directions
* @param request
* @param retryAttempt
*/
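/*
 * Usage sketch (illustrative only, not part of the original module). Mirrors the
 * commented-out example inside the function below; assumes initialize() has already resolved:
 *
 *   getDirections( { origin: "chicago, il", destination: "oklahoma city, ok", travelMode: "DRIVING" as any } )
 *     .then(( { result, status } ) => console.log( status, result ));
 */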
export function getDirections( request: google.maps.DirectionsRequest, retryAttempt = 0 ): Promise<{ result: google.maps.DirectionsResult | null, status: google.maps.DirectionsStatus }> {
log.errorAndThrowIfFalse( _initializeCompletePromise != null, "need to call maps.initialize() first" );
if ( request == null || request.origin == null || request.origin == "" || request.destination == null || request.destination == "" ) {
//no input, so reject
return Promise.reject( log.error( "request is missing origin and/or destination", { request } ) );
}
return new Promise<{ result: google.maps.DirectionsResult | null, status: google.maps.DirectionsStatus }>(( resolve, reject ) => {
if ( _initializeCompletePromise.isRejected() !== false ) {
log.assert( false, "script load failed! investigate", { error: _initializeCompletePromise.reason() } );
}
//make sure our scripts api is loaded first
return _initializeCompletePromise.then(() => {
//////////////////////// EXAMPLE, WORKS
// _directionsService = new google.maps.DirectionsService();
// _directionsService.route( {
// origin: "chicago, il", //chicago //document.getElementById('start').value,
// destination: "oklahoma city, ok", //oklahoma city //document.getElementById('end').value,
// travelMode: 'DRIVING' as any
// }, function ( response, status ) {
// console.error( "whut whut" );
// if ( status === google.maps.DirectionsStatus.OK ) {
// console.log( "directionsService call complete", { response, status } );
// } else {
// window.alert( 'Directions request failed due to ' + status );
// }
// resolve( response );
// } );
////////////// BETWEEN SCRATCH
// request = {
// origin: "chicago, il", //chicago //document.getElementById('start').value,
// destination: "oklahoma city, ok", //oklahoma city //document.getElementById('end').value,
// travelMode: 'DRIVING' as any
// };
let _callback = function ( result: google.maps.DirectionsResult, status: google.maps.DirectionsStatus ) {
//switch PlacesServiceStatus from https://developers.google.com/maps/documentation/javascript/reference#AutocompleteService
switch ( status ) {
case google.maps.DirectionsStatus.OK:
return resolve( { result, status } );
case google.maps.DirectionsStatus.ZERO_RESULTS:
case google.maps.DirectionsStatus.NOT_FOUND:
return resolve( { result, status } );
case google.maps.DirectionsStatus.UNKNOWN_ERROR:
//try again
if ( retryAttempt > 2 ) {
return reject( new Error( `${ status }:The maps DirectionsService request could not be processed due to a server error. We retried 3 times and now give up.` ) );
} else {
//retry with backoff
						return resolve( Promise.delay( retryAttempt * 1000 ).then(() => { return getDirections( request, retryAttempt + 1 ); } ) );
}
case google.maps.DirectionsStatus.OVER_QUERY_LIMIT:
return reject( new Error( `${ status }:The application has gone over its request quota.` ) );
case google.maps.DirectionsStatus.REQUEST_DENIED:
return reject( new Error( `${ status }:The application is not allowed to use the DirectionsService.` ) );
case google.maps.DirectionsStatus.INVALID_REQUEST:
//return reject( new Error( `${ status }:This request was invalid.` ) );
return reject( new xlib.exception.Exception( `${ status }:This request was invalid.`, { data: { result, status } } ) );
default:
return reject( new Error( `${ status }:Unhandled status type. please contact devs to investigate blib.mapsApi.getDirections() and provide them this error message.` ) );
}
// console.error( "whut whut" );
// if ( status === google.maps.DirectionsStatus.OK ) {
// console.log( "directionsService call complete", { result, status } );
// } else {
// window.alert( 'Directions request failed due to ' + status );
// }
// resolve( result );
}
if ( _directionsService == null ) {
_directionsService = new google.maps.DirectionsService();
}
_directionsService.route( request, _callback );
// // // let _callback = function ( result: google.maps.DirectionsResult, status: google.maps.DirectionsStatus ) {
// // // //switch PlacesServiceStatus from https://developers.google.com/maps/documentation/javascript/reference#AutocompleteService
// // // switch ( status ) {
// // // case google.maps.DirectionsStatus.OK:
// // // return resolve( result );
// // // // case google.maps.DirectionsStatus.ZERO_RESULTS:
// // // // return resolve( null );
// // // // case google.maps.places.PlacesServiceStatus.UNKNOWN_ERROR:
// // // // //try again
// // // // if ( retryAttempt > 2 ) {
// // // // return reject( new Error( `${ status }:The PlacesService request could not be processed due to a server error. We retried 3 times and now give up.` ) );
// // // // } else {<|fim▁hole|> // // // // return Promise.delay( retryAttempt * 1000 ).then(() => { return getPlaceDetails( request, retryAttempt + 1 ); });
// // // // }
// // // // case google.maps.places.PlacesServiceStatus.OVER_QUERY_LIMIT:
// // // // return reject( new Error( `${ status }:The application has gone over its request quota.` ) );
// // // // case google.maps.places.PlacesServiceStatus.REQUEST_DENIED:
// // // // return reject( new Error( `${ status }:The application is not allowed to use the PlacesService.` ) );
// // // // case google.maps.places.PlacesServiceStatus.INVALID_REQUEST:
// // // // return reject( new Error( `${ status }:This request was invalid.` ) );
// // // default:
// // // return reject( new Error( `${ status }:Unhandled status type. please contact devs to investigate blib.mapsApi.getDirections() and provide them this error message.` ) );
// // // }
// // // }
// // // // if ( _mapDiv == null ) {
// // // // _mapDiv = document.createElement( "div" );
// // // // _mapDiv.id = "map";
// // // // document.body.appendChild( _mapDiv );
// // // // //var map = new google.maps.Map(_mapDiv, {
// // // // // center: { lat: -33.866, lng: 151.196 },
// // // // // zoom: 15
// // // // //});
// // // // //var infowindow = new google.maps.InfoWindow();
// // // // }
// // // // if ( _directionsDisplay == null ) {
// // // // _directionsDisplay = new google.maps.DirectionsRenderer();
// // // // }
/////////////////////////////////////////////////////////////////////////////////////////////////
//////////// CODE, DOES NOT WORK
// // // let _callback = function ( result: google.maps.DirectionsResult, status: google.maps.DirectionsStatus ) {
// // // //switch PlacesServiceStatus from https://developers.google.com/maps/documentation/javascript/reference#AutocompleteService
// // // switch ( status ) {
// // // case google.maps.DirectionsStatus.OK:
// // // return resolve( result );
// // // // case google.maps.DirectionsStatus.ZERO_RESULTS:
// // // // return resolve( null );
// // // // case google.maps.places.PlacesServiceStatus.UNKNOWN_ERROR:
// // // // //try again
// // // // if ( retryAttempt > 2 ) {
// // // // return reject( new Error( `${ status }:The PlacesService request could not be processed due to a server error. We retried 3 times and now give up.` ) );
// // // // } else {
// // // // //retry with backoff
// // // // return Promise.delay( retryAttempt * 1000 ).then(() => { return getPlaceDetails( request, retryAttempt + 1 ); });
// // // // }
// // // // case google.maps.places.PlacesServiceStatus.OVER_QUERY_LIMIT:
// // // // return reject( new Error( `${ status }:The application has gone over its request quota.` ) );
// // // // case google.maps.places.PlacesServiceStatus.REQUEST_DENIED:
// // // // return reject( new Error( `${ status }:The application is not allowed to use the PlacesService.` ) );
// // // // case google.maps.places.PlacesServiceStatus.INVALID_REQUEST:
// // // // return reject( new Error( `${ status }:This request was invalid.` ) );
// // // default:
// // // return reject( new Error( `${ status }:Unhandled status type. please contact devs to investigate blib.mapsApi.getDirections() and provide them this error message.` ) );
// // // }
// // // }
// // // // if ( _mapDiv == null ) {
// // // // _mapDiv = document.createElement( "div" );
// // // // _mapDiv.id = "map";
// // // // document.body.appendChild( _mapDiv );
// // // // //var map = new google.maps.Map(_mapDiv, {
// // // // // center: { lat: -33.866, lng: 151.196 },
// // // // // zoom: 15
// // // // //});
// // // // //var infowindow = new google.maps.InfoWindow();
// // // // }
// // // if ( _directionsService == null ) {
// // // _directionsService = new google.maps.DirectionsService();
// // // }
// // // // if ( _directionsDisplay == null ) {
// // // // _directionsDisplay = new google.maps.DirectionsRenderer();
// // // // }
// // // _directionsService.route( request, _callback );
// // // //_autocompleteService.getQueryPredictions(requestOptions, _callback);
} );
} );
}<|fim▁end|>
|
// // // // //retry with backoff
|
<|file_name|>abstractwsgi.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
# (c) 2015 Tuomas Airaksinen
#
# This file is part of automate-wsgi.
#
# automate-wsgi is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.<|fim▁hole|># GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with automate-wsgi. If not, see <http://www.gnu.org/licenses/>.
import threading
import socket
import tornado
import tornado.wsgi
import tornado.httpserver
import tornado.ioloop
import tornado.web
import tornado.websocket
from traits.api import Instance, Int, CStr, Dict, Str
from automate.common import threaded
from automate.service import AbstractUserService
class TornadoService(AbstractUserService):
"""
Abstract service that provides HTTP server for WSGI applications.
"""
#: Which ip address to listen. Use ``0.0.0.0`` (default) to listen to all local networking interfaces.
http_ipaddr = CStr("0.0.0.0")
#: HTTP (or HTTPS if using SSL) port to listen
http_port = Int(3000)
#: Path to ssl certificate file. If set, SSL will be used.
#:
#: .. tip::
#:
#: You may use script scripts/generate_selfsigned_certificate.sh to generate a
#: self-signed openssl certificate.
ssl_certificate = CStr
#: Path to ssl private key file
ssl_private_key = CStr
#: Number of listener threads to spawn
num_threads = Int(5)
#: Extra static dirs you want to serve. Example::
#:
#: static_dirs = {'/my_static/(.*)': '/path/to/my_static'}
static_dirs = Dict(key_trait=Str, value_trait=Str)
_server = Instance(tornado.ioloop.IOLoop)
_web_thread = Instance(threading.Thread)
@property
def is_alive(self):
return self._web_thread and self._web_thread.is_alive()
def get_wsgi_application(self):
"""
Get WSGI function. Implement this in subclasses.
"""
raise NotImplementedError
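    # Illustrative subclass sketch (not part of the original module); the WSGI import
    # below is hypothetical:
    #
    #   class MyWebService(TornadoService):
    #       def get_wsgi_application(self):
    #           from myproject.wsgi import application
    #           return application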
def get_websocket(self):
return None
def get_filehandler_class(self):
return tornado.web.StaticFileHandler
def get_tornado_handlers(self):
tornado_handlers = []
websocket = self.get_websocket()
if websocket:
tornado_handlers.append(('/socket', websocket))
for entrypoint, path in self.static_dirs.iteritems():
tornado_handlers.append((entrypoint, self.get_filehandler_class(), {'path': path}))
wsgi_app = self.get_wsgi_application()
if wsgi_app:
wsgi_container = tornado.wsgi.WSGIContainer(wsgi_app)
tornado_handlers.append(('.*', tornado.web.FallbackHandler, dict(fallback=wsgi_container)))
return tornado_handlers
def setup(self):
tornado_app = tornado.web.Application(self.get_tornado_handlers())
if self.ssl_certificate and self.ssl_private_key:
ssl_options = {
"certfile": self.ssl_certificate,
"keyfile": self.ssl_private_key,
}
else:
ssl_options = None
server = tornado.httpserver.HTTPServer(tornado_app, ssl_options=ssl_options)
try:
server.listen(self.http_port, self.http_ipaddr)
except socket.error as e:
self.logger.error('Could not start server: %s', e)
self._server = tornado.ioloop.IOLoop.instance()
if not self._server._running:
self._web_thread = threading.Thread(target=threaded(self._server.start),
name="%s::%s" % (self.system.name, self.__class__.__name__))
self._web_thread.start()
else:
self.logger.debug('Tornado IOLoop already running, no need to start new')
def cleanup(self):
if self.is_alive:
self._server.stop()<|fim▁end|>
|
#
# automate-wsgi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
<|file_name|>devstack.py<|end_file_name|><|fim▁begin|>"""
Specific overrides to the base prod settings to make development easier.
"""
# Silence noisy logs
import logging
from os.path import abspath, dirname, join
from corsheaders.defaults import default_headers as corsheaders_default_headers
# pylint: enable=unicode-format-string # lint-amnesty, pylint: disable=bad-option-value
#####################################################################
from edx_django_utils.plugins import add_plugins
from openedx.core.djangoapps.plugins.constants import ProjectType, SettingsType
from .production import * # pylint: disable=wildcard-import, unused-wildcard-import
# Don't use S3 in devstack, fall back to filesystem
del DEFAULT_FILE_STORAGE
MEDIA_ROOT = "/edx/var/edxapp/uploads"
ORA2_FILEUPLOAD_BACKEND = 'django'
DEBUG = True
USE_I18N = True
DEFAULT_TEMPLATE_ENGINE['OPTIONS']['debug'] = True
LMS_BASE = 'localhost:18000'
CMS_BASE = 'localhost:18010'
SITE_NAME = LMS_BASE
SESSION_COOKIE_NAME = 'lms_sessionid'
# By default don't use a worker, execute tasks as if they were local functions
CELERY_ALWAYS_EAGER = True
HTTPS = 'off'
LMS_ROOT_URL = f'http://{LMS_BASE}'
LMS_INTERNAL_ROOT_URL = LMS_ROOT_URL
ENTERPRISE_API_URL = f'{LMS_INTERNAL_ROOT_URL}/enterprise/api/v1/'
IDA_LOGOUT_URI_LIST = [
'http://localhost:18130/logout/', # ecommerce
'http://localhost:18150/logout/', # credentials
'http://localhost:18381/logout/', # discovery
'http://localhost:18010/logout/', # studio
]
################################ LOGGERS ######################################
LOG_OVERRIDES = [
('common.djangoapps.track.contexts', logging.CRITICAL),
('common.djangoapps.track.middleware', logging.CRITICAL),
('lms.djangoapps.discussion.django_comment_client.utils', logging.CRITICAL),
]
for log_name, log_level in LOG_OVERRIDES:
logging.getLogger(log_name).setLevel(log_level)
# Docker does not support the syslog socket at /dev/log. Rely on the console.
LOGGING['handlers']['local'] = LOGGING['handlers']['tracking'] = {
'class': 'logging.NullHandler',
}
LOGGING['loggers']['tracking']['handlers'] = ['console']
################################ EMAIL ########################################
EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'
EMAIL_FILE_PATH = '/edx/src/ace_messages/'
############################ PYFS XBLOCKS SERVICE #############################
# Set configuration for Django pyfilesystem
DJFS = {
'type': 'osfs',
'directory_root': 'lms/static/djpyfs',
'url_root': '/static/djpyfs',
}
################################ DEBUG TOOLBAR ################################
INSTALLED_APPS += ['debug_toolbar']
MIDDLEWARE += [
'lms.djangoapps.discussion.django_comment_client.utils.QueryCountDebugMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
]
INTERNAL_IPS = ('127.0.0.1',)
DEBUG_TOOLBAR_PANELS = (<|fim▁hole|> 'debug_toolbar.panels.headers.HeadersPanel',
'debug_toolbar.panels.request.RequestPanel',
'debug_toolbar.panels.sql.SQLPanel',
'debug_toolbar.panels.signals.SignalsPanel',
'debug_toolbar.panels.logging.LoggingPanel',
'debug_toolbar.panels.history.HistoryPanel',
# ProfilingPanel has been intentionally removed for default devstack.py
# runtimes for performance reasons. If you wish to re-enable it in your
# local development environment, please create a new settings file
# that imports and extends devstack.py.
)
DEBUG_TOOLBAR_CONFIG = {
'SHOW_TOOLBAR_CALLBACK': 'lms.envs.devstack.should_show_debug_toolbar',
}
def should_show_debug_toolbar(request): # lint-amnesty, pylint: disable=missing-function-docstring
# We always want the toolbar on devstack unless running tests from another Docker container
hostname = request.get_host()
if hostname.startswith('edx.devstack.lms:') or hostname.startswith('lms.devstack.edx:'):
return False
return True
########################### PIPELINE #################################
PIPELINE['PIPELINE_ENABLED'] = False
STATICFILES_STORAGE = 'openedx.core.storage.DevelopmentStorage'
# Revert to the default set of finders as we don't want the production pipeline
STATICFILES_FINDERS = [
'openedx.core.djangoapps.theming.finders.ThemeFilesFinder',
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
# Disable JavaScript compression in development
PIPELINE['JS_COMPRESSOR'] = None
# Whether to run django-require in debug mode.
REQUIRE_DEBUG = DEBUG
PIPELINE['SASS_ARGUMENTS'] = '--debug-info'
# Load development webpack donfiguration
WEBPACK_CONFIG_PATH = 'webpack.dev.config.js'
########################### VERIFIED CERTIFICATES #################################
FEATURES['AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING'] = True
########################### External REST APIs #################################
FEATURES['ENABLE_OAUTH2_PROVIDER'] = True
FEATURES['ENABLE_MOBILE_REST_API'] = True
FEATURES['ENABLE_VIDEO_ABSTRACTION_LAYER_API'] = True
########################## SECURITY #######################
FEATURES['ENABLE_MAX_FAILED_LOGIN_ATTEMPTS'] = False
FEATURES['SQUELCH_PII_IN_LOGS'] = False
FEATURES['PREVENT_CONCURRENT_LOGINS'] = False
########################### Milestones #################################
FEATURES['MILESTONES_APP'] = True
########################### Entrance Exams #################################
FEATURES['ENTRANCE_EXAMS'] = True
################################ COURSE LICENSES ################################
FEATURES['LICENSING'] = True
########################## Courseware Search #######################
FEATURES['ENABLE_COURSEWARE_SEARCH'] = False
FEATURES['ENABLE_COURSEWARE_SEARCH_FOR_COURSE_STAFF'] = True
SEARCH_ENGINE = 'search.elastic.ElasticSearchEngine'
########################## Dashboard Search #######################
FEATURES['ENABLE_DASHBOARD_SEARCH'] = False
########################## Certificates Web/HTML View #######################
FEATURES['CERTIFICATES_HTML_VIEW'] = True
########################## Course Discovery #######################
LANGUAGE_MAP = {
'terms': dict(ALL_LANGUAGES),
'name': 'Language',
}
COURSE_DISCOVERY_MEANINGS = {
'org': {
'name': 'Organization',
},
'modes': {
'name': 'Course Type',
'terms': {
'honor': 'Honor',
'verified': 'Verified',
},
},
'language': LANGUAGE_MAP,
}
FEATURES['ENABLE_COURSE_DISCOVERY'] = False
# Setting for overriding default filtering facets for Course discovery
# COURSE_DISCOVERY_FILTERS = ["org", "language", "modes"]
FEATURES['COURSES_ARE_BROWSEABLE'] = True
HOMEPAGE_COURSE_MAX = 9
# Software secure fake page feature flag
FEATURES['ENABLE_SOFTWARE_SECURE_FAKE'] = True
# Setting for the testing of Software Secure Result Callback
VERIFY_STUDENT["SOFTWARE_SECURE"] = {
"API_ACCESS_KEY": "BBBBBBBBBBBBBBBBBBBB",
"API_SECRET_KEY": "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC",
}
DISABLE_ACCOUNT_ACTIVATION_REQUIREMENT_SWITCH = "verify_student_disable_account_activation_requirement"
# Skip enrollment start date filtering
SEARCH_SKIP_ENROLLMENT_START_DATE_FILTERING = True
########################## Shopping cart ##########################
FEATURES['ENABLE_COSMETIC_DISPLAY_PRICE'] = True
######################### Program Enrollments #####################
FEATURES['ENABLE_ENROLLMENT_RESET'] = True
########################## Third Party Auth #######################
if FEATURES.get('ENABLE_THIRD_PARTY_AUTH') and (
'common.djangoapps.third_party_auth.dummy.DummyBackend' not in AUTHENTICATION_BACKENDS
):
AUTHENTICATION_BACKENDS = ['common.djangoapps.third_party_auth.dummy.DummyBackend'] + list(AUTHENTICATION_BACKENDS)
############## ECOMMERCE API CONFIGURATION SETTINGS ###############
ECOMMERCE_PUBLIC_URL_ROOT = 'http://localhost:18130'
ECOMMERCE_API_URL = 'http://edx.devstack.ecommerce:18130/api/v2'
############## Comments CONFIGURATION SETTINGS ###############
COMMENTS_SERVICE_URL = 'http://edx.devstack.forum:4567'
############## Credentials CONFIGURATION SETTINGS ###############
CREDENTIALS_INTERNAL_SERVICE_URL = 'http://edx.devstack.credentials:18150'
CREDENTIALS_PUBLIC_SERVICE_URL = 'http://localhost:18150'
############################### BLOCKSTORE #####################################
BLOCKSTORE_API_URL = "http://edx.devstack.blockstore:18250/api/v1/"
########################## PROGRAMS LEARNER PORTAL ##############################
LEARNER_PORTAL_URL_ROOT = 'http://localhost:8734'
########################## ENTERPRISE LEARNER PORTAL ##############################
ENTERPRISE_LEARNER_PORTAL_NETLOC = 'localhost:8734'
ENTERPRISE_LEARNER_PORTAL_BASE_URL = 'http://' + ENTERPRISE_LEARNER_PORTAL_NETLOC
########################## ENTERPRISE ADMIN PORTAL ##############################
ENTERPRISE_ADMIN_PORTAL_NETLOC = 'localhost:1991'
ENTERPRISE_ADMIN_PORTAL_BASE_URL = 'http://' + ENTERPRISE_ADMIN_PORTAL_NETLOC
###################### Cross-domain requests ######################
FEATURES['ENABLE_CORS_HEADERS'] = True
CORS_ALLOW_CREDENTIALS = True
CORS_ORIGIN_WHITELIST = ()
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_HEADERS = corsheaders_default_headers + (
'use-jwt-cookie',
)
LOGIN_REDIRECT_WHITELIST.extend([
CMS_BASE,
# Allow redirection to all micro-frontends.
# Please add your MFE if is not already listed here.
# Note: For this to work, the MFE must set BASE_URL in its .env.development to:
# BASE_URL=http://localhost:$PORT
# as opposed to:
# BASE_URL=localhost:$PORT
'localhost:1997', # frontend-app-account
'localhost:1976', # frontend-app-program-console
'localhost:1994', # frontend-app-gradebook
'localhost:2000', # frontend-app-learning
'localhost:2001', # frontend-app-course-authoring
'localhost:3001', # frontend-app-library-authoring
'localhost:18400', # frontend-app-publisher
'localhost:1993', # frontend-app-ora-grading
ENTERPRISE_LEARNER_PORTAL_NETLOC, # frontend-app-learner-portal-enterprise
ENTERPRISE_ADMIN_PORTAL_NETLOC, # frontend-app-admin-portal
])
###################### JWTs ######################
JWT_AUTH.update({
'JWT_AUDIENCE': 'lms-key',
'JWT_ISSUER': f'{LMS_ROOT_URL}/oauth2',
'JWT_ISSUERS': [{
'AUDIENCE': 'lms-key',
'ISSUER': f'{LMS_ROOT_URL}/oauth2',
'SECRET_KEY': 'lms-secret',
}],
'JWT_SECRET_KEY': 'lms-secret',
'JWT_SIGNING_ALGORITHM': 'RS512',
'JWT_PRIVATE_SIGNING_JWK': (
'{"e": "AQAB", "d": "RQ6k4NpRU3RB2lhwCbQ452W86bMMQiPsa7EJiFJUg-qBJthN0FMNQVbArtrCQ0xA1BdnQHThFiUnHcXfsTZUwmwvTu'
'iqEGR_MI6aI7h5D8vRj_5x-pxOz-0MCB8TY8dcuK9FkljmgtYvV9flVzCk_uUb3ZJIBVyIW8En7n7nV7JXpS9zey1yVLld2AbRG6W5--Pgqr9J'
'CI5-bLdc2otCLuen2sKyuUDHO5NIj30qGTaKUL-OW_PgVmxrwKwccF3w5uGNEvMQ-IcicosCOvzBwdIm1uhdm9rnHU1-fXz8VLRHNhGVv7z6mo'
'ghjNI0_u4smhUkEsYeshPv7RQEWTdkOQ", "n": "smKFSYowG6nNUAdeqH1jQQnH1PmIHphzBmwJ5vRf1vu48BUI5VcVtUWIPqzRK_LDSlZYh'
'9D0YFL0ZTxIrlb6Tn3Xz7pYvpIAeYuQv3_H5p8tbz7Fb8r63c1828wXPITVTv8f7oxx5W3lFFgpFAyYMmROC4Ee9qG5T38LFe8_oAuFCEntimW'
'xN9F3P-FJQy43TL7wG54WodgiM0EgzkeLr5K6cDnyckWjTuZbWI-4ffcTgTZsL_Kq1owa_J2ngEfxMCObnzGy5ZLcTUomo4rZLjghVpq6KZxfS'
'6I1Vz79ZsMVUWEdXOYePCKKsrQG20ogQEkmTf9FT_SouC6jPcHLXw", "q": "7KWj7l-ZkfCElyfvwsl7kiosvi-ppOO7Imsv90cribf88Dex'
'cO67xdMPesjM9Nh5X209IT-TzbsOtVTXSQyEsy42NY72WETnd1_nAGLAmfxGdo8VV4ZDnRsA8N8POnWjRDwYlVBUEEeuT_MtMWzwIKU94bzkWV'
'nHCY5vbhBYLeM", "p": "wPkfnjavNV1Hqb5Qqj2crBS9HQS6GDQIZ7WF9hlBb2ofDNe2K2dunddFqCOdvLXr7ydRcK51ZwSeHjcjgD1aJkHA'
'9i1zqyboxgd0uAbxVDo6ohnlVqYLtap2tXXcavKm4C9MTpob_rk6FBfEuq4uSsuxFvCER4yG3CYBBa4gZVU", "kid": "devstack_key", "'
'kty": "RSA"}'
),
'JWT_PUBLIC_SIGNING_JWK_SET': (
'{"keys": [{"kid": "devstack_key", "e": "AQAB", "kty": "RSA", "n": "smKFSYowG6nNUAdeqH1jQQnH1PmIHphzBmwJ5vRf1vu'
'48BUI5VcVtUWIPqzRK_LDSlZYh9D0YFL0ZTxIrlb6Tn3Xz7pYvpIAeYuQv3_H5p8tbz7Fb8r63c1828wXPITVTv8f7oxx5W3lFFgpFAyYMmROC'
'4Ee9qG5T38LFe8_oAuFCEntimWxN9F3P-FJQy43TL7wG54WodgiM0EgzkeLr5K6cDnyckWjTuZbWI-4ffcTgTZsL_Kq1owa_J2ngEfxMCObnzG'
'y5ZLcTUomo4rZLjghVpq6KZxfS6I1Vz79ZsMVUWEdXOYePCKKsrQG20ogQEkmTf9FT_SouC6jPcHLXw"}]}'
),
})
add_plugins(__name__, ProjectType.LMS, SettingsType.DEVSTACK)
######################### Django Rest Framework ########################
REST_FRAMEWORK['DEFAULT_RENDERER_CLASSES'] += (
'rest_framework.renderers.BrowsableAPIRenderer',
)
OPENAPI_CACHE_TIMEOUT = 0
#####################################################################
# Lastly, run any migrations, if needed.
MODULESTORE = convert_module_store_setting_if_needed(MODULESTORE)
SECRET_KEY = '85920908f28904ed733fe576320db18cabd7b6cd'
EDXNOTES_INTERNAL_API = 'http://edx.devstack.edxnotesapi:18120/api/v1'
EDXNOTES_CLIENT_NAME = 'edx_notes_api-backend-service'
############## Settings for Microfrontends #########################
LEARNING_MICROFRONTEND_URL = 'http://localhost:2000'
ACCOUNT_MICROFRONTEND_URL = 'http://localhost:1997'
AUTHN_MICROFRONTEND_URL = 'http://localhost:1999'
AUTHN_MICROFRONTEND_DOMAIN = 'localhost:1999'
################### FRONTEND APPLICATION DISCUSSIONS ###################
DISCUSSIONS_MICROFRONTEND_URL = 'http://localhost:2002'
################### FRONTEND APPLICATION DISCUSSIONS FEEDBACK URL###################
DISCUSSIONS_MFE_FEEDBACK_URL = None
############## Docker based devstack settings #######################
FEATURES.update({
'AUTOMATIC_AUTH_FOR_TESTING': True,
'ENABLE_DISCUSSION_SERVICE': True,
'SHOW_HEADER_LANGUAGE_SELECTOR': True,
# Enable enterprise integration by default.
# See https://github.com/edx/edx-enterprise/blob/master/docs/development.rst for
# more background on edx-enterprise.
# Toggle this off if you don't want anything to do with enterprise in devstack.
'ENABLE_ENTERPRISE_INTEGRATION': True,
})
ENABLE_MKTG_SITE = os.environ.get('ENABLE_MARKETING_SITE', False)
MARKETING_SITE_ROOT = os.environ.get('MARKETING_SITE_ROOT', 'http://localhost:8080')
MKTG_URLS = {
'ABOUT': '/about',
'ACCESSIBILITY': '/accessibility',
'AFFILIATES': '/affiliate-program',
'BLOG': '/blog',
'CAREERS': '/careers',
'CONTACT': '/support/contact_us',
'COURSES': '/course',
'DONATE': '/donate',
'ENTERPRISE': '/enterprise',
'FAQ': '/student-faq',
'HONOR': '/edx-terms-service',
'HOW_IT_WORKS': '/how-it-works',
'MEDIA_KIT': '/media-kit',
'NEWS': '/news-announcements',
'PRESS': '/press',
'PRIVACY': '/edx-privacy-policy',
'ROOT': MARKETING_SITE_ROOT,
'SCHOOLS': '/schools-partners',
'SITE_MAP': '/sitemap',
'TRADEMARKS': '/trademarks',
'TOS': '/edx-terms-service',
'TOS_AND_HONOR': '/edx-terms-service',
'WHAT_IS_VERIFIED_CERT': '/verified-certificate',
}
ENTERPRISE_MARKETING_FOOTER_QUERY_PARAMS = {}
CREDENTIALS_SERVICE_USERNAME = 'credentials_worker'
COURSE_CATALOG_URL_ROOT = 'http://edx.devstack.discovery:18381'
COURSE_CATALOG_API_URL = f'{COURSE_CATALOG_URL_ROOT}/api/v1'
SYSTEM_WIDE_ROLE_CLASSES = os.environ.get("SYSTEM_WIDE_ROLE_CLASSES", SYSTEM_WIDE_ROLE_CLASSES)
SYSTEM_WIDE_ROLE_CLASSES.append(
'system_wide_roles.SystemWideRoleAssignment',
)
if FEATURES.get('ENABLE_ENTERPRISE_INTEGRATION'):
SYSTEM_WIDE_ROLE_CLASSES.append(
'enterprise.SystemWideEnterpriseUserRoleAssignment',
)
#####################################################################
# django-session-cookie middleware
DCS_SESSION_COOKIE_SAMESITE = 'Lax'
DCS_SESSION_COOKIE_SAMESITE_FORCE_ALL = True
########################## THEMING #######################
# If you want to enable theming in devstack, uncomment this section and add any relevant
# theme directories to COMPREHENSIVE_THEME_DIRS
# We have to import the private method here because production.py calls
# derive_settings('lms.envs.production') which runs _make_mako_template_dirs with
# the settings from production, which doesn't include these theming settings. Thus,
# the templating engine is unable to find the themed templates because they don't exist
# in it's path. Re-calling derive_settings doesn't work because the settings was already
# changed from a function to a list, and it can't be derived again.
# from .common import _make_mako_template_dirs
# ENABLE_COMPREHENSIVE_THEMING = True
# COMPREHENSIVE_THEME_DIRS = [
# "/edx/app/edxapp/edx-platform/themes/"
# ]
# TEMPLATES[1]["DIRS"] = _make_mako_template_dirs
# derive_settings(__name__)
# Uncomment the lines below if you'd like to see SQL statements in your devstack LMS log.
# LOGGING['handlers']['console']['level'] = 'DEBUG'
# LOGGING['loggers']['django.db.backends'] = {'handlers': ['console'], 'level': 'DEBUG', 'propagate': False}
################### Special Exams (Proctoring) and Prereqs ###################
FEATURES['ENABLE_SPECIAL_EXAMS'] = True
FEATURES['ENABLE_PREREQUISITE_COURSES'] = True
# Used in edx-proctoring for ID generation in lieu of SECRET_KEY - dummy value
# (ref MST-637)
PROCTORING_USER_OBFUSCATION_KEY = '85920908f28904ed733fe576320db18cabd7b6cd'
#################### Webpack Configuration Settings ##############################
WEBPACK_LOADER['DEFAULT']['TIMEOUT'] = 5
################# New settings must go ABOVE this line #################
########################################################################
# See if the developer has any local overrides.
if os.path.isfile(join(dirname(abspath(__file__)), 'private.py')):
from .private import * # pylint: disable=import-error,wildcard-import<|fim▁end|>
|
'debug_toolbar.panels.versions.VersionsPanel',
'debug_toolbar.panels.timer.TimerPanel',
'debug_toolbar.panels.settings.SettingsPanel',
|
<|file_name|>event.rs<|end_file_name|><|fim▁begin|>use std::mem;
use crate::syntax::SyntaxKind;
use crate::{ParseError, TreeSink};
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum Event {
BeginMarker,
Begin(SyntaxKind, Option<usize>),
Leaf(SyntaxKind),
End,
Error,
Tombstone,
}
pub fn process(sink: &mut dyn TreeSink, mut events: Vec<Event>) {
let mut forward_parents = Vec::new();
for i in 0..events.len() {
match mem::replace(&mut events[i], Event::Tombstone) {
Event::BeginMarker | Event::Tombstone => {}
Event::Begin(kind, forward_parent) => {
				// For events [A, B, C], B is A's forward_parent and C is B's forward_parent:
				// in normal control flow the parent-child relation is `A -> B -> C`,
				// but with the forward_parent mechanism it is recorded in reverse as `C <- B <- A`.
				// First, append `A` into parents.
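				// Worked example (illustrative, not from the original sources): with
				// events = [Begin(A, Some(1)), Begin(B, Some(1)), Begin(C, None)],
				// resolving index 0 collects [A, B, C] along the forward_parent offsets,
				// and the drain(..).rev() below starts C, then B, then A, so C becomes
				// the outermost node.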
forward_parents.push(kind);
let mut parent_idx = i;
let mut fp = forward_parent;
while let Some(fwd) = fp {
parent_idx += fwd;
fp = match mem::replace(&mut events[parent_idx], Event::Tombstone) {
Event::Begin(kind, forward_parent) => {
forward_parents.push(kind);
forward_parent
}
Event::Tombstone => None,
e => unreachable!("found unresolved {:#?} at position {}", e, parent_idx),
};
}
for kind in forward_parents.drain(..).rev() {
sink.start_node(kind);
}<|fim▁hole|> }
Event::Error => sink.error(ParseError("no error message handling yet".to_string())),
}
}
}<|fim▁end|>
|
}
Event::End => sink.finish_node(),
Event::Leaf(kind) => {
sink.token(kind);
|
<|file_name|>recurring_transaction_frequency.py<|end_file_name|><|fim▁begin|>"""
The Plaid API
The Plaid REST API. Please see https://plaid.com/docs/api for more details. # noqa: E501
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from plaid.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class RecurringTransactionFrequency(ModelSimple):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
          and for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
validations (dict): The key is the tuple path to the attribute
          and for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('value',): {
'UNKNOWN': "UNKNOWN",
'WEEKLY': "WEEKLY",
'BIWEEKLY': "BIWEEKLY",
'SEMI_MONTHLY': "SEMI_MONTHLY",
'MONTHLY': "MONTHLY",
},
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'value': (str,),
}
@cached_property
def discriminator():
return None
<|fim▁hole|>
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs):
"""RecurringTransactionFrequency - a model defined in OpenAPI
Note that value can be passed either in args or in kwargs, but not in both.
Args:
args[0] (str): describes the frequency of the transaction stream.., must be one of ["UNKNOWN", "WEEKLY", "BIWEEKLY", "SEMI_MONTHLY", "MONTHLY", ] # noqa: E501
Keyword Args:
value (str): describes the frequency of the transaction stream.., must be one of ["UNKNOWN", "WEEKLY", "BIWEEKLY", "SEMI_MONTHLY", "MONTHLY", ] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
# required up here when default value is not given
_path_to_item = kwargs.pop('_path_to_item', ())
if 'value' in kwargs:
value = kwargs.pop('value')
elif args:
args = list(args)
value = args.pop(0)
else:
raise ApiTypeError(
"value is required, but not passed in args or kwargs and doesn't have default",
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.value = value
if kwargs:
raise ApiTypeError(
"Invalid named arguments=%s passed to %s. Remove those invalid named arguments." % (
kwargs,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)<|fim▁end|>
|
attribute_map = {}
_composed_schemas = None
|
<|file_name|>development.js<|end_file_name|><|fim▁begin|>module.exports.name = 'development';
module.exports.dbtype = "mssql";<|fim▁hole|>module.exports.windwalkerPort = 8080;<|fim▁end|>
|
module.exports.datasource = {user: "sa", password: "sa", host: "localhost", port: 1433};
|
<|file_name|>urls.py<|end_file_name|><|fim▁begin|><|fim▁hole|> https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^scraper/', include('scraper.urls')),
url(r'^admin/', admin.site.urls),
]<|fim▁end|>
|
"""code_for_good URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
|
<|file_name|>Script_Doc_Export.cpp<|end_file_name|><|fim▁begin|>/*****************************************************************************
The Dark Mod GPL Source Code
This file is part of the The Dark Mod Source Code, originally based
on the Doom 3 GPL Source Code as published in 2011.
The Dark Mod Source Code is free software: you can redistribute it
and/or modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of the License,
or (at your option) any later version. For details, see LICENSE.TXT.
Project: The Dark Mod (http://www.thedarkmod.com/)
$Revision$ (Revision of last commit)
$Date$ (Date of last commit)
$Author$ (Author of last commit)
******************************************************************************/
#include "precompiled_game.h"
#pragma hdrstop
static bool versioned = RegisterVersionedFile( "$Id$" );
#include "Script_Doc_Export.h"
#include "../pugixml/pugixml.hpp"
namespace {
inline void Write( idFile &out, const idStr &str ) {
out.Write( str.c_str(), str.Length() );
}
inline void Writeln( idFile &out, const idStr &str ) {
out.Write( ( str + "\n" ).c_str(), str.Length() + 1 );
}
idStr GetEventArgumentString( const idEventDef &ev ) {
idStr out;
static const char *gen = "abcdefghijklmnopqrstuvwxyz";
int g = 0;
const EventArgs &args = ev.GetArgs();
for( EventArgs::const_iterator i = args.begin(); i != args.end(); ++i ) {
out += out.IsEmpty() ? "" : ", ";
idTypeDef *type = idCompiler::GetTypeForEventArg( i->type );
// Use a generic variable name "a", "b", "c", etc. if no name present
out += va( "%s %s", type->Name(), strlen( i->name ) > 0 ? i->name : idStr( gen[g++] ).c_str() );
}
return out;
}
inline bool EventIsPublic( const idEventDef &ev ) {
const char *eventName = ev.GetName();
if( eventName != NULL && ( eventName[0] == '<' || eventName[0] == '_' ) ) {
return false; // ignore all event names starting with '<', these mark internal events
}
const char *argFormat = ev.GetArgFormat();
int numArgs = strlen( argFormat );
// Check if any of the argument types is invalid before allocating anything
for( int arg = 0; arg < numArgs; ++arg ) {
idTypeDef *argType = idCompiler::GetTypeForEventArg( argFormat[arg] );
if( argType == NULL ) {
return false;
}
}
return true;
}
idList<idTypeInfo *> GetRespondingTypes( const idEventDef &ev ) {
idList<idTypeInfo *> tempList;
int numTypes = idClass::GetNumTypes();
for( int i = 0; i < numTypes; ++i ) {
idTypeInfo *info = idClass::GetType( i );
if( info->RespondsTo( ev ) ) {
tempList.Append( info );
}
}
idList<idTypeInfo *> finalList;
// Remove subclasses from the list, only keep top-level nodes
for( int i = 0; i < tempList.Num(); ++i ) {
bool isSubclass = false;
for( int j = 0; j < tempList.Num(); ++j ) {
if( i == j ) {
continue;
}
if( tempList[i]->IsType( *tempList[j] ) ) {
isSubclass = true;
break;
}
}
if( !isSubclass ) {
finalList.Append( tempList[i] );
}
}
return finalList;
}
int SortTypesByClassname( idTypeInfo *const *a, idTypeInfo *const *b ) {
return idStr::Cmp( ( *a )->classname, ( *b )->classname );
}
}
ScriptEventDocGenerator::ScriptEventDocGenerator() {
for( int i = 0; i < idEventDef::NumEventCommands(); ++i ) {
const idEventDef *def = idEventDef::GetEventCommand( i );
_events[std::string( def->GetName() )] = def;
}
time_t timer = time( NULL );
struct tm *t = localtime( &timer );
_dateStr = va( "%04u-%02u-%02u %02u:%02u", t->tm_year + 1900, t->tm_mon + 1, t->tm_mday, t->tm_hour, t->tm_min );
}
// --------- D3 Script -----------
idStr ScriptEventDocGeneratorD3Script::GetEventDocumentation( const idEventDef &ev ) {
idStr out = "/**\n";
out += " * ";
// Format line breaks in the description
idStr desc( ev.GetDescription() );
desc.Replace( "\n", "\n * " );
out += desc;
const EventArgs &args = ev.GetArgs();
idStr argDesc;
for( EventArgs::const_iterator i = args.begin(); i != args.end(); ++i ) {
if( idStr::Length( i->desc ) == 0 ) {
continue;
}
// Format line breaks in the description
idStr desc( i->desc );
desc.Replace( "\n", "\n * " );
argDesc += va( "\n * @%s: %s", i->name, desc.c_str() );
}
if( !argDesc.IsEmpty() ) {
out += "\n * ";
out += argDesc;
}
out += "\n */";
return out;
}
void ScriptEventDocGeneratorD3Script::WriteDoc( idFile &out ) {
Write( out, "#ifndef __TDM_EVENTS__\n" );
Write( out, "#define __TDM_EVENTS__\n\n" );
Write( out, "/**\n" );
Write( out, " * The Dark Mod Script Event Documentation\n" );
Write( out, " * \n" );
Write( out, " * This file has been generated automatically by the tdm_gen_script_event_doc console command.\n" );
Write( out, " * Last update: " + _dateStr + "\n" );
Write( out, " */\n" );
Write( out, "\n" );
Write( out, "// ===== THIS FILE ONLY SERVES FOR DOCUMENTATION PURPOSES, IT'S NOT ACTUALLY READ BY THE GAME =======\n" );
Write( out, "// ===== If you want to force this file to be loaded, change the line below to #if 1 ================\n" );
Write( out, "#if 0\n" );
Write( out, "\n" );
Write( out, "\n" );
for( EventMap::const_iterator i = _events.begin(); i != _events.end(); ++i ) {
const idEventDef &ev = *i->second;
if( !EventIsPublic( ev ) ) {
continue;
}
idTypeDef *returnType = idCompiler::GetTypeForEventArg( ev.GetReturnType() );
idStr signature = GetEventArgumentString( ev );
idStr documentation = GetEventDocumentation( ev );
idStr outStr = va( "\n%s\nscriptEvent %s\t\t%s(%s);\n",
documentation.c_str(), returnType->Name(), ev.GetName(), signature.c_str() );
Write( out, outStr );
}
Write( out, "\n" );
Write( out, "#endif\n" );
Write( out, "\n" );
Write( out, "\n\n#endif\n" );
}
// ------------- Mediawiki -------------
idStr ScriptEventDocGeneratorMediaWiki::GetEventDescription( const idEventDef &ev ) {
idStr out = ":";
// Format line breaks in the description
idStr desc( ev.GetDescription() );
desc.Replace( "\n", " " ); // no artificial line breaks
out += desc;
out += "\n";
const EventArgs &args = ev.GetArgs();
idStr argDesc;
for( EventArgs::const_iterator i = args.begin(); i != args.end(); ++i ) {
if( idStr::Length( i->desc ) == 0 ) {
continue;
}
// Format line breaks in the description
idStr desc( i->desc );
desc.Replace( "\n", " " ); // no artificial line breaks
argDesc += va( "::''%s'': %s\n", i->name, desc.c_str() );
}
if( !argDesc.IsEmpty() ) {
//out += "\n:";
out += argDesc;
}
return out;
}
idStr ScriptEventDocGeneratorMediaWiki::GetEventDoc( const idEventDef *ev, bool includeSpawnclassInfo ) {
idStr out;
idTypeDef *returnType = idCompiler::GetTypeForEventArg( ev->GetReturnType() );
idStr signature = GetEventArgumentString( *ev );
idStr description = GetEventDescription( *ev );
idStr outStr = va( "==== scriptEvent %s '''%s'''(%s); ====\n",
returnType->Name(), ev->GetName(), signature.c_str() );
out += outStr + "\n";
out += description + "\n";
// Get type response info
idList<idTypeInfo *> list = GetRespondingTypes( *ev );
list.Sort( SortTypesByClassname );
if( includeSpawnclassInfo ) {
idStr typeInfoStr;
for( int t = 0; t < list.Num(); ++t ) {
idTypeInfo *type = list[t];
typeInfoStr += ( typeInfoStr.IsEmpty() ) ? "" : ", ";
typeInfoStr += "''";
typeInfoStr += type->classname;
typeInfoStr += "''";
}
typeInfoStr = ":Spawnclasses responding to this event: " + typeInfoStr;
out += typeInfoStr + "\n";
}
return out;
}
void ScriptEventDocGeneratorMediaWiki::WriteDoc( idFile &out ) {
idStr version = va( "%s %d.%02d, code revision %d",
GAME_VERSION,
TDM_VERSION_MAJOR, TDM_VERSION_MINOR,
RevisionTracker::Instance().GetHighestRevision()
);
Writeln( out, "This page has been generated automatically by the tdm_gen_script_event_doc console command." );
Writeln( out, "" );
Writeln( out, "Generated by " + version + ", last update: " + _dateStr );
Writeln( out, "" );
Writeln( out, "{{tdm-scripting-reference-intro}}" );
// Table of contents, but don't show level 4 headlines
Writeln( out, "<div class=\"toclimit-4\">" ); // SteveL #3740
Writeln( out, "__TOC__" );
Writeln( out, "</div>" );
Writeln( out, "= TDM Script Event Reference =" );
Writeln( out, "" );
Writeln( out, "== All Events ==" );
Writeln( out, "=== Alphabetic List ===" ); // #3740 Two headers are required here for the toclimit to work. We can't skip a heading level.
typedef std::vector<const idEventDef *> EventList;
typedef std::map<idTypeInfo *, EventList> SpawnclassEventMap;
SpawnclassEventMap spawnClassEventMap;
for( EventMap::const_iterator i = _events.begin(); i != _events.end(); ++i ) {
const idEventDef *ev = i->second;
if( !EventIsPublic( *ev ) ) {
continue;
}
Write( out, GetEventDoc( ev, true ) );
idList<idTypeInfo *> respTypeList = GetRespondingTypes( *ev );
respTypeList.Sort( SortTypesByClassname );
// Collect info for each spawnclass
for( int t = 0; t < respTypeList.Num(); ++t ) {
idTypeInfo *type = respTypeList[t];
SpawnclassEventMap::iterator typeIter = spawnClassEventMap.find( type );
// Put the event in the class info map
if( typeIter == spawnClassEventMap.end() ) {
typeIter = spawnClassEventMap.insert( SpawnclassEventMap::value_type( type, EventList() ) ).first;
}
typeIter->second.push_back( ev );
}
}
// Write info grouped by class
Writeln( out, "" );
Writeln( out, "== Events by Spawnclass / Entity Type ==" );
for( SpawnclassEventMap::const_iterator i = spawnClassEventMap.begin();
i != spawnClassEventMap.end(); ++i ) {
Writeln( out, idStr( "=== " ) + i->first->classname + " ===" );
//Writeln(out, "Events:" + idStr(static_cast<int>(i->second.size())));
for( EventList::const_iterator t = i->second.begin(); t != i->second.end(); ++t ) {
Write( out, GetEventDoc( *t, false ) );
}
}
Writeln( out, "[[Category:Scripting]]" );
}
// -------- XML -----------
void ScriptEventDocGeneratorXml::WriteDoc( idFile &out ) {
pugi::xml_document doc;
idStr version = va( "%d.%02d", TDM_VERSION_MAJOR, TDM_VERSION_MINOR );
time_t timer = time( NULL );
struct tm *t = localtime( &timer );
idStr isoDateStr = va( "%04u-%02u-%02u", t->tm_year + 1900, t->tm_mon + 1, t->tm_mday );
pugi::xml_node eventDocNode = doc.append_child( "eventDocumentation" );
pugi::xml_node eventDocVersion = eventDocNode.append_child( "info" );
eventDocVersion.append_attribute( "game" ).set_value( GAME_VERSION );
eventDocVersion.append_attribute( "tdmversion" ).set_value( version.c_str() );
eventDocVersion.append_attribute( "coderevision" ).set_value( RevisionTracker::Instance().GetHighestRevision() );
eventDocVersion.append_attribute( "date" ).set_value( isoDateStr.c_str() );
for( EventMap::const_iterator i = _events.begin(); i != _events.end(); ++i ) {
const idEventDef *ev = i->second;
if( !EventIsPublic( *ev ) ) {
continue;
}
pugi::xml_node eventNode = eventDocNode.append_child( "event" );
eventNode.append_attribute( "name" ).set_value( ev->GetName() );
// Description
pugi::xml_node evDescNode = eventNode.append_child( "description" );
idStr desc( ev->GetDescription() );
desc.Replace( "\n", " " ); // no artificial line breaks
evDescNode.append_attribute( "value" ).set_value( desc.c_str() );
// Arguments
static const char *gen = "abcdefghijklmnopqrstuvwxyz";
int g = 0;
const EventArgs &args = ev->GetArgs();<|fim▁hole|> argNode.append_attribute( "name" ).set_value( strlen( i->name ) > 0 ? i->name : idStr( gen[g++] ).c_str() );
argNode.append_attribute( "type" ).set_value( type->Name() );
idStr desc( i->desc );
desc.Replace( "\n", " " ); // no artificial line breaks
argNode.append_attribute( "description" ).set_value( desc.c_str() );
}
idList<idTypeInfo *> respTypeList = GetRespondingTypes( *ev );
respTypeList.Sort( SortTypesByClassname );
// Responding Events
pugi::xml_node evRespTypesNode = eventNode.append_child( "respondingTypes" );
for( int t = 0; t < respTypeList.Num(); ++t ) {
idTypeInfo *type = respTypeList[t];
pugi::xml_node respTypeNode = evRespTypesNode.append_child( "respondingType" );
respTypeNode.append_attribute( "spawnclass" ).set_value( type->classname );
}
}
std::stringstream stream;
doc.save( stream );
out.Write( stream.str().c_str(), stream.str().length() );
}<|fim▁end|>
|
for( EventArgs::const_iterator i = args.begin(); i != args.end(); ++i ) {
idTypeDef *type = idCompiler::GetTypeForEventArg( i->type );
// Use a generic variable name "a", "b", "c", etc. if no name present
pugi::xml_node argNode = eventNode.append_child( "argument" );
|
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python<|fim▁hole|>
if sys.version_info >= (3,):
BOOST_LIB = 'boost_python3'
else:
BOOST_LIB = 'boost_python'
module_RF24Network = Extension('RF24Network',
libraries = ['rf24network', BOOST_LIB],
sources = ['pyRF24Network.cpp'])
setup(name='RF24Network',
version='1.0',
ext_modules=[module_RF24Network]
)<|fim▁end|>
|
from distutils.core import setup, Extension
import sys
|
<|file_name|>356_pegsnholes.cpp<|end_file_name|><|fim▁begin|>#include <cstdio>
#include <iostream>
#include <vector>
#include <cstring>
#include <cstdlib>
#include <cmath>
using namespace std;
#define DEBUG
#undef DEBUG //uncomment this line to pull out print statements
#ifdef DEBUG
#define TAB '\t'
#define debug(a, end) cout << #a << ": " << a << end
#else
#define debug(a, end)
#endif
typedef pair<int, int> point;
typedef long long int64; //for clarity
typedef vector<int> vi; //?
typedef vector<point> vp; //?
template<class T> void chmin(T &t, T f) { if (t > f) t = f; } //change min
template<class T> void chmax(T &t, T f) { if (t < f) t = f; } //change max
#define UN(v) SORT(v),v.erase(unique(v.begin(),v.end()),v.end())
#define SORT(c) sort((c).begin(),(c).end())
#define FOR(i,a,b) for (int i=(a); i < (b); i++)
#define REP(i,n) FOR(i,0,n)
#define CL(a,b) memset(a,b,sizeof(a))
#define CL2d(a,b,x,y) memset(a, b, sizeof(a[0][0])*x*y)
/*global variables*/
bool first_time = true;
const double PI = acos(-1.0);
int n;
/*global variables*/
void dump()
{
//dump data
}
bool getInput()
{
//get input
if (scanf("%d\n", &n) == EOF) return false;
if (!first_time) printf("\n");
else first_time = false;
return true;
}
bool in_circle(const point& x, double radius)
{
double y = (x.first*x.first + x.second*x.second);<|fim▁hole|> if (y < (radius*radius))
{ debug(y, TAB); debug(radius, endl); }
return y <= (radius*radius);
}
void process()
{
//process input
//int t = (int)ceil(((2*n-1)*(2*n-1))/4*PI)/4;
double r = (double)(2*n-1)/2;
int in = 0, out = 0;
point x;
REP(i, n)
{
REP(j, n)
{
x.first = i;
x.second = j; //top left
if (in_circle(x, r))
{
debug("contained segment", endl);
out++;
x.first = i+1;
x.second = j+1; //bottom right
if (in_circle(x, r))
{
debug("fully in", endl);
in++; out--;
}
}
}
}
printf("In the case n = %d, %d cells contain segments of the circle.\n", n, out*4);
printf("There are %d cells completely contained in the circle.\n", in*4);
}
int main()
{
while (getInput())
{
process();
/*output*/
/*output*/
}
return 0;
}<|fim▁end|>
| |
<|file_name|>payment_method_nonce.js<|end_file_name|><|fim▁begin|>//@ sourceMappingURL=payment_method_nonce.map
// Generated by CoffeeScript 1.6.1
var AttributeSetter, PaymentMethodNonce,
__hasProp = {}.hasOwnProperty,
__extends = function(child, parent) { for (var key in parent) { if (__hasProp.call(parent, key)) child[key] = parent[key]; } function ctor() { this.constructor = child; } ctor.prototype = parent.prototype; child.prototype = new ctor(); child.__super__ = parent.prototype; return child; };
AttributeSetter = require('./attribute_setter').AttributeSetter;
PaymentMethodNonce = (function(_super) {
__extends(PaymentMethodNonce, _super);
function PaymentMethodNonce() {
return PaymentMethodNonce.__super__.constructor.apply(this, arguments);
}
return PaymentMethodNonce;
<|fim▁hole|>
exports.PaymentMethodNonce = PaymentMethodNonce;<|fim▁end|>
|
})(AttributeSetter);
|
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
//! See [`TracingCollector`] for the main structure.
#![allow(unused_variables)]
#![allow(dead_code)]
pub mod model;
pub use model::TracingData;
use parking_lot::Mutex;
use std::sync::Arc;
use tracing::span::{Attributes, Record};
use tracing::subscriber::SetGlobalDefaultError;
use tracing::{Event, Id, Level, Metadata, Subscriber};
use tracing_subscriber::layer::{Context, Layer, SubscriberExt};
use tracing_subscriber::registry::LookupSpan;
use tracing_subscriber::Registry;
pub fn init(data: Arc<Mutex<TracingData>>, level: Level) -> Result<(), SetGlobalDefaultError> {
let collector = default_collector(data, level);
tracing::subscriber::set_global_default(collector)
}
pub fn default_collector(
data: Arc<Mutex<TracingData>>,
level: Level,
) -> impl Subscriber + for<'a> LookupSpan<'a> {
let tracing_data_subscriber = TracingCollector::new(data, level);
Registry::default().with(tracing_data_subscriber)
}
pub fn test_init() -> Result<(), SetGlobalDefaultError> {
let data = Arc::new(Mutex::new(TracingData::new()));
init(data, Level::INFO)
}
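// Illustrative usage sketch (not taken from the crate's documentation); the calling
// code below is an assumption for illustration only:
//
//     let data = Arc::new(Mutex::new(TracingData::new()));
//     init(data.clone(), Level::INFO).expect("failed to set global subscriber");
//     tracing::info!("hello");        // recorded into `data`
//     let collected = data.lock();    // inspect or serialize the collected TracingData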
/// A `tokio/tracing` subscriber that collects tracing data to [`TracingData`].
/// [`TracingData`] is independent from `tokio/tracing`. See its docstring for
/// more details.
pub struct TracingCollector {
level: Level,
data: Arc<Mutex<TracingData>>,
}
impl TracingCollector {
pub fn new(data: Arc<Mutex<TracingData>>, level: Level) -> Self {
Self { level, data }
}
}
impl<S: Subscriber> Layer<S> for TracingCollector {
fn enabled(&self, metadata: &Metadata<'_>, ctx: Context<'_, S>) -> bool {
metadata.level() <= &self.level
}
fn on_new_span(&self, attrs: &Attributes<'_>, id: &Id, ctx: Context<'_, S>) {
let callsite_id = attrs.metadata().callsite();
let mut data = self.data.lock();
let count = data.callsite_entered.entry(callsite_id).or_default();
*count += 1;
if *count < data.max_span_ref_count {
let espan_id = data.new_span(attrs);
data.insert_id_mapping(id, espan_id);
}
}
fn on_record(&self, span_id: &Id, values: &Record<'_>, _ctx: Context<'_, S>) {
let mut data = self.data.lock();
data.record(span_id, values);
}
fn on_follows_from(&self, span_id: &Id, follows: &Id, _ctx: Context<'_, S>) {
let mut data = self.data.lock();
if let Some(espan_id) = data.get_espan_id_from_trace(span_id) {
data.record_follows_from(&espan_id.into(), follows);
}
}
fn on_event(&self, event: &Event<'_>, _ctx: Context<'_, S>) {
let callsite_id = event.metadata().callsite();
let mut data = self.data.lock();
let count = data.callsite_entered.entry(callsite_id).or_default();
*count += 1;
if *count < data.max_span_ref_count {
data.event(event)
}
}
fn on_enter(&self, span_id: &Id, _ctx: Context<'_, S>) {
let mut data = self.data.lock();
if let Some(espan_id) = data.get_espan_id_from_trace(span_id) {
data.enter(&espan_id.into());
}
}
fn on_exit(&self, span_id: &Id, _ctx: Context<'_, S>) {
let mut data = self.data.lock();
if let Some(espan_id) = data.get_espan_id_from_trace(span_id) {
data.exit(&espan_id.into());
}
}
}
impl Subscriber for TracingCollector {
fn enabled(&self, metadata: &Metadata) -> bool {
metadata.level() <= &self.level
}
fn new_span(&self, span: &Attributes) -> Id {
let mut data = self.data.lock();
data.new_span(span).into()
}
fn record(&self, span: &Id, values: &Record) {
let mut data = self.data.lock();
data.record(span, values)
}
fn record_follows_from(&self, span: &Id, follows: &Id) {
let mut data = self.data.lock();
data.record_follows_from(span, follows)
}
fn event(&self, event: &Event) {<|fim▁hole|> let mut data = self.data.lock();
data.event(event)
}
fn enter(&self, span: &Id) {
let mut data = self.data.lock();
data.enter(span)
}
fn exit(&self, span: &Id) {
let mut data = self.data.lock();
data.exit(span)
}
}
#[cfg(test)]
mod tests {
use super::*;
use tracing::instrument;
#[instrument]
fn fib(x: u32) -> u32 {
match x {
0 | 1 => 1,
2 => 2,
_ => fib(x - 1) + fib(x - 2),
}
}
#[test]
fn test_instrument() {
let data = TracingData::new_for_test();
let data = Arc::new(Mutex::new(data));
let collector = default_collector(data.clone(), Level::INFO);
tracing::subscriber::with_default(collector, || fib(5));
let mut data = data.lock();
data.fixup_module_lines_for_tests();
assert_eq!(
data.ascii(&Default::default()),
r#"Process _ Thread _:
Start Dur.ms | Name Source
2 +34 | fib <mod> line <line>
| - x = 5 :
4 +18 \ fib <mod> line <line>
| - x = 4 :
6 +10 \ fib <mod> line <line>
| - x = 3 :
8 +2 \ fib <mod> line <line>
| - x = 2 :
12 +2 \ fib <mod> line <line>
| - x = 1 :
18 +2 \ fib <mod> line <line>
| - x = 2 :
24 +10 \ fib <mod> line <line>
| - x = 3 :
26 +2 \ fib <mod> line <line>
| - x = 2 :
30 +2 \ fib <mod> line <line>
| - x = 1 :
"#
);
}
#[test]
fn test_multi_threads() {
let data = TracingData::new_for_test();
let data = Arc::new(Mutex::new(data));
let collector = default_collector(data.clone(), Level::INFO);
tracing::subscriber::with_default(collector, || fib(0));
let cloned = data.clone();
let thread = std::thread::spawn(|| {
let collector = default_collector(cloned, Level::INFO);
tracing::subscriber::with_default(collector, || fib(3));
});
thread.join().unwrap();
let cloned = data.clone();
let thread = std::thread::spawn(|| {
let collector = default_collector(cloned, Level::INFO);
tracing::subscriber::with_default(collector, || fib(2));
});
thread.join().unwrap();
data.lock().fixup_module_lines_for_tests();
assert_eq!(
data.lock().ascii(&Default::default()),
r#"Process _ Thread _:
Start Dur.ms | Name Source
2 +2 | fib <mod> line <line>
| - x = 0 :
Process _ Thread _:
Start Dur.ms | Name Source
6 +10 | fib <mod> line <line>
| - x = 3 :
8 +2 \ fib <mod> line <line>
| - x = 2 :
12 +2 \ fib <mod> line <line>
| - x = 1 :
Process _ Thread _:
Start Dur.ms | Name Source
18 +2 | fib <mod> line <line>
| - x = 2 :
"#
);
}
#[test]
fn test_span_count_limit() {
let mut data = TracingData::new_for_test();
data.max_span_ref_count = 5;
let data = Arc::new(Mutex::new(data));
let collector = default_collector(data.clone(), Level::INFO);
tracing::subscriber::with_default(collector, || fib(10));
data.lock().fixup_module_lines_for_tests();
// fib(6) ... are not logged.
assert_eq!(
data.lock().ascii(&Default::default()),
r#"Process _ Thread _:
Start Dur.ms | Name Source
2 +14 | fib <mod> line <line>
| - x = 10 :
4 +10 | fib <mod> line <line>
| - x = 9 :
6 +6 | fib <mod> line <line>
| - x = 8 :
8 +2 | fib <mod> line <line>
| - x = 7 :
"#
);
}
#[test]
fn test_log_count_limit() {
let mut data = TracingData::new_for_test();
data.max_span_ref_count = 5;
let data = Arc::new(Mutex::new(data));
let collector = default_collector(data.clone(), Level::INFO);
let counts = tracing::subscriber::with_default(collector, || {
(0..10)
.map(|_| {
tracing::info!("log something");
data.lock().eventus_len_for_tests()
})
.collect::<Vec<usize>>()
});
// Repetitive logs are ignored.
assert_eq!(counts, [1, 2, 3, 4, 4, 4, 4, 4, 4, 4]);
}
}<|fim▁end|>
| |
<|file_name|>quotedstring.go<|end_file_name|><|fim▁begin|>package opts // import "github.com/tiborvass/docker/opts"
// QuotedString is a string that may have extra quotes around the value. The
// quotes are stripped from the value.
type QuotedString struct {<|fim▁hole|>// Set sets a new value
func (s *QuotedString) Set(val string) error {
*s.value = trimQuotes(val)
return nil
}
// Type returns the type of the value
func (s *QuotedString) Type() string {
return "string"
}
func (s *QuotedString) String() string {
return *s.value
}
func trimQuotes(value string) string {
lastIndex := len(value) - 1
for _, char := range []byte{'\'', '"'} {
if value[0] == char && value[lastIndex] == char {
return value[1:lastIndex]
}
}
return value
}
// NewQuotedString returns a new quoted string option
func NewQuotedString(value *string) *QuotedString {
return &QuotedString{value: value}
}<|fim▁end|>
|
value *string
}
|
<|file_name|>set-focus.ts<|end_file_name|><|fim▁begin|>import {Directive, AfterViewInit, ElementRef, Renderer} from '@angular/core';
@Directive({
selector: '[setFocus]'
})
export class SetFocusDirective implements AfterViewInit {
constructor(public renderer: Renderer, public elementRef: ElementRef) {
}
ngAfterViewInit() {
this.renderer.invokeElementMethod(<|fim▁hole|><|fim▁end|>
|
this.elementRef.nativeElement, 'focus', []);
}
}
|
<|file_name|>exceptions.ts<|end_file_name|><|fim▁begin|>import {ListWrapper, List} from 'angular2/src/facade/collection';
import {stringify, BaseException, isBlank} from 'angular2/src/facade/lang';
function findFirstClosedCycle(keys: List<any>): List<any> {
var res = [];
for (var i = 0; i < keys.length; ++i) {
if (ListWrapper.contains(res, keys[i])) {
res.push(keys[i]);
return res;
} else {
res.push(keys[i]);
}
}
return res;
}
function constructResolvingPath(keys: List<any>): string {
if (keys.length > 1) {
var reversed = findFirstClosedCycle(ListWrapper.reversed(keys));
var tokenStrs = ListWrapper.map(reversed, (k) => stringify(k.token));
return " (" + tokenStrs.join(' -> ') + ")";
} else {
return "";
}
}
/**
* Base class for all errors arising from misconfigured bindings.
*
* @exportedAs angular2/di_errors
*/
export class AbstractBindingError extends BaseException {
name: string;
message: string;
keys: List<any>;
constructResolvingMessage: Function;
// TODO(tbosch): Can't do key:Key as this results in a circular dependency!
constructor(key, constructResolvingMessage: Function, originalException?, originalStack?) {
super(null, originalException, originalStack);
this.keys = [key];
this.constructResolvingMessage = constructResolvingMessage;
this.message = this.constructResolvingMessage(this.keys);
}<|fim▁hole|> // TODO(tbosch): Can't do key:Key as this results in a circular dependency!
addKey(key): void {
this.keys.push(key);
this.message = this.constructResolvingMessage(this.keys);
}
toString(): string { return this.message; }
}
/**
* Thrown when trying to retrieve a dependency by `Key` from {@link Injector}, but the
* {@link Injector} does not have a {@link Binding} for {@link Key}.
*
* @exportedAs angular2/di_errors
*/
export class NoBindingError extends AbstractBindingError {
// TODO(tbosch): Can't do key:Key as this results in a circular dependency!
constructor(key) {
super(key, function(keys: List<any>) {
var first = stringify(ListWrapper.first(keys).token);
return `No provider for ${first}!${constructResolvingPath(keys)}`;
});
}
}
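// Illustrative sketch (not part of the original file): asking an injector for a token
// it has no binding for raises this error, e.g.
//
//   var injector = Injector.resolveAndCreate([]);
//   injector.get(Car);   // throws NoBindingError: "No provider for Car!"
//
// `Car` here is an assumed class used for illustration only.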
/**
* Thrown when trying to retrieve an async {@link Binding} using the sync API.
*
* ## Example
*
* ```javascript
* var injector = Injector.resolveAndCreate([
* bind(Number).toAsyncFactory(() => {
* return new Promise((resolve) => resolve(1 + 2));
* }),
* bind(String).toFactory((v) => { return "Value: " + v; }, [String])
* ]);
*
* injector.asyncGet(String).then((v) => expect(v).toBe('Value: 3'));
* expect(() => {
* injector.get(String);
* }).toThrowError(AsycBindingError);
* ```
*
* The above example throws because `String` depends on `Number` which is async. If any binding in
* the dependency graph is async then the graph can only be retrieved using the `asyncGet` API.
*
* @exportedAs angular2/di_errors
*/
export class AsyncBindingError extends AbstractBindingError {
// TODO(tbosch): Can't do key:Key as this results in a circular dependency!
constructor(key) {
super(key, function(keys: List<any>) {
var first = stringify(ListWrapper.first(keys).token);
return `Cannot instantiate ${first} synchronously. It is provided as a promise!${constructResolvingPath(keys)}`;
});
}
}
/**
* Thrown when dependencies form a cycle.
*
* ## Example:
*
* ```javascript
* class A {
* constructor(b:B) {}
* }
* class B {
* constructor(a:A) {}
* }
* ```
*
* Retrieving `A` or `B` throws a `CyclicDependencyError` as the graph above cannot be constructed.
*
* @exportedAs angular2/di_errors
*/
export class CyclicDependencyError extends AbstractBindingError {
// TODO(tbosch): Can't do key:Key as this results in a circular dependency!
constructor(key) {
super(key, function(keys: List<any>) {
return `Cannot instantiate cyclic dependency!${constructResolvingPath(keys)}`;
});
}
}
/**
 * Thrown when constructing a type throws an error.
*
* The `InstantiationError` class contains the original error plus the dependency graph which caused
* this object to be instantiated.
*
* @exportedAs angular2/di_errors
*/
export class InstantiationError extends AbstractBindingError {
causeKey;
// TODO(tbosch): Can't do key:Key as this results in a circular dependency!
constructor(originalException, originalStack, key) {
super(key, function(keys: List<any>) {
var first = stringify(ListWrapper.first(keys).token);
return `Error during instantiation of ${first}!${constructResolvingPath(keys)}.` +
` ORIGINAL ERROR: ${originalException}` +
`\n\n ORIGINAL STACK: ${originalStack}`;
}, originalException, originalStack);
this.causeKey = key;
}
}
/**
 * Thrown when an object other than {@link Binding} (or `Type`) is passed to {@link Injector}
* creation.
*
* @exportedAs angular2/di_errors
*/
export class InvalidBindingError extends BaseException {
message: string;
constructor(binding) {
super();
this.message = "Invalid binding - only instances of Binding and Type are allowed, got: " +
binding.toString();
}
toString(): string { return this.message; }
}
/**
* Thrown when the class has no annotation information.
*
* Lack of annotation information prevents the {@link Injector} from determining which dependencies
* need to be injected into the constructor.
*
* @exportedAs angular2/di_errors
*/
export class NoAnnotationError extends BaseException {
name: string;
message: string;
constructor(typeOrFunc, params: List<List<any>>) {
super();
var signature = [];
for (var i = 0, ii = params.length; i < ii; i++) {
var parameter = params[i];
if (isBlank(parameter) || parameter.length == 0) {
signature.push('?');
} else {
signature.push(ListWrapper.map(parameter, stringify).join(' '));
}
}
this.message = "Cannot resolve all parameters for " + stringify(typeOrFunc) + "(" +
signature.join(', ') + "). " +
'Make sure they all have valid type or annotations.';
}
toString(): string { return this.message; }
}
/**
* Thrown when getting an object by index.
*
* @exportedAs angular2/di_errors
*/
export class OutOfBoundsError extends BaseException {
message: string;
constructor(index) {
super();
this.message = `Index ${index} is out-of-bounds.`;
}
toString(): string { return this.message; }
}<|fim▁end|>
| |
<|file_name|>conv.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging as loggers
import numpy as np
import theano.tensor as T
from theano.tensor.nnet import conv
from theano.tensor.signal import downsample
from deepy.utils import build_activation, UniformInitializer
from deepy.layers.layer import NeuralLayer
logging = loggers.getLogger(__name__)
class Convolution(NeuralLayer):
"""
Convolution layer with max-pooling.
"""
def __init__(self, filter_shape, pool_size=(2, 2),
reshape_input=False, border_mode="valid", flatten_output=False,
disable_pooling=False, activation='linear', init=None):
super(Convolution, self).__init__("convolution")
self.filter_shape = filter_shape
self.output_dim = filter_shape[0]
self.pool_size = pool_size
self.reshape_input = reshape_input
self.flatten_output = flatten_output
self.activation = activation
self.disable_pooling = disable_pooling
self.border_mode = border_mode
self.initializer = init if init else self._default_initializer()
def setup(self):
self._setup_params()
self._setup_functions()
def output(self, x):
if self.reshape_input:
img_width = T.cast(T.sqrt(x.shape[1]), "int32")
x = x.reshape((x.shape[0], 1, img_width, img_width), ndim=4)
conv_out = conv.conv2d(
input=x,
filters=self.W_conv,
filter_shape=self.filter_shape,
image_shape=None,
border_mode=self.border_mode
)
pooled_out = downsample.max_pool_2d(
input=conv_out,
ds=self.pool_size,
ignore_border=True
)
if self.disable_pooling:
pooled_out = conv_out
output = self._activation_func(pooled_out + self.B_conv.dimshuffle('x', 0, 'x', 'x'))
if self.flatten_output:
output = output.flatten(2)
return output
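    # Illustrative note (not part of the original layer): with an assumed
    # filter_shape of (32, 1, 5, 5), pool_size (2, 2) and border_mode "valid",
    # a 28x28 single-channel input gives a 24x24 convolution output per filter
    # (28 - 5 + 1) and a 12x12 output after 2x2 max-pooling.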
<|fim▁hole|>
def _setup_params(self):
self.W_conv = self.create_weight(suffix="conv", initializer=self.initializer, shape=self.filter_shape)
self.B_conv = self.create_bias(self.filter_shape[0], suffix="conv")
self.register_parameters(self.W_conv, self.B_conv)
def _default_initializer(self):
fan_in = np.prod(self.filter_shape[1:])
fan_out = (self.filter_shape[0] * np.prod(self.filter_shape[2:]) /
np.prod(self.pool_size))
weight_scale = np.sqrt(6. / (fan_in + fan_out))
return UniformInitializer(scale=weight_scale)<|fim▁end|>
|
def _setup_functions(self):
self._activation_func = build_activation(self.activation)
|
<|file_name|>starter.dev.js<|end_file_name|><|fim▁begin|>;(function($){
$(document).ready( function(){
$('.fa_slider_simple').FeaturedArticles({
slide_selector : '.fa_slide',
nav_prev : '.go-back',
nav_next : '.go-forward',
nav_elem : '.main-nav .fa-nav',
effect : false,
// events
load : load,
before : before,
after : after,
resize : resize,
stop : stop,
start : start
});
});
var resizeDuration = 100;
var load = function(){
var options = this.settings(),
self = this;
this.progressBar = $(this).find('.progress-bar');
this.mouseOver;
// height resize
if( $(this).data('theme_opt_auto_resize') ){
this.sliderHeight = $(this).height();
var h = $( this.slides()[0] ).find(options.content_container).outerHeight() + 100;
setHeight = h > this.sliderHeight ? h : this.sliderHeight;
slide = this.slides()[0];
$(slide).css({
'height' : setHeight
});
self.center_img( slide );
$(this)
.css({
'max-height':'none',
'height' : this.sliderHeight
})
.animate({
'height' : setHeight
},{
queue: false,
duration:resizeDuration ,
complete: function(){
/*
$(this).css({
'max-height':setHeight
});
*/
}
});// end animate
}// end height resize
}
var before = function(d){
var options = this.settings(),
self = this;
if( typeof this.progressBar !== 'undefined' ){
this.progressBar.stop().css({'width':0});
}
// height resize
if( $(this).data('theme_opt_auto_resize') ){
var h = $( d.next ).find(options.content_container).outerHeight() + 100,
setHeight = h > this.sliderHeight ? h : this.sliderHeight;
$(d.next).css({
height : setHeight
});
self.center_img( d.next );
$(this)
.css({
'max-height':'none'
})
.animate({
'height' : setHeight
},{
queue: false,
duration:resizeDuration ,
complete: function(){
$(this).css({'max-height':setHeight});
}
});// end animate
}
// end height resize
}
var resize = function(){
var self = this,
options = this.settings();
// height resize
if( $(this).data('theme_opt_auto_resize') ){
var h = $( this.get_current() ).find(options.content_container).outerHeight() + 100;
		this.sliderHeight = $(this).height();
var setHeight = h > this.sliderHeight ? h : this.sliderHeight;
$( this.get_current() ).css({
height: setHeight
});
self.center_img( self.get_current() );
$(this)
.css({
'max-height':'none',
'height':this.sliderHeight
})
.animate({
'height' : setHeight
},{
queue: false,
duration:resizeDuration ,
complete: function(){
$(this).css({'max-height':setHeight});
}
});
}
// end height resize
}
var after = function(){
var options = this.settings(),
self = this,
duration = options.slide_duration;
//self.center_current_img();
if( this.mouseOver || this.stopped || !options.auto_slide ){
return;
}
<|fim▁hole|> {duration: duration, queue:false, complete: function(){
$(this).css({'width':0});
}
});
}
}
var stop = function(){
if( typeof this.progressBar !== 'undefined' ){
this.progressBar.stop().css({'width':0});
}
this.mouseOver = true;
}
var start = function(){
this.mouseOver = false;
if( this.animating() ){
return;
}
var options = this.settings(),
duration = options.slide_duration;
if( typeof this.progressBar !== 'undefined' ){
this.progressBar.css({width:0}).animate(
{'width' : '100%'},
{duration: duration, queue:false, complete: function(){
$(this).css({'width':0});
}
});
}
}
})(jQuery);<|fim▁end|>
|
if( typeof this.progressBar !== 'undefined' ){
this.progressBar.css({width:0}).animate(
{'width' : '100%'},
|
<|file_name|>TestCeilRelaxed.rs<|end_file_name|><|fim▁begin|>/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Don't edit this file! It is auto-generated by frameworks/rs/api/generate.sh.
<|fim▁hole|><|fim▁end|>
|
#include "TestCeil.rs"
#pragma rs_fp_relaxed
|
<|file_name|>network_codes.py<|end_file_name|><|fim▁begin|>ucodes = {
"U0001" : "High Speed CAN Communication Bus" ,
"U0002" : "High Speed CAN Communication Bus (Performance)" ,
"U0003" : "High Speed CAN Communication Bus (Open)" ,
"U0004" : "High Speed CAN Communication Bus (Low)" ,
"U0005" : "High Speed CAN Communication Bus (High)" ,
"U0006" : "High Speed CAN Communication Bus (Open)" ,
"U0007" : "High Speed CAN Communication Bus (Low)" ,
"U0008" : "High Speed CAN Communication Bus (High)" ,
"U0009" : "High Speed CAN Communication Bus (shorted to Bus)" ,
"U0010" : "Medium Speed CAN Communication Bus" ,
"U0011" : "Medium Speed CAN Communication Bus (Performance)" ,
"U0012" : "Medium Speed CAN Communication Bus (Open)" ,
"U0013" : "Medium Speed CAN Communication Bus (Low)" ,
"U0014" : "Medium Speed CAN Communication Bus (High)" ,
"U0015" : "Medium Speed CAN Communication Bus (Open)" ,
"U0016" : "Medium Speed CAN Communication Bus (Low)" ,
"U0017" : "Medium Speed CAN Communication Bus (High)" ,
"U0018" : "Medium Speed CAN Communication Bus (shorted to Bus)" ,
"U0019" : "Low Speed CAN Communication Bus" ,
"U0020" : "Low Speed CAN Communication Bus (Performance)" ,
"U0021" : "Low Speed CAN Communication Bus (Open)" ,
"U0022" : "Low Speed CAN Communication Bus (Low)" ,
"U0023" : "Low Speed CAN Communication Bus (High)" ,
"U0024" : "Low Speed CAN Communication Bus (Open)" ,
"U0025" : "Low Speed CAN Communication Bus (Low)" ,
"U0026" : "Low Speed CAN Communication Bus (High)" ,
"U0027" : "Low Speed CAN Communication Bus (shorted to Bus)" ,
"U0028" : "Vehicle Communication Bus A" ,
"U0029" : "Vehicle Communication Bus A (Performance)" ,
"U0030" : "Vehicle Communication Bus A (Open)" ,
"U0031" : "Vehicle Communication Bus A (Low)" ,
"U0032" : "Vehicle Communication Bus A (High)" ,
"U0033" : "Vehicle Communication Bus A (Open)" ,
"U0034" : "Vehicle Communication Bus A (Low)" ,
"U0035" : "Vehicle Communication Bus A (High)" ,
"U0036" : "Vehicle Communication Bus A (shorted to Bus A)" ,
"U0037" : "Vehicle Communication Bus B" ,
"U0038" : "Vehicle Communication Bus B (Performance)" ,
"U0039" : "Vehicle Communication Bus B (Open)" ,
"U0040" : "Vehicle Communication Bus B (Low)" ,
"U0041" : "Vehicle Communication Bus B (High)" ,
"U0042" : "Vehicle Communication Bus B (Open)" ,
"U0043" : "Vehicle Communication Bus B (Low)" ,
"U0044" : "Vehicle Communication Bus B (High)" ,
"U0045" : "Vehicle Communication Bus B (shorted to Bus B)" ,
"U0046" : "Vehicle Communication Bus C" ,
"U0047" : "Vehicle Communication Bus C (Performance)" ,
"U0048" : "Vehicle Communication Bus C (Open)" ,
"U0049" : "Vehicle Communication Bus C (Low)" ,
"U0050" : "Vehicle Communication Bus C (High)" ,
"U0051" : "Vehicle Communication Bus C (Open)" ,
"U0052" : "Vehicle Communication Bus C (Low)" ,
"U0053" : "Vehicle Communication Bus C (High)" ,
"U0054" : "Vehicle Communication Bus C (shorted to Bus C)" ,
"U0055" : "Vehicle Communication Bus D" ,
"U0056" : "Vehicle Communication Bus D (Performance)" ,
"U0057" : "Vehicle Communication Bus D (Open)" ,
"U0058" : "Vehicle Communication Bus D (Low)" ,
"U0059" : "Vehicle Communication Bus D (High)" ,
"U0060" : "Vehicle Communication Bus D (Open)" ,
"U0061" : "Vehicle Communication Bus D (Low)" ,
"U0062" : "Vehicle Communication Bus D (High)" ,
"U0063" : "Vehicle Communication Bus D (shorted to Bus D)" ,
"U0064" : "Vehicle Communication Bus E" ,
"U0065" : "Vehicle Communication Bus E (Performance)" ,
"U0066" : "Vehicle Communication Bus E (Open)" ,
"U0067" : "Vehicle Communication Bus E (Low)" ,
"U0068" : "Vehicle Communication Bus E (High)" ,
"U0069" : "Vehicle Communication Bus E (Open)" ,
"U0070" : "Vehicle Communication Bus E (Low)" , <|fim▁hole|> "U0075" : "Reserved by J2012" ,
"U0076" : "Reserved by J2012" ,
"U0077" : "Reserved by J2012" ,
"U0078" : "Reserved by J2012" ,
"U0079" : "Reserved by J2012" ,
"U0080" : "Reserved by J2012" ,
"U0081" : "Reserved by J2012" ,
"U0082" : "Reserved by J2012" ,
"U0083" : "Reserved by J2012" ,
"U0084" : "Reserved by J2012" ,
"U0085" : "Reserved by J2012" ,
"U0086" : "Reserved by J2012" ,
"U0087" : "Reserved by J2012" ,
"U0088" : "Reserved by J2012" ,
"U0089" : "Reserved by J2012" ,
"U0090" : "Reserved by J2012" ,
"U0091" : "Reserved by J2012" ,
"U0092" : "Reserved by J2012" ,
"U0093" : "Reserved by J2012" ,
"U0094" : "Reserved by J2012" ,
"U0095" : "Reserved by J2012" ,
"U0096" : "Reserved by J2012" ,
"U0097" : "Reserved by J2012" ,
"U0098" : "Reserved by J2012" ,
"U0099" : "Reserved by J2012" ,
"U0100" : "Lost Communication With ECM/PCM A" ,
"U0101" : "Lost Communication with TCM" ,
"U0102" : "Lost Communication with Transfer Case Control Module" ,
"U0103" : "Lost Communication With Gear Shift Module" ,
"U0104" : "Lost Communication With Cruise Control Module" ,
"U0105" : "Lost Communication With Fuel Injector Control Module" ,
"U0106" : "Lost Communication With Glow Plug Control Module" ,
"U0107" : "Lost Communication With Throttle Actuator Control Module" ,
"U0108" : "Lost Communication With Alternative Fuel Control Module" ,
"U0109" : "Lost Communication With Fuel Pump Control Module" ,
"U0110" : "Lost Communication With Drive Motor Control Module" ,
"U0111" : "Lost Communication With Battery Energy Control Module 'A'" ,
"U0112" : "Lost Communication With Battery Energy Control Module 'B'" ,
"U0113" : "Lost Communication With Emissions Critical Control Information" ,
"U0114" : "Lost Communication With Four-Wheel Drive Clutch Control Module" ,
"U0115" : "Lost Communication With ECM/PCM B" ,
"U0116" : "Reserved by J2012" ,
"U0117" : "Reserved by J2012" ,
"U0118" : "Reserved by J2012" ,
"U0119" : "Reserved by J2012" ,
"U0120" : "Reserved by J2012" ,
"U0121" : "Lost Communication With Anti-Lock Brake System (ABS) Control Module" ,
"U0122" : "Lost Communication With Vehicle Dynamics Control Module" ,
"U0123" : "Lost Communication With Yaw Rate Sensor Module" ,
"U0124" : "Lost Communication With Lateral Acceleration Sensor Module" ,
"U0125" : "Lost Communication With Multi-axis Acceleration Sensor Module" ,
"U0126" : "Lost Communication With Steering Angle Sensor Module" ,
"U0127" : "Lost Communication With Tire Pressure Monitor Module" ,
"U0128" : "Lost Communication With Park Brake Control Module" ,
"U0129" : "Lost Communication With Brake System Control Module" ,
"U0130" : "Lost Communication With Steering Effort Control Module" ,
"U0131" : "Lost Communication With Power Steering Control Module" ,
"U0132" : "Lost Communication With Ride Level Control Module" ,
"U0133" : "Reserved by J2012" ,
"U0134" : "Reserved by J2012" ,
"U0135" : "Reserved by J2012" ,
"U0136" : "Reserved by J2012" ,
"U0137" : "Reserved by J2012" ,
"U0138" : "Reserved by J2012" ,
"U0139" : "Reserved by J2012" ,
"U0140" : "Lost Communication With Body Control Module" ,
"U0141" : "Lost Communication With Body Control Module 'A'" ,
"U0142" : "Lost Communication With Body Control Module 'B'" ,
"U0143" : "Lost Communication With Body Control Module 'C'" ,
"U0144" : "Lost Communication With Body Control Module 'D'" ,
"U0145" : "Lost Communication With Body Control Module 'E'" ,
"U0146" : "Lost Communication With Gateway 'A'" ,
"U0147" : "Lost Communication With Gateway 'B'" ,
"U0148" : "Lost Communication With Gateway 'C'" ,
"U0149" : "Lost Communication With Gateway 'D'" ,
"U0150" : "Lost Communication With Gateway 'E'" ,
"U0151" : "Lost Communication With Restraints Control Module" ,
"U0152" : "Lost Communication With Side Restraints Control Module Left" ,
"U0153" : "Lost Communication With Side Restraints Control Module Right" ,
"U0154" : "Lost Communication With Restraints Occupant Sensing Control Module" ,
"U0155" : "Lost Communication With Instrument Panel Cluster (IPC) Control Module" ,
"U0156" : "Lost Communication With Information Center 'A'" ,
"U0157" : "Lost Communication With Information Center 'B'" ,
"U0158" : "Lost Communication With Head Up Display" ,
"U0159" : "Lost Communication With Parking Assist Control Module" ,
"U0160" : "Lost Communication With Audible Alert Control Module" ,
"U0161" : "Lost Communication With Compass Module" ,
"U0162" : "Lost Communication With Navigation Display Module" ,
"U0163" : "Lost Communication With Navigation Control Module" ,
"U0164" : "Lost Communication With HVAC Control Module" ,
"U0165" : "Lost Communication With HVAC Control Module Rear" ,
"U0166" : "Lost Communication With Auxiliary Heater Control Module" ,
"U0167" : "Lost Communication With Vehicle Immobilizer Control Module" ,
"U0168" : "Lost Communication With Vehicle Security Control Module" ,
"U0169" : "Lost Communication With Sunroof Control Module" ,
"U0170" : "Lost Communication With 'Restraints System Sensor A'" ,
"U0171" : "Lost Communication With 'Restraints System Sensor B'" ,
"U0172" : "Lost Communication With 'Restraints System Sensor C'" ,
"U0173" : "Lost Communication With 'Restraints System Sensor D'" ,
"U0174" : "Lost Communication With 'Restraints System Sensor E'" ,
"U0175" : "Lost Communication With 'Restraints System Sensor F'" ,
"U0176" : "Lost Communication With 'Restraints System Sensor G'" ,
"U0177" : "Lost Communication With 'Restraints System Sensor H'" ,
"U0178" : "Lost Communication With 'Restraints System Sensor I'" ,
"U0179" : "Lost Communication With 'Restraints System Sensor J'" ,
"U0180" : "Lost Communication With Automatic Lighting Control Module" ,
"U0181" : "Lost Communication With Headlamp Leveling Control Module" ,
"U0182" : "Lost Communication With Lighting Control Module Front" ,
"U0183" : "Lost Communication With Lighting Control Module Rear" ,
"U0184" : "Lost Communication With Radio" ,
"U0185" : "Lost Communication With Antenna Control Module" ,
"U0186" : "Lost Communication With Audio Amplifier" ,
"U0187" : "Lost Communication With Digital Disc Player/Changer Module 'A'" ,
"U0188" : "Lost Communication With Digital Disc Player/Changer Module 'B'" ,
"U0189" : "Lost Communication With Digital Disc Player/Changer Module 'C'" ,
"U0190" : "Lost Communication With Digital Disc Player/Changer Module 'D'" ,
"U0191" : "Lost Communication With Television" ,
"U0192" : "Lost Communication With Personal Computer" ,
"U0193" : "Lost Communication With 'Digital Audio Control Module A'" ,
"U0194" : "Lost Communication With 'Digital Audio Control Module B'" ,
"U0195" : "Lost Communication With Subscription Entertainment Receiver Module" ,
"U0196" : "Lost Communication With Rear Seat Entertainment Control Module" ,
"U0197" : "Lost Communication With Telephone Control Module" ,
"U0198" : "Lost Communication With Telematic Control Module" ,
"U0199" : "Lost Communication With 'Door Control Module A'" ,
"U0200" : "Lost Communication With 'Door Control Module B'" ,
"U0201" : "Lost Communication With 'Door Control Module C'" ,
"U0202" : "Lost Communication With 'Door Control Module D'" ,
"U0203" : "Lost Communication With 'Door Control Module E'" ,
"U0204" : "Lost Communication With 'Door Control Module F'" ,
"U0205" : "Lost Communication With 'Door Control Module G'" ,
"U0206" : "Lost Communication With Folding Top Control Module" ,
"U0207" : "Lost Communication With Moveable Roof Control Module" ,
"U0208" : "Lost Communication With 'Seat Control Module A'" ,
"U0209" : "Lost Communication With 'Seat Control Module B'" ,
"U0210" : "Lost Communication With 'Seat Control Module C'" ,
"U0211" : "Lost Communication With 'Seat Control Module D'" ,
"U0212" : "Lost Communication With Steering Column Control Module" ,
"U0213" : "Lost Communication With Mirror Control Module" ,
"U0214" : "Lost Communication With Remote Function Actuation" ,
"U0215" : "Lost Communication With 'Door Switch A'" ,
"U0216" : "Lost Communication With 'Door Switch B'" ,
"U0217" : "Lost Communication With 'Door Switch C'" ,
"U0218" : "Lost Communication With 'Door Switch D'" ,
"U0219" : "Lost Communication With 'Door Switch E'" ,
"U0220" : "Lost Communication With 'Door Switch F'" ,
"U0221" : "Lost Communication With 'Door Switch G'" ,
"U0222" : "Lost Communication With 'Door Window Motor A'" ,
"U0223" : "Lost Communication With 'Door Window Motor B'" ,
"U0224" : "Lost Communication With 'Door Window Motor C'" ,
"U0225" : "Lost Communication With 'Door Window Motor D'" ,
"U0226" : "Lost Communication With 'Door Window Motor E'" ,
"U0227" : "Lost Communication With 'Door Window Motor F'" ,
"U0228" : "Lost Communication With 'Door Window Motor G'" ,
"U0229" : "Lost Communication With Heated Steering Wheel Module" ,
"U0230" : "Lost Communication With Rear Gate Module" ,
"U0231" : "Lost Communication With Rain Sensing Module" ,
"U0232" : "Lost Communication With Side Obstacle Detection Control Module Left" ,
"U0233" : "Lost Communication With Side Obstacle Detection Control Module Right" ,
"U0234" : "Lost Communication With Convenience Recall Module" ,
"U0235" : "Lost Communication With Cruise Control Front Distance Range Sensor" ,
"U0300" : "Internal Control Module Software Incompatibility" ,
"U0301" : "Software Incompatibility with ECM/PCM" ,
"U0302" : "Software Incompatibility with Transmission Control Module" ,
"U0303" : "Software Incompatibility with Transfer Case Control Module" ,
"U0304" : "Software Incompatibility with Gear Shift Control Module" ,
"U0305" : "Software Incompatibility with Cruise Control Module" ,
"U0306" : "Software Incompatibility with Fuel Injector Control Module" ,
"U0307" : "Software Incompatibility with Glow Plug Control Module" ,
"U0308" : "Software Incompatibility with Throttle Actuator Control Module" ,
"U0309" : "Software Incompatibility with Alternative Fuel Control Module" ,
"U0310" : "Software Incompatibility with Fuel Pump Control Module" ,
"U0311" : "Software Incompatibility with Drive Motor Control Module" ,
"U0312" : "Software Incompatibility with Battery Energy Control Module A" ,
"U0313" : "Software Incompatibility with Battery Energy Control Module B" ,
"U0314" : "Software Incompatibility with Four-Wheel Drive Clutch Control Module" ,
"U0315" : "Software Incompatibility with Anti-Lock Brake System Control Module" ,
"U0316" : "Software Incompatibility with Vehicle Dynamics Control Module" ,
"U0317" : "Software Incompatibility with Park Brake Control Module" ,
"U0318" : "Software Incompatibility with Brake System Control Module" ,
"U0319" : "Software Incompatibility with Steering Effort Control Module" ,
"U0320" : "Software Incompatibility with Power Steering Control Module" ,
"U0321" : "Software Incompatibility with Ride Level Control Module" ,
"U0322" : "Software Incompatibility with Body Control Module" ,
"U0323" : "Software Incompatibility with Instrument Panel Control Module" ,
"U0324" : "Software Incompatibility with HVAC Control Module" ,
"U0325" : "Software Incompatibility with Auxiliary Heater Control Module" ,
"U0326" : "Software Incompatibility with Vehicle Immobilizer Control Module" ,
"U0327" : "Software Incompatibility with Vehicle Security Control Module" ,
"U0328" : "Software Incompatibility with Steering Angle Sensor Module" ,
"U0329" : "Software Incompatibility with Steering Column Control Module" ,
"U0330" : "Software Incompatibility with Tire Pressure Monitor Module" ,
"U0331" : "Software Incompatibility with Body Control Module 'A'" ,
"U0400" : "Invalid Data Received" ,
"U0401" : "Invalid Data Received From ECM/PCM" ,
"U0402" : "Invalid Data Received From Transmission Control Module" ,
"U0403" : "Invalid Data Received From Transfer Case Control Module" ,
"U0404" : "Invalid Data Received From Gear Shift Control Module" ,
"U0405" : "Invalid Data Received From Cruise Control Module" ,
"U0406" : "Invalid Data Received From Fuel Injector Control Module" ,
"U0407" : "Invalid Data Received From Glow Plug Control Module" ,
"U0408" : "Invalid Data Received From Throttle Actuator Control Module" ,
"U0409" : "Invalid Data Received From Alternative Fuel Control Module" ,
"U0410" : "Invalid Data Received From Fuel Pump Control Module" ,
"U0411" : "Invalid Data Received From Drive Motor Control Module" ,
"U0412" : "Invalid Data Received From Battery Energy Control Module A" ,
"U0413" : "Invalid Data Received From Battery Energy Control Module B" ,
"U0414" : "Invalid Data Received From Four-Wheel Drive Clutch Control Module" ,
"U0415" : "Invalid Data Received From Anti-Lock Brake System Control Module" ,
"U0416" : "Invalid Data Received From Vehicle Dynamics Control Module" ,
"U0417" : "Invalid Data Received From Park Brake Control Module" ,
"U0418" : "Invalid Data Received From Brake System Control Module" ,
"U0419" : "Invalid Data Received From Steering Effort Control Module" ,
"U0420" : "Invalid Data Received From Power Steering Control Module" ,
"U0421" : "Invalid Data Received From Ride Level Control Module" ,
"U0422" : "Invalid Data Received From Body Control Module" ,
"U0423" : "Invalid Data Received From Instrument Panel Control Module" ,
"U0424" : "Invalid Data Received From HVAC Control Module" ,
"U0425" : "Invalid Data Received From Auxiliary Heater Control Module" ,
"U0426" : "Invalid Data Received From Vehicle Immobilizer Control Module" ,
"U0427" : "Invalid Data Received From Vehicle Security Control Module" ,
"U0428" : "Invalid Data Received From Steering Angle Sensor Module" ,
"U0429" : "Invalid Data Received From Steering Column Control Module" ,
"U0430" : "Invalid Data Received From Tire Pressure Monitor Module" ,
"U0431" : "Invalid Data Received From Body Control Module 'A'"
}<|fim▁end|>
|
"U0071" : "Vehicle Communication Bus E (High)" ,
"U0072" : "Vehicle Communication Bus E (shorted to Bus E)" ,
"U0073" : "Control Module Communication Bus Off" ,
"U0074" : "Reserved by J2012" ,
|
<|file_name|>metadata.py<|end_file_name|><|fim▁begin|># Copyright 2017 Google Inc. and Skytruth Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#<|fim▁hole|># WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict, namedtuple
import csv
import datetime
import dateutil.parser
import pytz
import logging
import os
import sys
import tensorflow as tf
import yaml
import numpy as np
import hashlib
import six
from .feature_generation.file_iterator import GCSFile
""" The main column for vessel classification. """
PRIMARY_VESSEL_CLASS_COLUMN = 'label'
#TODO: (bitsofbits) think about extracting to config file
# The 'real' categories for multihotness are the fine categories, which 'coarse' and 'fishing'
# are defined in terms of. Any number of coarse categories, even with overlapping values can
# be defined in principle, although at present the interaction between the mulithot and non multihot
# versions makes that more complicated.
try:
yaml_load = yaml.safe_load
except:
yaml_load = yaml.load
raw_schema = '''
unknown:
non_fishing:
passenger:
gear:
fish_factory:
cargo_or_tanker:
bunker_or_tanker:
bunker:
tanker:
cargo_or_reefer:
cargo:
reefer:
specialized_reefer:
container_reefer:
fish_tender:
well_boat:
patrol_vessel:
research:
dive_vessel:
submarine:
dredge_non_fishing:
supply_vessel:
tug:
seismic_vessel:
helicopter:
other_not_fishing:
fishing:
squid_jigger:
drifting_longlines:
pole_and_line:
other_fishing:
trollers:
fixed_gear:
pots_and_traps:
set_longlines:
set_gillnets:
trawlers:
dredge_fishing:
seiners:
purse_seines:
tuna_purse_seines:
other_purse_seines:
other_seines:
driftnets:
'''
schema = yaml.safe_load(raw_schema)
def atomic(obj):
for k, v in obj.items():
if v is None or isinstance(v, str):
yield k
else:
for x in atomic(v):
yield x
def categories(obj, include_atomic=True):
for k, v in obj.items():
if v is None or isinstance(v, str):
if include_atomic:
yield k, [k]
else:
yield (k, list(atomic(v)))
for x in categories(v, include_atomic=include_atomic):
yield x
VESSEL_CLASS_DETAILED_NAMES = sorted(atomic(schema))
VESSEL_CATEGORIES = sorted(categories(schema))
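# Illustrative sketch (not part of the original module), using an assumed toy schema:
#
#     fishing:
#       trawlers:
#       seiners:
#         purse_seines:
#
# atomic() yields the leaf labels 'trawlers' and 'purse_seines', while categories()
# (with the default include_atomic=True, as used for VESSEL_CATEGORIES) yields pairs
# such as ('fishing', ['trawlers', 'purse_seines']), ('seiners', ['purse_seines'])
# and ('trawlers', ['trawlers']).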
TRAINING_SPLIT = 'Training'
TEST_SPLIT = 'Test'
FishingRange = namedtuple('FishingRange',
['start_time', 'end_time', 'is_fishing'])
def stable_hash(x):
x = six.ensure_binary(x)
digest = hashlib.blake2b(six.ensure_binary(x)).hexdigest()[-8:]
return int(digest, 16)
class VesselMetadata(object):
def __init__(self,
metadata_dict,
fishing_ranges_map):
self.metadata_by_split = metadata_dict
self.metadata_by_id = {}
self.fishing_ranges_map = fishing_ranges_map
self.id_map_int2bytes = {}
for split, vessels in metadata_dict.items():
for id_, data in vessels.items():
id_ = six.ensure_binary(id_)
self.metadata_by_id[id_] = data
idhash = stable_hash(id_)
self.id_map_int2bytes[idhash] = id_
intersection_ids = set(self.metadata_by_id.keys()).intersection(
set(fishing_ranges_map.keys()))
logging.info("Metadata for %d ids.", len(self.metadata_by_id))
logging.info("Fishing ranges for %d ids.", len(fishing_ranges_map))
logging.info("Vessels with both types of data: %d",
len(intersection_ids))
def vessel_weight(self, id_):
return self.metadata_by_id[id_][1]
def vessel_label(self, label_name, id_):
return self.metadata_by_id[id_][0][label_name]
def ids_for_split(self, split):
assert split in (TRAINING_SPLIT, TEST_SPLIT)
# Check to make sure we don't have leakage
if (set(self.metadata_by_split[TRAINING_SPLIT].keys()) &
set(self.metadata_by_split[TEST_SPLIT].keys())):
logging.warning('id in both training and test split')
return self.metadata_by_split[split].keys()
def weighted_training_list(self,
random_state,
split,
max_replication_factor,
row_filter=lambda row: True,
boundary=1):
replicated_ids = []
logging.info("Training ids: %d", len(self.ids_for_split(split)))
fishing_ranges_ids = []
for id_, (row, weight) in self.metadata_by_split[split].items():
if row_filter(row):
if id_ in self.fishing_ranges_map:
fishing_ranges_ids.append(id_)
weight = min(weight, max_replication_factor)
int_n = int(weight)
replicated_ids += ([id_] * int_n)
frac_n = weight - float(int_n)
if (random_state.uniform(0.0, 1.0) <= frac_n):
replicated_ids.append(id_)
missing = (-len(replicated_ids)) % boundary
if missing:
replicated_ids = np.concatenate(
[replicated_ids,
np.random.choice(replicated_ids, missing)])
random_state.shuffle(replicated_ids)
logging.info("Replicated training ids: %d", len(replicated_ids))
logging.info("Fishing range ids: %d", len(fishing_ranges_ids))
return replicated_ids
def fishing_range_only_list(self, random_state, split):
replicated_ids = []
fishing_id_set = set(
[k for (k, v) in self.fishing_ranges_map.items() if v])
fishing_range_only_ids = [id_
for id_ in self.ids_for_split(split)
if id_ in fishing_id_set]
logging.info("Fishing range training ids: %d / %d",
len(fishing_range_only_ids),
len(self.ids_for_split(split)))
return fishing_range_only_ids
def read_vessel_time_weighted_metadata_lines(available_ids, lines,
fishing_range_dict, split):
""" For a set of vessels, read metadata; use flat weights
Args:
available_ids: a set of all ids for which we have feature data.
lines: a list of comma-separated vessel metadata lines. Columns are
the id and a set of vessel type columns, containing at least one
called 'label' being the primary/coarse type of the vessel e.g.
(Longliner/Passenger etc.).
fishing_range_dict: dictionary of mapping id to lists of fishing ranges
Returns:
A VesselMetadata object with weights and labels for each vessel.
"""
metadata_dict = {TRAINING_SPLIT : {}, TEST_SPLIT : {}}
min_time_per_id = np.inf
for row in lines:
id_ = six.ensure_binary(row['id'].strip())
if id_ in available_ids:
if id_ not in fishing_range_dict:
continue
            # Is this id included only to suppress false positives?
            # Symptom: fishing score for this id never different from 0
item_split = raw_item_split = row['split']
if raw_item_split in '0123456789':
if int(raw_item_split) == split:
item_split = TEST_SPLIT
else:
item_split = TRAINING_SPLIT
if item_split not in (TRAINING_SPLIT, TEST_SPLIT):
logging.warning(
'id %s has no valid split assigned (%s); using for Training',
                    id_, raw_item_split)
                item_split = TRAINING_SPLIT
time_for_this_id = 0
for rng in fishing_range_dict[id_]:
time_for_this_id += (
rng.end_time - rng.start_time).total_seconds()
metadata_dict[item_split][id_] = (row, time_for_this_id)
if split is None and raw_item_split in '0123456789':
# Test on everything even though we are training on everything
metadata_dict[TEST_SPLIT][id_] = (row, time_for_this_id)
if time_for_this_id:
min_time_per_id = min(min_time_per_id, time_for_this_id)
# This weighting is fiddly. We are keeping it for now to match up
# with older data, but should replace when we move to sets, etc.
MAX_WEIGHT = 100.0
for split_dict in metadata_dict.values():
for id_ in split_dict:
row, time = split_dict[id_]
split_dict[id_] = (row, min(MAX_WEIGHT, time / min_time_per_id))
return VesselMetadata(metadata_dict, fishing_range_dict)
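# Worked example of the time weighting above (assumed numbers, for illustration only):
# if three vessels have 3600 s, 36000 s and 720000 s of fishing-range coverage,
# min_time_per_id is 3600, so their weights become 1.0, 10.0 and
# min(100.0, 200.0) = 100.0 -- i.e. weights grow linearly with covered time but are
# capped at MAX_WEIGHT.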
def read_vessel_time_weighted_metadata(available_ids,
metadata_file,
fishing_range_dict={},
split=0):
reader = metadata_file_reader(metadata_file)
return read_vessel_time_weighted_metadata_lines(available_ids, reader,
fishing_range_dict,
split)
def read_vessel_multiclass_metadata_lines(available_ids, lines,
fishing_range_dict):
""" For a set of vessels, read metadata and calculate class weights.
Args:
available_ids: a set of all ids for which we have feature data.
lines: a list of comma-separated vessel metadata lines. Columns are
the id and a set of vessel type columns, containing at least one
called 'label' being the primary/coarse type of the vessel e.g.
(Longliner/Passenger etc.).
fishing_range_dict: dictionary of mapping id to lists of fishing ranges
Returns:
A VesselMetadata object with weights and labels for each vessel.
"""
vessel_type_set = set()
dataset_kind_counts = defaultdict(lambda: defaultdict(lambda: 0))
vessel_types = []
cat_map = {k: v for (k, v) in VESSEL_CATEGORIES}
available_ids = set(available_ids)
for row in lines:
id_ = six.ensure_binary(row['id'].strip())
if id_ not in available_ids:
continue
raw_vessel_type = row[PRIMARY_VESSEL_CLASS_COLUMN]
if not raw_vessel_type:
continue
atomic_types = set()
for kind in raw_vessel_type.split('|'):
try:
for atm in cat_map[kind]:
atomic_types.add(atm)
            except Exception as err:
logging.warning('unknown vessel type: {}\n{}'.format(kind, err))
if not atomic_types:
continue
scale = 1.0 / len(atomic_types)
split = row['split'].strip()
assert split in ('Training', 'Test'), repr(split)
vessel_types.append((id_, split, raw_vessel_type, row))
for atm in atomic_types:
dataset_kind_counts[split][atm] += scale
vessel_type_set |= atomic_types
# else:
# logging.warning('No training data for %s, (%s) %s %s', id_, sorted(available_ids)[:10],
# type(id_), type(sorted(available_ids)[0]))
    # Calculate weights for each vessel type per split; for now use
    # weights of sqrt(max_count / count).
dataset_kind_weights = defaultdict(lambda: {})
for split, counts in dataset_kind_counts.items():
max_count = max(counts.values())
for atomic_vessel_type, count in counts.items():
dataset_kind_weights[split][atomic_vessel_type] = np.sqrt(max_count / float(count))
metadata_dict = defaultdict(lambda: {})
for id_, split, raw_vessel_type, row in vessel_types:
if split == 'Training':
weights = []
for kind in raw_vessel_type.split('|'):
for atm in cat_map.get(kind, 'unknown'):
weights.append(dataset_kind_weights[split][atm])
metadata_dict[split][id_] = (row, np.mean(weights))
elif split == "Test":
metadata_dict[split][id_] = (row, 1.0)
else:
logging.warning("unknown split {}".format(split))
if len(vessel_type_set) == 0:
logging.fatal('No vessel types found for training.')
sys.exit(-1)
logging.info("Vessel types: %s", list(vessel_type_set))
return VesselMetadata(
dict(metadata_dict), fishing_range_dict)
def metadata_file_reader(metadata_file):
"""
"""
with open(metadata_file, 'r') as f:
reader = csv.DictReader(f)
logging.info("Metadata columns: %s", reader.fieldnames)
for row in reader:
yield row
def read_vessel_multiclass_metadata(available_ids,
metadata_file,
fishing_range_dict={}):
reader = metadata_file_reader(metadata_file)
return read_vessel_multiclass_metadata_lines(
available_ids, reader, fishing_range_dict)
def find_available_ids(feature_path):
with tf.Session() as sess:
logging.info('Reading id list file.')
root_output_path = os.path.dirname(feature_path)
# The feature pipeline stage that outputs the id list is sharded to only
# produce a single file, so no need to glob or loop here.
id_path = os.path.join(root_output_path, 'ids/part-00000-of-00001.txt')
logging.info('Reading id list file from {}'.format(id_path))
with GCSFile(id_path) as f:
els = f.read().split(b'\n')
id_list = [id_.strip() for id_ in els if id_.strip() != '']
logging.info('Found %d ids.', len(id_list))
return set(id_list)
def parse_date(date):
try:
unix_timestamp = float(date)
return datetime.datetime.utcfromtimestamp(unix_timestamp).replace(
tzinfo=pytz.utc)
except:
try:
return dateutil.parser.parse(date)
except:
logging.fatal('could not parse date "{}"'.format(date))
raise
def read_fishing_ranges(fishing_range_file):
""" Read vessel fishing ranges, return a dict of id to classified fishing
or non-fishing ranges for that vessel.
"""
fishing_range_dict = defaultdict(lambda: [])
with open(fishing_range_file, 'r') as f:
for l in f.readlines()[1:]:
els = l.split(',')
id_ = six.ensure_binary(els[0].strip())
start_time = parse_date(els[1]).replace(tzinfo=pytz.utc)
end_time = parse_date(els[2]).replace(tzinfo=pytz.utc)
is_fishing = float(els[3])
fishing_range_dict[id_].append(
FishingRange(start_time, end_time, is_fishing))
return dict(fishing_range_dict)
def build_multihot_lookup_table():
n_base = len(VESSEL_CLASS_DETAILED_NAMES)
n_categories = len(VESSEL_CATEGORIES)
#
table = np.zeros([n_categories, n_base], dtype=np.int32)
for i, (_, base_labels) in enumerate(VESSEL_CATEGORIES):
for lbl in base_labels:
j = VESSEL_CLASS_DETAILED_NAMES.index(lbl)
table[i, j] = 1
return table
multihot_lookup_table = build_multihot_lookup_table()
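# Illustrative note (not from the original module): multihot_lookup_table has shape
# [len(VESSEL_CATEGORIES), len(VESSEL_CLASS_DETAILED_NAMES)]; row i carries a 1 in
# every column whose fine-grained class belongs to category i, so gathering a row
# turns a single category index into a multi-hot vector over the atomic classes.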
def multihot_encode(label):
"""Multihot encode based on fine, coarse and is_fishing label
Args:
label: Tensor (int)
Returns:
Tensor with bits set for every allowable vessel type based on the inputs
"""
tf_multihot_lookup_table = tf.convert_to_tensor(multihot_lookup_table)
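    # Each label value indexes a category row; gather therefore returns that
    # row's 0/1 vector over VESSEL_CLASS_DETAILED_NAMES (a batched label tensor
    # yields one such row per element).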
return tf.gather(tf_multihot_lookup_table, label)<|fim▁end|>
|
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
|
<|file_name|>rollup.config.js<|end_file_name|><|fim▁begin|>import svelte from 'rollup-plugin-svelte';
import resolve from 'rollup-plugin-node-resolve';
import commonjs from 'rollup-plugin-commonjs';
import livereload from 'rollup-plugin-livereload';
import { terser } from 'rollup-plugin-terser';
import copy from 'rollup-plugin-cpy';
const production = !process.env.ROLLUP_WATCH;
// 1. If in production, we build the App.svelte component that would be injected into the parent application host
// 2. If in development, we build the stand alone main.js entry which would be used inside stand alone index.html
const inputFile = production ? 'src/App.svelte' : 'src/main.js';
// 1. If in production, we build an EcmaScript bundle.mjs module
// 2. If in development, we build a standard Javascript bundle.js
const outputFile = production ? 'public/bundle.mjs' : 'public/bundle.js';
export default {
input: inputFile,
output: {
sourcemap: true,
// In production, we build an EcmaScript module (ESM)
// In development, we build an Immediately Invoked Function Expression (IIFE)
format: production ? 'esm' : 'iife',<|fim▁hole|> svelte({
// enable run-time checks when not in production
dev: !production,
// we'll extract any component CSS out into
// a separate file — better for performance
css: css => {
css.write('public/bundle.css');
}
}),
// If you have external dependencies installed from
// npm, you'll most likely need these plugins. In
// some cases you'll need additional configuration —
// consult the documentation for details:
// https://github.com/rollup/rollup-plugin-commonjs
resolve(),
commonjs(),
// Watch the `public` directory and refresh the
// browser on changes when not in production
!production && livereload('public'),
// If we're building for production (npm run build
// instead of npm run dev), minify
production && terser(),
// TODO! Only copy at build time
production && copy({
// Copy EcmaScript modules and dependent resources from public folder
files: ['public/*.mjs', 'public/*.mjs.map', 'public/bundle.css', 'public/*.css.map'],
// To external folder static-apps from where the parent application host can load it
dest: '../../static-apps/hello-world',
options: {
verbose: true
}
})
]
};<|fim▁end|>
|
name: 'app',
file: outputFile
},
plugins: [
|
<|file_name|>account.box.edit.location.js<|end_file_name|><|fim▁begin|>(function () {
'use strict';
angular
.module('openSenseMapApp')
.controller('EditBoxLocationController', EditBoxLocationController);
EditBoxLocationController.$inject = ['$scope', 'boxData', 'notifications', 'AccountService', 'Box'];
function EditBoxLocationController ($scope, boxData, notifications, AccountService, Box) {
var vm = this;
vm.editMarkerInput = {};
vm.originalPosition = {};
vm.save = save;
vm.resetPosition = resetPosition;
activate();
////
function activate () {
var icon = '';
var color = '';
if (boxData.exposure === 'indoor' || boxData.exposure === 'outdoor') {
icon = 'cube';
color = 'green';
}
if (boxData.exposure === 'mobile') {
icon = 'rocket';
color = 'blue';
}
var marker = L.AwesomeMarkers.icon({
type: 'awesomeMarker',
prefix: 'fa',
icon: icon,
markerColor: color
});
var lat = parseFloat(boxData.currentLocation.coordinates[1].toFixed(6));
var lng = parseFloat(boxData.currentLocation.coordinates[0].toFixed(6));
vm.boxPosition = {
layerName: 'registration',
lng: lng,
lat: lat,
latLng: [lat, lng],
height: boxData.currentLocation.coordinates[2],
draggable: true,
zoom: 17,
icon: marker
};
angular.copy(vm.boxPosition, vm.originalPosition);
vm.editMarker = {
m1: angular.copy(vm.boxPosition)
};
angular.copy(vm.boxPosition, vm.editMarkerInput);
}
<|fim▁hole|> notifications.addAlert('info', 'NOTIFICATION_BOX_UPDATE_SUCCESS');
})
.catch(function () {
notifications.addAlert('danger', 'NOTIFICATION_BOX_UPDATE_FAILED');
});
}
function resetPosition () {
vm.editMarker = { m1: angular.copy(vm.originalPosition) };
vm.editMarkerInput = angular.copy(vm.originalPosition);
vm.editMarker.m1.draggable = true;
}
function setCoordinates (coords) {
vm.editMarker = {
m1: angular.copy(vm.originalPosition)
};
var lng = parseFloat(coords.lng.toFixed(6));
var lat = parseFloat(coords.lat.toFixed(6));
vm.editMarker.m1.lng = lng;
vm.editMarker.m1.lat = lat;
vm.editMarker.m1.latLng = [lat, lng];
vm.editMarker.m1.height = coords.height;
vm.editMarkerInput.lng = vm.editMarker.m1.lng;
vm.editMarkerInput.lat = vm.editMarker.m1.lat;
}
////
$scope.$on('osemMapClick.map_edit', function (e, args) {
setCoordinates(args.latlng);
});
$scope.$on('osemMarkerDragend.map_edit', function (e, args) {
setCoordinates(args.target._latlng);
});
$scope.$watchCollection('location.editMarkerInput', function (newValue) {
if (newValue && newValue.lat && newValue.lng) {
setCoordinates({
lng: newValue.lng,
lat: newValue.lat,
height: newValue.height,
});
}
});
}
})();<|fim▁end|>
|
function save () {
return AccountService.updateBox(boxData._id, { location: vm.editMarker.m1 })
.then(function (response) {
angular.copy(new Box(response.data), boxData);
|
<|file_name|>value.rs<|end_file_name|><|fim▁begin|>use std::fmt::{Debug, Display, Formatter, Error};
use std::fmt::Result as FmtResult;
use num::Float;
use std::mem;
use super::clip::{ClipHolder};
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
pub enum Value {
Int(i64),
Float(FloatWrap),
Bool(bool),
String(String),
Tuple(Vec<Value>),
Clip(ClipHolder),<|fim▁hole|>
impl Display for Value {
fn fmt<'r>(&'r self, formatter: &mut Formatter) -> FmtResult {
match self {
&Value::Int(i) => write!(formatter, "{}", i),
&Value::Float(ref f) => write!(formatter, "{}", f),
&Value::Bool(b) => write!(formatter, "{}", b),
&Value::String(ref s) => write!(formatter, "{}", s),
&Value::Tuple(ref v) => {
match write!(formatter, "(") {
Ok(()) => (),
Err(e) => {return Err(e);}
}
let len = v.len();
for (idx, val) in v.iter().enumerate() {
match write!(formatter, "{}", val) {
Ok(()) => (),
Err(e) => {return Err(e);}
}
if idx != len - 1 {
match write!(formatter, ", ") {
Ok(()) => (),
Err(e) => {return Err(e);}
}
}
}
match write!(formatter, ")") {
Ok(()) => Ok(()),
Err(e) => Err(e)
}
}
&Value::Clip(_) => write!(formatter, "<Clip>"),
&Value::Nil => write!(formatter, "nil"),
}
}
}
#[derive(PartialEq, Eq, Hash, Clone)]
pub struct FloatWrap(u64);
impl FloatWrap {
pub fn new(mut val: f64) -> FloatWrap {
// make all NaNs have the same representation
if val.is_nan() {
val = Float::nan()
}
unsafe {
FloatWrap(mem::transmute(val))
}
}
pub fn get(&self) -> f64 {
let cl = self.clone();
unsafe {
mem::transmute(cl)
}
}
}
impl Debug for FloatWrap {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
write!(f, "{:?}", self.get())
}
}
impl Display for FloatWrap {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
write!(f, "{}", self.get())
}
}<|fim▁end|>
|
Nil
}
|
<|file_name|>decl.rs<|end_file_name|><|fim▁begin|>use easter::decl::{Dtor, DtorExt};
use unjson::ty::Object;
use result::Result;
use error::Error;
use node::ExtractNode;
pub trait IntoDecl {
fn into_dtor(self) -> Result<Dtor>;
}
impl IntoDecl for Object {
fn into_dtor(mut self) -> Result<Dtor> {
let lhs = try!(self.extract_patt("id"));
let init = try!(self.extract_expr_opt("init"));<|fim▁hole|> }
}<|fim▁end|>
|
Dtor::from_init_opt(lhs, init).map_err(Error::UninitializedPattern)
|
<|file_name|>main.js<|end_file_name|><|fim▁begin|>/**
* Copyright (c) 2014 Famous Industries, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* @license MIT
*/
/**
* HeaderFooterLayout
* ------------------
*
* HeaderFooterLayout is a layout which will arrange three renderables
* into a header and footer area of defined size and a content area
* of flexible size.
*
* In this example we create a basic HeaderFooterLayout and define a
* size for the header and footer
*/
define(function(require, exports, module) {
var Engine = require('famous/core/Engine');
var Surface = require('famous/core/Surface');
var Modifier = require('famous/core/Modifier');
var StateModifier = require('famous/modifiers/StateModifier');
var Transform = require('famous/core/Transform');
var HeaderFooterLayout = require('famous/views/HeaderFooterLayout');
var Easing = require('famous/transitions/Easing');
var RenderController = require("famous/views/RenderController");
var MenuView = require('./views/MenuView');
var PlayHeaderView = require('./views/PlayHeaderView');
var PlayBodyView = require('./views/PlayBodyView');
var PlayFooterView = require('./views/PlayFooterView');
var Transitionable = require('famous/transitions/Transitionable');
var SpringTransition = require('famous/transitions/SpringTransition');
Transitionable.registerMethod('spring', SpringTransition);
var mainContext = Engine.createContext();
var layout = new HeaderFooterLayout({<|fim▁hole|> });
layout.header.add(PlayHeaderView);
//position to the center
var bodyRenderController = new RenderController();
layout.content.add(bodyRenderController);
var bodySurfaces = [];
bodySurfaces.push(PlayBodyView);
bodySurfaces.push(MenuView);
bodyRenderController.show(bodySurfaces[0]);
PlayBodyView.eventHandler.on('seekToPosition', function(data) {
PlayHeaderView.setIsPlaying(false);
});
PlayBodyView.eventHandler.on('finishedSpeaking', function(data) {
PlayHeaderView.setIsPlaying(true);
});
var togglemenu = false;
PlayHeaderView.eventHandler.on('showMenu', function(data) {
bodySurfaces[1].toggle();
togglemenu = !togglemenu;
if (togglemenu) {
bodyRenderController.show(bodySurfaces[1]);
} else {
bodyRenderController.show(bodySurfaces[0]);
}
});
PlayHeaderView.eventHandler.on('shouldFlipViews', function(data) {
PlayBodyView.flip();
});
PlayHeaderView.eventHandler.on('shouldPlay', function(data) {
PlayBodyView.play();
});
PlayHeaderView.eventHandler.on('toTop', function(data) {
PlayBodyView.scrollTo(0);
});
MenuView.eventHandler.on('changeContent', function(title) {
PlayHeaderView.setTitle(title);
PlayHeaderView.setIsPlaying(true);
PlayHeaderView.showMenu();
PlayBodyView.switchContent(title);
});
layout.footer.add(PlayFooterView);
mainContext.add(layout);
});<|fim▁end|>
|
headerSize: 50,
footerSize: 50
|
<|file_name|>testing.rs<|end_file_name|><|fim▁begin|>// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use crate::{
raw::{config::*, security},
testing::s2n_tls::Harness,
};
use bytes::Bytes;
use core::task::Poll;
use std::collections::VecDeque;
pub mod s2n_tls;
type Error = Box<dyn std::error::Error>;
type Result<T, E = Error> = core::result::Result<T, E>;
/// The number of iterations that will be executed until the handshake exits with an error
///
/// This is to prevent endless looping without making progress on the connection.
const SAMPLES: usize = 100;
pub trait Connection: core::fmt::Debug {
fn poll<Ctx: Context>(&mut self, context: &mut Ctx) -> Poll<Result<()>>;
}
pub trait Context {
fn receive(&mut self, max_len: Option<usize>) -> Option<Bytes>;
fn send(&mut self, data: Bytes);
}
#[derive(Debug)]
pub struct Pair<Server: Connection, Client: Connection> {
pub server: (Server, MemoryContext),
pub client: (Client, MemoryContext),
pub max_iterations: usize,
}
impl<Server: Connection, Client: Connection> Pair<Server, Client> {
pub fn new(server: Server, client: Client, max_iterations: usize) -> Self {
Self {
server: (server, Default::default()),
client: (client, Default::default()),
max_iterations,
}<|fim▁hole|> self.max_iterations > 0,
"handshake has iterated too many times: {:#?}",
self,
);
let client_res = self.client.0.poll(&mut self.client.1);
let server_res = self.server.0.poll(&mut self.server.1);
self.client.1.transfer(&mut self.server.1);
self.max_iterations -= 1;
match (client_res, server_res) {
(Poll::Ready(client_res), Poll::Ready(server_res)) => {
client_res?;
server_res?;
Ok(()).into()
}
(Poll::Ready(client_res), _) => {
client_res?;
Poll::Pending
}
(_, Poll::Ready(server_res)) => {
server_res?;
Poll::Pending
}
_ => Poll::Pending,
}
}
}
#[derive(Debug, Default)]
pub struct MemoryContext {
rx: VecDeque<Bytes>,
tx: VecDeque<Bytes>,
}
impl MemoryContext {
pub fn transfer(&mut self, other: &mut Self) {
self.rx.extend(other.tx.drain(..));
other.rx.extend(self.tx.drain(..));
}
}
impl Context for MemoryContext {
fn receive(&mut self, max_len: Option<usize>) -> Option<Bytes> {
loop {
let mut chunk = self.rx.pop_front()?;
if chunk.is_empty() {
continue;
}
let max_len = max_len.unwrap_or(usize::MAX);
if chunk.len() > max_len {
self.rx.push_front(chunk.split_off(max_len));
}
return Some(chunk);
}
}
fn send(&mut self, data: Bytes) {
self.tx.push_back(data);
}
}
struct CertKeyPair {
cert: &'static [u8],
key: &'static [u8],
}
impl Default for CertKeyPair {
fn default() -> Self {
CertKeyPair {
cert: &include_bytes!("../../../../tests/pems/rsa_4096_sha512_client_cert.pem")[..],
key: &include_bytes!("../../../../tests/pems/rsa_4096_sha512_client_key.pem")[..],
}
}
}
impl CertKeyPair {
fn cert(&mut self) -> &'static [u8] {
self.cert
}
fn key(&mut self) -> &'static [u8] {
self.key
}
}
pub fn build_config(cipher_prefs: &security::Policy) -> Result<crate::raw::config::Config, Error> {
let mut builder = Builder::new();
let mut keypair = CertKeyPair::default();
// Build a config
builder
.set_security_policy(cipher_prefs)
.expect("Unable to set config cipher preferences");
builder
.load_pem(keypair.cert(), keypair.key())
.expect("Unable to load cert/pem");
unsafe {
let ctx: *mut core::ffi::c_void = std::ptr::null_mut();
builder
.set_verify_host_callback(Some(verify_host_cb), ctx)
.expect("Unable to set a host verify callback.");
builder
.disable_x509_verification()
.expect("Unable to disable x509 verification");
};
Ok(builder.build().expect("Unable to build server config"))
}
// host verify callback for x509
// see: https://github.com/aws/s2n-tls/blob/main/docs/USAGE-GUIDE.md#s2n_verify_host_fn
unsafe extern "C" fn verify_host_cb(
hostname: *const i8,
hostname_len: usize,
_context: *mut core::ffi::c_void,
) -> u8 {
let host_str = ::std::str::from_utf8(::std::slice::from_raw_parts(
hostname as *const u8,
hostname_len,
));
match host_str {
Err(_) => 0,
Ok(_host) => 1,
}
}
pub fn s2n_tls_pair(config: crate::raw::config::Config) {
// create and configure a server connection
let mut server = crate::raw::connection::Connection::new_server();
server
.set_config(config.clone())
.expect("Failed to bind config to server connection");
server
.set_client_auth_type(s2n_tls_sys::s2n_cert_auth_type::NONE)
.expect("Unable to set server client auth type");
let server = Harness::new(server);
// create a client connection
let mut client = crate::raw::connection::Connection::new_client();
client
.set_config(config)
.expect("Unabel to set client config");
let client = Harness::new(client);
let mut pair = Pair::new(server, client, SAMPLES);
loop {
match pair.poll() {
Poll::Ready(result) => {
result.unwrap();
break;
}
Poll::Pending => continue,
}
}
// TODO add assertions to make sure the handshake actually succeeded
}<|fim▁end|>
|
}
pub fn poll(&mut self) -> Poll<Result<()>> {
assert!(
|
<|file_name|>events.py<|end_file_name|><|fim▁begin|>EXCEPTION_INFO = 'exception_info'
MESSAGE = 'message'
PAYLOAD = 'payload'
_event_handlers = {
EXCEPTION_INFO: [],
MESSAGE: [],
PAYLOAD: []
}
def _check_type(typ):
if typ not in _event_handlers:
raise ValueError('Unknown type: %s. Must be one of %s' % (typ, _event_handlers.keys()))
def _add_handler(typ, handler_fn, pos):
_check_type(typ)
pos = pos if pos is not None else -1
handlers = _event_handlers[typ]
try:
handlers.index(handler_fn)
except ValueError:
handlers.insert(pos, handler_fn)
def _remove_handler(typ, handler_fn):
_check_type(typ)
handlers = _event_handlers[typ]
try:
index = handlers.index(handler_fn)
handlers.pop(index)
except ValueError:
pass
def _on_event(typ, target, **kw):
_check_type(typ)
ref = target
for handler in _event_handlers[typ]:
result = handler(ref, **kw)
if result is False:
return False
ref = result
return ref
# Add/remove event handlers
def add_exception_info_handler(handler_fn, pos=None):
_add_handler(EXCEPTION_INFO, handler_fn, pos)
def remove_exception_info_handler(handler_fn):
_remove_handler(EXCEPTION_INFO, handler_fn)
def add_message_handler(handler_fn, pos=None):
_add_handler(MESSAGE, handler_fn, pos)
def remove_message_handler(handler_fn):
_remove_handler(MESSAGE, handler_fn)
def add_payload_handler(handler_fn, pos=None):
_add_handler(PAYLOAD, handler_fn, pos)
def remove_payload_handler(handler_fn):
_remove_handler(PAYLOAD, handler_fn)
# Event handler processing
def on_exception_info(exc_info, **kw):
return _on_event(EXCEPTION_INFO, exc_info, **kw)
def on_message(message, **kw):
return _on_event(MESSAGE, message, **kw)
def on_payload(payload, **kw):
return _on_event(PAYLOAD, payload, **kw)<|fim▁hole|>
# Misc
def reset():
for handlers in _event_handlers.values():
del handlers[:]<|fim▁end|>
| |
<|file_name|>test_artificial_128_Fisher_MovingAverage_5__100.py<|end_file_name|><|fim▁begin|>import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
<|fim▁hole|>art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 5, transform = "Fisher", sigma = 0.0, exog_count = 100, ar_order = 0);<|fim▁end|>
| |
<|file_name|>admin.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4<|fim▁hole|>from django.contrib import admin
from .models import *
admin.site.register(MessageInWaiting)
admin.site.register(ResponseInWaiting)
admin.site.register(Template)<|fim▁end|>
| |
<|file_name|>moves.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
# Moves Computation
The goal of this file is to compute which
expressions/patterns/captures correspond to *moves*. This is
generally a function of the context in which the expression appears as
well as the expression's type.
## Examples
We will use the following fragment of code to explain the various
considerations. Note that in this code `x` is used after it has been
moved here. This is not relevant to this pass, though the information
we compute would later be used to detect this error (see the section
Enforcement of Moves, below).
struct Foo { a: int, b: ~int }
let x: Foo = ...;
let w = (x {Read}).a; // Read
let y = (x {Move}).b; // Move
let z = copy (x {Read}).b; // Read
Let's look at these examples one by one. In the first case, `w`, the
expression being assigned is `x.a`, which has `int` type. In that
case, the value is read, and the container (`x`) is also read.
In the second case, `y`, `x.b` is being assigned which has type
`~int`. Because this type moves by default, that will be a move
reference. Whenever we move from a compound expression like `x.b` (or
`x[b]` or `*x` or `{x)[b].c`, etc), this invalidates all containing
expressions since we do not currently permit "incomplete" variables
where part of them has been moved and part has not. In this case,
this means that the reference to `x` is also a move. We'll see later,
though, that these kind of "partial moves", where part of the
expression has been moved, are classified and stored somewhat
differently.
The final example (`z`) is `copy x.b`: in this case, although the
expression being assigned has type `~int`, there are no moves
involved.
### Patterns
For each binding in a match or let pattern, we also compute a read
or move designation. A move binding means that the value will be
moved from the value being matched. As a result, the expression
being matched (aka, the 'discriminant') is either moved or read
depending on whethe the bindings move the value they bind to out of
the discriminant.
For examples, consider this match expression:
match x {Move} {
Foo { a: a {Read}, b: b {Move} } => {...}
}
Here, the binding `b` is value (not ref) mode, and `b` has type
`~int`, and therefore the discriminant expression `x` would be
incomplete so it also considered moved.
In the following two examples, in contrast, the mode of `b` is either
`copy` or `ref` and hence the overall result is a read:
match x {Read} {
Foo { a: a {Read}, b: copy b {Read} } => {...}
}
match x {Read} {
Foo { a: a {Read}, b: ref b {Read} } => {...}
}
Similar reasoning can be applied to `let` expressions:
let Foo { a: a {Read}, b: b {Move} } = x {Move};
let Foo { a: a {Read}, b: copy b {Read} } = x {Read};
let Foo { a: a {Read}, b: ref b {Read} } = x {Read};
## Output
The pass results in the struct `MoveMaps` which contains several
maps:
`moves_map` is a set containing the id of every *outermost expression* or
*binding* that causes a move. Note that `moves_map` only contains the *outermost
expressions* that are moved. Therefore, if you have a use of `x.b`,
as in the example `y` above, the expression `x.b` would be in the
`moves_map` but not `x`. The reason for this is that, for most
purposes, it's only the outermost expression that is needed. The
borrow checker and trans, for example, only care about the outermost
expressions that are moved. It is more efficient therefore just to
store those entries.
Sometimes though we want to know the variables that are moved (in
particular in the borrow checker). For these cases, the set
`moved_variables_set` just collects the ids of variables that are
moved.
Finally, the `capture_map` maps from the node_id of a closure
expression to an array of `CaptureVar` structs detailing which
variables are captured and how (by ref, by copy, by move).
## Enforcement of Moves
The enforcement of moves is done by the borrow checker. Please see
the section "Moves and initialization" in `middle/borrowck/doc.rs`.
## Distributive property
Copies are "distributive" over parenthesization, but blocks are
considered rvalues. What this means is that, for example, neither
`a.clone()` nor `(a).clone()` will move `a` (presuming that `a` has a
linear type and `clone()` takes its self by reference), but
`{a}.clone()` will move `a`, as would `(if cond {a} else {b}).clone()`
and so on.
*/
use middle::pat_util::{pat_bindings};
use middle::freevars;
use middle::ty;
use middle::typeck::{method_map};
use util::ppaux;<|fim▁hole|>use std::hashmap::{HashSet, HashMap};
use syntax::ast::*;
use syntax::ast_util;
use syntax::visit;
use syntax::visit::vt;
use syntax::codemap::span;
#[deriving(Encodable, Decodable)]
pub enum CaptureMode {
CapCopy, // Copy the value into the closure.
CapMove, // Move the value into the closure.
CapRef, // Reference directly from parent stack frame (used by `&fn()`).
}
#[deriving(Encodable, Decodable)]
pub struct CaptureVar {
def: def, // Variable being accessed free
span: span, // Location of an access to this variable
mode: CaptureMode // How variable is being accessed
}
pub type CaptureMap = @mut HashMap<node_id, @[CaptureVar]>;
pub type MovesMap = @mut HashSet<node_id>;
/**
* Set of variable node-ids that are moved.
*
* Note: The `VariableMovesMap` stores expression ids that
* are moves, whereas this set stores the ids of the variables
* that are moved at some point */
pub type MovedVariablesSet = @mut HashSet<node_id>;
/** See the section Output on the module comment for explanation. */
pub struct MoveMaps {
moves_map: MovesMap,
moved_variables_set: MovedVariablesSet,
capture_map: CaptureMap
}
struct VisitContext {
tcx: ty::ctxt,
method_map: method_map,
move_maps: MoveMaps
}
#[deriving(Eq)]
enum UseMode {
Move, // This value or something owned by it is moved.
Read // Read no matter what the type.
}
pub fn compute_moves(tcx: ty::ctxt,
method_map: method_map,
crate: &crate) -> MoveMaps
{
let visitor = visit::mk_vt(@visit::Visitor {
visit_fn: compute_modes_for_fn,
visit_expr: compute_modes_for_expr,
visit_local: compute_modes_for_local,
.. *visit::default_visitor()
});
let visit_cx = VisitContext {
tcx: tcx,
method_map: method_map,
move_maps: MoveMaps {
moves_map: @mut HashSet::new(),
capture_map: @mut HashMap::new(),
moved_variables_set: @mut HashSet::new()
}
};
visit::visit_crate(crate, (visit_cx, visitor));
return visit_cx.move_maps;
}
pub fn moved_variable_node_id_from_def(def: def) -> Option<node_id> {
match def {
def_binding(nid, _) |
def_arg(nid, _) |
def_local(nid, _) |
def_self(nid, _) => Some(nid),
_ => None
}
}
///////////////////////////////////////////////////////////////////////////
// Expressions
fn compute_modes_for_local<'a>(local: @local,
(cx, v): (VisitContext,
vt<VisitContext>)) {
cx.use_pat(local.node.pat);
for local.node.init.iter().advance |&init| {
cx.use_expr(init, Read, v);
}
}
fn compute_modes_for_fn(fk: &visit::fn_kind,
decl: &fn_decl,
body: &blk,
span: span,
id: node_id,
(cx, v): (VisitContext,
vt<VisitContext>)) {
for decl.inputs.iter().advance |a| {
cx.use_pat(a.pat);
}
visit::visit_fn(fk, decl, body, span, id, (cx, v));
}
fn compute_modes_for_expr(expr: @expr,
(cx, v): (VisitContext,
vt<VisitContext>))
{
cx.consume_expr(expr, v);
}
impl VisitContext {
pub fn consume_exprs(&self, exprs: &[@expr], visitor: vt<VisitContext>) {
for exprs.iter().advance |expr| {
self.consume_expr(*expr, visitor);
}
}
pub fn consume_expr(&self, expr: @expr, visitor: vt<VisitContext>) {
/*!
* Indicates that the value of `expr` will be consumed,
* meaning either copied or moved depending on its type.
*/
debug!("consume_expr(expr=%s)",
expr.repr(self.tcx));
let expr_ty = ty::expr_ty_adjusted(self.tcx, expr);
if ty::type_moves_by_default(self.tcx, expr_ty) {
self.move_maps.moves_map.insert(expr.id);
self.use_expr(expr, Move, visitor);
} else {
self.use_expr(expr, Read, visitor);
};
}
pub fn consume_block(&self, blk: &blk, visitor: vt<VisitContext>) {
/*!
* Indicates that the value of `blk` will be consumed,
* meaning either copied or moved depending on its type.
*/
debug!("consume_block(blk.id=%?)", blk.id);
for blk.stmts.iter().advance |stmt| {
(visitor.visit_stmt)(*stmt, (*self, visitor));
}
for blk.expr.iter().advance |tail_expr| {
self.consume_expr(*tail_expr, visitor);
}
}
pub fn use_expr(&self,
expr: @expr,
expr_mode: UseMode,
visitor: vt<VisitContext>) {
/*!
* Indicates that `expr` is used with a given mode. This will
* in turn trigger calls to the subcomponents of `expr`.
*/
debug!("use_expr(expr=%s, mode=%?)",
expr.repr(self.tcx),
expr_mode);
// `expr_mode` refers to the post-adjustment value. If one of
// those adjustments is to take a reference, then it's only
// reading the underlying expression, not moving it.
let comp_mode = match self.tcx.adjustments.find(&expr.id) {
Some(&@ty::AutoDerefRef(
ty::AutoDerefRef {
autoref: Some(_), _})) => Read,
_ => expr_mode
};
debug!("comp_mode = %?", comp_mode);
match expr.node {
expr_path(*) | expr_self => {
match comp_mode {
Move => {
let def = self.tcx.def_map.get_copy(&expr.id);
let r = moved_variable_node_id_from_def(def);
for r.iter().advance |&id| {
self.move_maps.moved_variables_set.insert(id);
}
}
Read => {}
}
}
expr_unary(_, deref, base) => { // *base
if !self.use_overloaded_operator(
expr, base, [], visitor)
{
// Moving out of *base moves out of base.
self.use_expr(base, comp_mode, visitor);
}
}
expr_field(base, _, _) => { // base.f
// Moving out of base.f moves out of base.
self.use_expr(base, comp_mode, visitor);
}
expr_index(_, lhs, rhs) => { // lhs[rhs]
if !self.use_overloaded_operator(
expr, lhs, [rhs], visitor)
{
self.use_expr(lhs, comp_mode, visitor);
self.consume_expr(rhs, visitor);
}
}
expr_call(callee, ref args, _) => { // callee(args)
// Figure out whether the called function is consumed.
let mode = match ty::get(ty::expr_ty(self.tcx, callee)).sty {
ty::ty_closure(ref cty) => {
match cty.onceness {
Once => Move,
Many => Read,
}
},
ty::ty_bare_fn(*) => Read,
ref x =>
self.tcx.sess.span_bug(callee.span,
fmt!("non-function type in moves for expr_call: %?", x)),
};
// Note we're not using consume_expr, which uses type_moves_by_default
// to determine the mode, for this. The reason is that while stack
// closures should be noncopyable, they shouldn't move by default;
// calling a closure should only consume it if it's once.
if mode == Move {
self.move_maps.moves_map.insert(callee.id);
}
self.use_expr(callee, mode, visitor);
self.use_fn_args(callee.id, *args, visitor);
}
expr_method_call(callee_id, rcvr, _, _, ref args, _) => { // callee.m(args)
// Implicit self is equivalent to & mode, but every
// other kind should be + mode.
self.use_receiver(rcvr, visitor);
self.use_fn_args(callee_id, *args, visitor);
}
expr_struct(_, ref fields, opt_with) => {
for fields.iter().advance |field| {
self.consume_expr(field.node.expr, visitor);
}
for opt_with.iter().advance |with_expr| {
// If there are any fields whose type is move-by-default,
// then `with` is consumed, otherwise it is only read
let with_ty = ty::expr_ty(self.tcx, *with_expr);
let with_fields = match ty::get(with_ty).sty {
ty::ty_struct(did, ref substs) => {
ty::struct_fields(self.tcx, did, substs)
}
ref r => {
self.tcx.sess.span_bug(
with_expr.span,
fmt!("bad base expr type in record: %?", r))
}
};
// The `with` expr must be consumed if it contains
// any fields which (1) were not explicitly
// specified and (2) have a type that
// moves-by-default:
let consume_with = with_fields.iter().any(|tf| {
!fields.iter().any(|f| f.node.ident == tf.ident) &&
ty::type_moves_by_default(self.tcx, tf.mt.ty)
});
if consume_with {
self.consume_expr(*with_expr, visitor);
} else {
self.use_expr(*with_expr, Read, visitor);
}
}
}
expr_tup(ref exprs) => {
self.consume_exprs(*exprs, visitor);
}
expr_if(cond_expr, ref then_blk, opt_else_expr) => {
self.consume_expr(cond_expr, visitor);
self.consume_block(then_blk, visitor);
for opt_else_expr.iter().advance |else_expr| {
self.consume_expr(*else_expr, visitor);
}
}
expr_match(discr, ref arms) => {
// We must do this first so that `arms_have_by_move_bindings`
// below knows which bindings are moves.
for arms.iter().advance |arm| {
self.consume_arm(arm, visitor);
}
// The discriminant may, in fact, be partially moved
// if there are by-move bindings, but borrowck deals
// with that itself.
self.use_expr(discr, Read, visitor);
}
expr_copy(base) => {
self.use_expr(base, Read, visitor);
}
expr_paren(base) => {
// Note: base is not considered a *component* here, so
// use `expr_mode` not `comp_mode`.
self.use_expr(base, expr_mode, visitor);
}
expr_vec(ref exprs, _) => {
self.consume_exprs(*exprs, visitor);
}
expr_addr_of(_, base) => { // &base
self.use_expr(base, Read, visitor);
}
expr_inline_asm(*) |
expr_break(*) |
expr_again(*) |
expr_lit(*) => {}
expr_loop(ref blk, _) => {
self.consume_block(blk, visitor);
}
expr_log(a_expr, b_expr) => {
self.consume_expr(a_expr, visitor);
self.use_expr(b_expr, Read, visitor);
}
expr_while(cond_expr, ref blk) => {
self.consume_expr(cond_expr, visitor);
self.consume_block(blk, visitor);
}
expr_unary(_, _, lhs) => {
if !self.use_overloaded_operator(
expr, lhs, [], visitor)
{
self.consume_expr(lhs, visitor);
}
}
expr_binary(_, _, lhs, rhs) => {
if !self.use_overloaded_operator(
expr, lhs, [rhs], visitor)
{
self.consume_expr(lhs, visitor);
self.consume_expr(rhs, visitor);
}
}
expr_block(ref blk) => {
self.consume_block(blk, visitor);
}
expr_ret(ref opt_expr) => {
for opt_expr.iter().advance |expr| {
self.consume_expr(*expr, visitor);
}
}
expr_assign(lhs, rhs) => {
self.use_expr(lhs, Read, visitor);
self.consume_expr(rhs, visitor);
}
expr_cast(base, _) => {
self.consume_expr(base, visitor);
}
expr_assign_op(_, _, lhs, rhs) => {
// FIXME(#4712) --- Overloaded operators?
//
// if !self.use_overloaded_operator(
// expr, DoDerefArgs, lhs, [rhs], visitor)
// {
self.consume_expr(lhs, visitor);
self.consume_expr(rhs, visitor);
// }
}
expr_repeat(base, count, _) => {
self.consume_expr(base, visitor);
self.consume_expr(count, visitor);
}
expr_loop_body(base) |
expr_do_body(base) => {
self.use_expr(base, comp_mode, visitor);
}
expr_fn_block(ref decl, ref body) => {
for decl.inputs.iter().advance |a| {
self.use_pat(a.pat);
}
let cap_vars = self.compute_captures(expr.id);
self.move_maps.capture_map.insert(expr.id, cap_vars);
self.consume_block(body, visitor);
}
expr_vstore(base, _) => {
self.use_expr(base, comp_mode, visitor);
}
expr_mac(*) => {
self.tcx.sess.span_bug(
expr.span,
"macro expression remains after expansion");
}
}
}
pub fn use_overloaded_operator(&self,
expr: &expr,
receiver_expr: @expr,
arg_exprs: &[@expr],
visitor: vt<VisitContext>)
-> bool {
if !self.method_map.contains_key(&expr.id) {
return false;
}
self.use_receiver(receiver_expr, visitor);
        // for overloaded operators, we are always passing in a
// borrowed pointer, so it's always read mode:
for arg_exprs.iter().advance |arg_expr| {
self.use_expr(*arg_expr, Read, visitor);
}
return true;
}
pub fn consume_arm(&self, arm: &arm, visitor: vt<VisitContext>) {
for arm.pats.iter().advance |pat| {
self.use_pat(*pat);
}
for arm.guard.iter().advance |guard| {
self.consume_expr(*guard, visitor);
}
self.consume_block(&arm.body, visitor);
}
pub fn use_pat(&self, pat: @pat) {
/*!
*
* Decides whether each binding in a pattern moves the value
* into itself or not based on its type and annotation.
*/
do pat_bindings(self.tcx.def_map, pat) |bm, id, _span, path| {
let binding_moves = match bm {
bind_by_ref(_) => false,
bind_infer => {
let pat_ty = ty::node_id_to_type(self.tcx, id);
debug!("pattern %? %s type is %s",
id,
ast_util::path_to_ident(path).repr(self.tcx),
pat_ty.repr(self.tcx));
ty::type_moves_by_default(self.tcx, pat_ty)
}
};
debug!("pattern binding %?: bm=%?, binding_moves=%b",
id, bm, binding_moves);
if binding_moves {
self.move_maps.moves_map.insert(id);
}
}
}
pub fn use_receiver(&self,
receiver_expr: @expr,
visitor: vt<VisitContext>) {
self.use_fn_arg(receiver_expr, visitor);
}
pub fn use_fn_args(&self,
_: node_id,
arg_exprs: &[@expr],
visitor: vt<VisitContext>) {
//! Uses the argument expressions.
for arg_exprs.iter().advance |arg_expr| {
self.use_fn_arg(*arg_expr, visitor);
}
}
pub fn use_fn_arg(&self, arg_expr: @expr, visitor: vt<VisitContext>) {
//! Uses the argument.
self.consume_expr(arg_expr, visitor)
}
pub fn arms_have_by_move_bindings(&self,
moves_map: MovesMap,
arms: &[arm])
-> Option<@pat> {
for arms.iter().advance |arm| {
for arm.pats.iter().advance |&pat| {
for ast_util::walk_pat(pat) |p| {
if moves_map.contains(&p.id) {
return Some(p);
}
}
}
}
return None;
}
pub fn compute_captures(&self, fn_expr_id: node_id) -> @[CaptureVar] {
debug!("compute_capture_vars(fn_expr_id=%?)", fn_expr_id);
let _indenter = indenter();
let fn_ty = ty::node_id_to_type(self.tcx, fn_expr_id);
let sigil = ty::ty_closure_sigil(fn_ty);
let freevars = freevars::get_freevars(self.tcx, fn_expr_id);
if sigil == BorrowedSigil {
// &fn() captures everything by ref
at_vec::from_fn(freevars.len(), |i| {
let fvar = &freevars[i];
CaptureVar {def: fvar.def, span: fvar.span, mode: CapRef}
})
} else {
// @fn() and ~fn() capture by copy or by move depending on type
at_vec::from_fn(freevars.len(), |i| {
let fvar = &freevars[i];
let fvar_def_id = ast_util::def_id_of_def(fvar.def).node;
let fvar_ty = ty::node_id_to_type(self.tcx, fvar_def_id);
debug!("fvar_def_id=%? fvar_ty=%s",
fvar_def_id, ppaux::ty_to_str(self.tcx, fvar_ty));
let mode = if ty::type_moves_by_default(self.tcx, fvar_ty) {
CapMove
} else {
CapCopy
};
CaptureVar {def: fvar.def, span: fvar.span, mode:mode}
})
}
}
}<|fim▁end|>
|
use util::ppaux::Repr;
use util::common::indenter;
use std::at_vec;
|
<|file_name|>exiter2.py<|end_file_name|><|fim▁begin|>import sys
def bye():
sys.exit(40) # Crucial error: abort now!
try:
bye()
except Exception:
print('got it') # Oops--we ignored the exit
<|fim▁hole|><|fim▁end|>
|
print('continuing...')
|
<|file_name|>liveness-init-in-fn-expr.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn main() {
let f: @fn() -> int = || {
let i: int;
i //~ ERROR use of possibly uninitialized variable: `i`<|fim▁hole|><|fim▁end|>
|
};
error!(f());
}
|
<|file_name|>integration.js<|end_file_name|><|fim▁begin|>module.exports = function (options, suite, test, expect, teardown) {
require('./integration.expressions')(options, suite, test, expect, teardown);
suite('tribe.storage.integration.' + options.type, function () {
var storage = require('tribe.storage'),
db;
test("basic store and retrieve", function () {
return open(['p1', 'p2'],
[
{ p1: 1, p2: 'test' },
{ p1: 2, p2: 'test2' }
])
.then(function (container) {
return container.retrieve({ p: 'p1', v: 1 });
})
.then(function (rows) {
expect(rows.length).to.equal(1);
expect(rows[0]).to.deep.equal({ p1: 1, p2: 'test' });
});
});
test("multiple key index store and retrieve", function () {
return open([['p1', 'p2']],
[
{ p1: 'test', p2: 1 },
{ p1: 'test', p2: 2 },
{ p1: 'test', p2: 3 },
{ p1: 'test2', p2: 2 },
])
.then(function (container) {
return container.retrieve([{ p: 'p1', v: 'test' }, { p: 'p2', o: '>=', v: 2 }]);
})
.then(function (rows) {
expect(rows.length).to.equal(2);
});
});
// this was originally done by sorting the index. this won't work with indexeddb as order is significant
// we can store some metadata about index order and apply expression components in the correct order. todo.
//test("multiple key order does not need to match expression order", function () {
// return open([['p1', 'p2']], [{ p1: 'test', p2: 1 }])
// .then(function (container) {
// return container.retrieve([{ p: 'p2', v: 1 }, { p: 'p1', v: 'test' }]);
// })
// .then(function (rows) {
// expect(rows.length).to.equal(1);
// });
//});
test("complex object store and retrieve", function () {
return open([['p1.p2', 'p3']],
[
{ p1: { p2: 'test' }, p3: 1 },
{ p1: { p2: 'test' }, p3: 1 },
{ p1: { p2: 'test2' }, p3: 1 }
])
.then(function (container) {
return container.retrieve([{ p: 'p1.p2', v: 'test' }, { p: 'p3', v: 1 }]);
})
.then(function (rows) {
expect(rows.length).to.equal(2);
});
});
test("keyPath can be queried when autoIncrement is set", function () {
return open([],
[
{ p2: 'test' },
{ p2: 'test2' }
], 'p1', true)
.then(function (container) {
return container.retrieve({ p: 'p1', v: 1 });
})
.then(function (rows) {
expect(rows.length).to.equal(1);
expect(rows[0]).to.deep.equal({ p1: 1, p2: 'test' });
});
});
test("keyPath can be queried when autoIncrement is not set", function () {
return open([],
[
{ p1: 3, p2: 'test' },
{ p1: 4, p2: 'test2' }
], 'p1', false)
.then(function (container) {
return container.retrieve({ p: 'p1', v: 3 });
})
.then(function (rows) {
expect(rows.length).to.equal(1);
expect(rows[0]).to.deep.equal({ p1: 3, p2: 'test' });
});
});
test("keyPath can be queried with indexes", function () {
return open(['p2'],
[
{ p1: 1, p2: 'test' },
{ p1: 2, p2: 'test2' }
], 'p1')
.then(function (container) {
return container.retrieve([{ p: 'p1', v: 1 }, { p: 'p2', v: 'test' }]);
})
.then(function (rows) {
expect(rows.length).to.equal(1);
expect(rows[0]).to.deep.equal({ p1: 1, p2: 'test' });
});
});
test("store operation returns entity with autoIncrement keyPath property set", function () {
return open([], [], 'id', true)
.then(function (container) {
return container.store({});
})
.then(function (updatedEntity) {
expect(updatedEntity).to.deep.equal({ id: 1 });
});
});
test("multiple store operation returns entities with autoIncrement keyPath property set", function () {
return open([], [], 'id', true)
.then(function (container) {
return container.store([{}, {}]);
})
.then(function (updatedEntity) {
expect(updatedEntity).to.deep.equal([{ id: 1 }, { id: 2 }]);
});
});
test("stored entity has autoIncrement keyPath property set", function () {
var container;
return open([], [], 'id', true)
.then(function (db) {
container = db;
return container.store({});
})
.then(function () {
return container.retrieve({ p: 'id', v: 1 });
})
.then(function (entities) {
expect(entities.length).to.equal(1);
expect(entities[0]).to.deep.equal({ id: 1 });
});
});
test("store operation replaces entities with matching keys", function () {
var entity;
return open([], [{ p1: 1, p2: 'test' }], 'p1')
.then(function (provider) {
entity = provider;
return entity.store({ p1: 1, p2: 'test2' });
})
.then(function () {
return entity.retrieve({ p: 'p1', v: 1 });
})
.then(function (entities) {
expect(entities.length).to.equal(1);
expect(entities[0].p2).to.equal('test2');
});
});
test("single property indexes can be specified and requested as arrays or individually", function () {
var container;
return open(['p1', ['p2']], [
{ p1: 1, p2: 1 },
{ p1: 2, p2: 2 }
])
.then(function (result) {
container = result;
return container.retrieve({ p: 'p1', v: 2 });
})
.then(function (results) {
expect(results.length).to.equal(1);
})
.then(function (result) {
return container.retrieve([{ p: 'p1', v: 2 }]);
})
.then(function (results) {
expect(results.length).to.equal(1);
})
.then(function (result) {
return container.retrieve({ p: 'p2', v: 2 });
})
.then(function (results) {
expect(results.length).to.equal(1);
})
.then(function (result) {
return container.retrieve([{ p: 'p2', v: 2 }]);
})
.then(function (results) {
expect(results.length).to.equal(1);
});
});
test("retrieve sorts by index properties", function () {
var container;
return open([['p1', 'p2.value'], ['p2.value', 'p1']], [
{ p1: 4, p2: { value: 1 } },
{ p1: 3, p2: { value: 2 } },
{ p1: 1, p2: { value: 1 } },
{ p1: 2, p2: { value: 2 } }
])
.then(function (result) {
container = result;
return container.retrieve([{ p: 'p1', o: '>', v: 0 }, { p: 'p2.value', o: '>', v: 0 }]);
})
.then(function (results) {
expect(results).to.deep.equal([
{ p1: 1, p2: { value: 1 } },
{ p1: 2, p2: { value: 2 } },
{ p1: 3, p2: { value: 2 } },
{ p1: 4, p2: { value: 1 } }
]);
return container.retrieve([{ p: 'p2.value', o: '>', v: 0 }, { p: 'p1', o: '>', v: 0 }]);
})
.then(function (results) {
expect(results).to.deep.equal([
{ p1: 1, p2: { value: 1 } },
{ p1: 4, p2: { value: 1 } },
{ p1: 2, p2: { value: 2 } },
{ p1: 3, p2: { value: 2 } }
]);
});
});
test("clear deletes all entities", function () {
var container;
return open(['p1'], [{ p1: 1, p2: 1 }])
.then(function (result) {
container = result;
container.clear();
})
.then(function () {
return container.retrieve({ p: 'p1', v: 1 });
})
.then(function (messages) {
expect(messages.length).to.equal(0);
});
});
function open(indexes, entities, keyPath, autoIncrement) {
var entity;
return storage.open([{ name: 'test', indexes: indexes, keyPath: keyPath, autoIncrement: autoIncrement }], options)
.then(function (provider) {
db = provider;
entity = provider.entity('test');
return entity.store(entities);
})
.then(function () {
return entity;
});
}
teardown(function () {
db.close();<|fim▁hole|> });
});
};<|fim▁end|>
| |
<|file_name|>CompositeContextTest.js<|end_file_name|><|fim▁begin|>import { suite, test, equal, isUndefined, isFalse, isTrue } from "../assert";
import CompositeContext from "di/CompositeContext";
import Context from "di/Context";
suite("CompositeContext", () => {
test("no contexts", () => {
const composite = new CompositeContext();
const hasFirst = composite.has("first");
const firstValue = composite.get("first");
isFalse(hasFirst);
isUndefined(firstValue);
});
test("single context", () => {
const context = new Context({ first: "first expected" });
const composite = new CompositeContext([context]);
const hasFirst = composite.has("first");
const firstValue = composite.get("first");
isTrue(hasFirst);
equal("first expected", firstValue);
});
test("multiple contexts", () => {
const primaryContext = new Context({ first: "first expected" });
const secondaryContext = new Context({
first: "nothing",
second: "second expected"<|fim▁hole|>
const hasFirst = composite.has("first");
const firstValue = composite.get("first");
isTrue(hasFirst);
equal("first expected", firstValue);
const hasSecond = composite.has("second");
const secondValue = composite.get("second");
isTrue(hasSecond);
equal("second expected", secondValue);
});
});<|fim▁end|>
|
});
const composite = new CompositeContext([primaryContext, secondaryContext]);
|
<|file_name|>RenderSVGResourceClipper.cpp<|end_file_name|><|fim▁begin|>/*
* Copyright (C) 2004, 2005, 2007, 2008 Nikolas Zimmermann <[email protected]>
* Copyright (C) 2004, 2005, 2006, 2007, 2008 Rob Buis <[email protected]>
* Copyright (C) Research In Motion Limited 2009-2010. All rights reserved.
* Copyright (C) 2011 Dirk Schulze <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public License
* along with this library; see the file COPYING.LIB. If not, write to
* the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301, USA.
*/
#include "config.h"
#include "RenderSVGResourceClipper.h"
#include "ElementIterator.h"
#include "Frame.h"
#include "FrameView.h"
#include "HitTestRequest.h"
#include "HitTestResult.h"
#include "IntRect.h"
#include "RenderObject.h"
#include "RenderStyle.h"
#include "RenderView.h"
#include "SVGNames.h"
#include "SVGRenderingContext.h"
#include "SVGResources.h"
#include "SVGResourcesCache.h"
#include "SVGUseElement.h"
namespace WebCore {
RenderSVGResourceClipper::RenderSVGResourceClipper(SVGClipPathElement& element, RenderStyle&& style)
: RenderSVGResourceContainer(element, WTFMove(style))
{
}
RenderSVGResourceClipper::~RenderSVGResourceClipper()
{
}
void RenderSVGResourceClipper::removeAllClientsFromCache(bool markForInvalidation)
{
m_clipBoundaries = FloatRect();
m_clipper.clear();
markAllClientsForInvalidation(markForInvalidation ? LayoutAndBoundariesInvalidation : ParentOnlyInvalidation);
}
void RenderSVGResourceClipper::removeClientFromCache(RenderElement& client, bool markForInvalidation)
{
m_clipper.remove(&client);
markClientForInvalidation(client, markForInvalidation ? BoundariesInvalidation : ParentOnlyInvalidation);
}
bool RenderSVGResourceClipper::applyResource(RenderElement& renderer, const RenderStyle&, GraphicsContext*& context, unsigned short resourceMode)
{
ASSERT(context);
ASSERT_UNUSED(resourceMode, resourceMode == ApplyToDefaultMode);
return applyClippingToContext(renderer, renderer.objectBoundingBox(), renderer.repaintRectInLocalCoordinates(), *context);
}
bool RenderSVGResourceClipper::pathOnlyClipping(GraphicsContext& context, const AffineTransform& animatedLocalTransform, const FloatRect& objectBoundingBox)
{
// If the current clip-path gets clipped itself, we have to fallback to masking.
if (!style().svgStyle().clipperResource().isEmpty())
return false;
WindRule clipRule = RULE_NONZERO;
Path clipPath = Path();
// If clip-path only contains one visible shape or path, we can use path-based clipping. Invisible
// shapes don't affect the clipping and can be ignored. If clip-path contains more than one
// visible shape, the additive clipping may not work, caused by the clipRule. EvenOdd
// as well as NonZero can cause self-clipping of the elements.
// See also http://www.w3.org/TR/SVG/painting.html#FillRuleProperty
for (Node* childNode = clipPathElement().firstChild(); childNode; childNode = childNode->nextSibling()) {
RenderObject* renderer = childNode->renderer();
if (!renderer)
continue;
// Only shapes or paths are supported for direct clipping. We need to fallback to masking for texts.
if (renderer->isSVGText())
return false;
if (!childNode->isSVGElement() || !downcast<SVGElement>(*childNode).isSVGGraphicsElement())
continue;
SVGGraphicsElement& styled = downcast<SVGGraphicsElement>(*childNode);
const RenderStyle& style = renderer->style();
if (style.display() == NONE || style.visibility() != VISIBLE)
continue;
const SVGRenderStyle& svgStyle = style.svgStyle();
// Current shape in clip-path gets clipped too. Fallback to masking.
if (!svgStyle.clipperResource().isEmpty())
return false;
// Fallback to masking, if there is more than one clipping path.
if (clipPath.isEmpty()) {
styled.toClipPath(clipPath);
clipRule = svgStyle.clipRule();
} else
return false;
}
// Only one visible shape/path was found. Directly continue clipping and transform the content to userspace if necessary.
if (clipPathElement().clipPathUnits() == SVGUnitTypes::SVG_UNIT_TYPE_OBJECTBOUNDINGBOX) {
AffineTransform transform;
transform.translate(objectBoundingBox.x(), objectBoundingBox.y());
transform.scaleNonUniform(objectBoundingBox.width(), objectBoundingBox.height());
clipPath.transform(transform);
}
// Transform path by animatedLocalTransform.
clipPath.transform(animatedLocalTransform);
// The SVG specification wants us to clip everything, if clip-path doesn't have a child.
if (clipPath.isEmpty())
clipPath.addRect(FloatRect());
context.clipPath(clipPath, clipRule);
return true;
}
bool RenderSVGResourceClipper::applyClippingToContext(RenderElement& renderer, const FloatRect& objectBoundingBox, const FloatRect& repaintRect, GraphicsContext& context)
{
ClipperMaskImage& clipperMaskImage = addRendererToClipper(renderer);
bool shouldCreateClipperMaskImage = !clipperMaskImage;
AffineTransform animatedLocalTransform = clipPathElement().animatedLocalTransform();
if (shouldCreateClipperMaskImage && pathOnlyClipping(context, animatedLocalTransform, objectBoundingBox))
return true;
AffineTransform absoluteTransform = SVGRenderingContext::calculateTransformationToOutermostCoordinateSystem(renderer);
if (shouldCreateClipperMaskImage && !repaintRect.isEmpty()) {
// FIXME (149469): This image buffer should not be unconditionally unaccelerated. Making it match the context breaks nested clipping, though.
clipperMaskImage = SVGRenderingContext::createImageBuffer(repaintRect, absoluteTransform, ColorSpaceSRGB, Unaccelerated);
if (!clipperMaskImage)
return false;
GraphicsContext& maskContext = clipperMaskImage->context();
maskContext.concatCTM(animatedLocalTransform);
// clipPath can also be clipped by another clipPath.
auto* resources = SVGResourcesCache::cachedResourcesForRenderer(*this);
RenderSVGResourceClipper* clipper;
bool succeeded;
if (resources && (clipper = resources->clipper())) {
GraphicsContextStateSaver stateSaver(maskContext);
if (!clipper->applyClippingToContext(*this, objectBoundingBox, repaintRect, maskContext))
return false;
succeeded = drawContentIntoMaskImage(clipperMaskImage, objectBoundingBox);
// The context restore applies the clipping on non-CG platforms.
} else
succeeded = drawContentIntoMaskImage(clipperMaskImage, objectBoundingBox);
if (!succeeded)
clipperMaskImage.reset();
}
if (!clipperMaskImage)
return false;
SVGRenderingContext::clipToImageBuffer(context, absoluteTransform, repaintRect, clipperMaskImage, shouldCreateClipperMaskImage);
return true;
}
bool RenderSVGResourceClipper::drawContentIntoMaskImage(const ClipperMaskImage& clipperMaskImage, const FloatRect& objectBoundingBox)
{
ASSERT(clipperMaskImage);
GraphicsContext& maskContext = clipperMaskImage->context();
AffineTransform maskContentTransformation;
if (clipPathElement().clipPathUnits() == SVGUnitTypes::SVG_UNIT_TYPE_OBJECTBOUNDINGBOX) {
maskContentTransformation.translate(objectBoundingBox.x(), objectBoundingBox.y());
maskContentTransformation.scaleNonUniform(objectBoundingBox.width(), objectBoundingBox.height());
maskContext.concatCTM(maskContentTransformation);
}
// Switch to a paint behavior where all children of this <clipPath> will be rendered using special constraints:
// - fill-opacity/stroke-opacity/opacity set to 1
// - masker/filter not applied when rendering the children
// - fill is set to the initial fill paint server (solid, black)
// - stroke is set to the initial stroke paint server (none)
PaintBehavior oldBehavior = view().frameView().paintBehavior();
view().frameView().setPaintBehavior(oldBehavior | PaintBehaviorRenderingSVGMask);
// Draw all clipPath children into a global mask.
for (auto& child : childrenOfType<SVGElement>(clipPathElement())) {
auto renderer = child.renderer();
if (!renderer)
continue;
if (renderer->needsLayout()) {
view().frameView().setPaintBehavior(oldBehavior);
return false;
}
const RenderStyle& style = renderer->style();
if (style.display() == NONE || style.visibility() != VISIBLE)
continue;
WindRule newClipRule = style.svgStyle().clipRule();
bool isUseElement = child.hasTagName(SVGNames::useTag);
if (isUseElement) {
SVGUseElement& useElement = downcast<SVGUseElement>(child);
renderer = useElement.rendererClipChild();
if (!renderer)
continue;
if (!useElement.hasAttributeWithoutSynchronization(SVGNames::clip_ruleAttr))
newClipRule = renderer->style().svgStyle().clipRule();
}
// Only shapes, paths and texts are allowed for clipping.
if (!renderer->isSVGShape() && !renderer->isSVGText())
continue;
maskContext.setFillRule(newClipRule);
            // In the case of a <use> element, we obtained its renderer above, to retrieve its clipRule.
            // We have to pass the <use> renderer itself to renderSubtreeToImageBuffer() to apply its x/y/transform/etc. values when rendering.
            // So if isUseElement is true, refetch the childNode->renderer(), as renderer got overridden above.
SVGRenderingContext::renderSubtreeToImageBuffer(clipperMaskImage.get(), isUseElement ? *child.renderer() : *renderer, maskContentTransformation);
}
<|fim▁hole|>
void RenderSVGResourceClipper::calculateClipContentRepaintRect()
{
// This is a rough heuristic to appraise the clip size and doesn't consider clip on clip.
for (Node* childNode = clipPathElement().firstChild(); childNode; childNode = childNode->nextSibling()) {
RenderObject* renderer = childNode->renderer();
if (!childNode->isSVGElement() || !renderer)
continue;
if (!renderer->isSVGShape() && !renderer->isSVGText() && !childNode->hasTagName(SVGNames::useTag))
continue;
const RenderStyle& style = renderer->style();
if (style.display() == NONE || style.visibility() != VISIBLE)
continue;
m_clipBoundaries.unite(renderer->localToParentTransform().mapRect(renderer->repaintRectInLocalCoordinates()));
}
m_clipBoundaries = clipPathElement().animatedLocalTransform().mapRect(m_clipBoundaries);
}
ClipperMaskImage& RenderSVGResourceClipper::addRendererToClipper(const RenderObject& object)
{
return m_clipper.add(&object, ClipperMaskImage()).iterator->value;
}
bool RenderSVGResourceClipper::hitTestClipContent(const FloatRect& objectBoundingBox, const FloatPoint& nodeAtPoint)
{
FloatPoint point = nodeAtPoint;
if (!SVGRenderSupport::pointInClippingArea(*this, point))
return false;
if (clipPathElement().clipPathUnits() == SVGUnitTypes::SVG_UNIT_TYPE_OBJECTBOUNDINGBOX) {
AffineTransform transform;
transform.translate(objectBoundingBox.x(), objectBoundingBox.y());
transform.scaleNonUniform(objectBoundingBox.width(), objectBoundingBox.height());
point = transform.inverse().value_or(AffineTransform()).mapPoint(point);
}
point = clipPathElement().animatedLocalTransform().inverse().value_or(AffineTransform()).mapPoint(point);
for (Node* childNode = clipPathElement().firstChild(); childNode; childNode = childNode->nextSibling()) {
RenderObject* renderer = childNode->renderer();
if (!childNode->isSVGElement() || !renderer)
continue;
if (!renderer->isSVGShape() && !renderer->isSVGText() && !childNode->hasTagName(SVGNames::useTag))
continue;
IntPoint hitPoint;
HitTestResult result(hitPoint);
if (renderer->nodeAtFloatPoint(HitTestRequest(HitTestRequest::SVGClipContent | HitTestRequest::DisallowUserAgentShadowContent), result, point, HitTestForeground))
return true;
}
return false;
}
FloatRect RenderSVGResourceClipper::resourceBoundingBox(const RenderObject& object)
{
// Resource has not been laid out yet. Return the bounding box of the object.
if (selfNeedsLayout()) {
addRendererToClipper(object);
return object.objectBoundingBox();
}
if (m_clipBoundaries.isEmpty())
calculateClipContentRepaintRect();
if (clipPathElement().clipPathUnits() == SVGUnitTypes::SVG_UNIT_TYPE_OBJECTBOUNDINGBOX) {
FloatRect objectBoundingBox = object.objectBoundingBox();
AffineTransform transform;
transform.translate(objectBoundingBox.x(), objectBoundingBox.y());
transform.scaleNonUniform(objectBoundingBox.width(), objectBoundingBox.height());
return transform.mapRect(m_clipBoundaries);
}
return m_clipBoundaries;
}
}<|fim▁end|>
|
view().frameView().setPaintBehavior(oldBehavior);
return true;
}
|
<|file_name|>253_add_pci_requests_to_instance_extra_table.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column
from sqlalchemy import MetaData
from sqlalchemy import Table
from sqlalchemy import Text
BASE_TABLE_NAME = 'instance_extra'
NEW_COLUMN_NAME = 'pci_requests'
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
for prefix in ('', 'shadow_'):
table = Table(prefix + BASE_TABLE_NAME, meta, autoload=True)
new_column = Column(NEW_COLUMN_NAME, Text, nullable=True)
if not hasattr(table.c, NEW_COLUMN_NAME):<|fim▁hole|>
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
for prefix in ('', 'shadow_'):
table = Table(prefix + BASE_TABLE_NAME, meta, autoload=True)
if hasattr(table.c, NEW_COLUMN_NAME):
getattr(table.c, NEW_COLUMN_NAME).drop()<|fim▁end|>
|
table.create_column(new_column)
|
<|file_name|>TopKStateUpdater.java<|end_file_name|><|fim▁begin|>package storm.starter.trident.homework.state;
import storm.trident.operation.TridentCollector;
import storm.trident.state.BaseStateUpdater;
import storm.trident.tuple.TridentTuple;
import java.util.ArrayList;
import java.util.List;
/**
* Updater class that updates the state with the new tweets.
* Created by Parth Satra on 4/5/15.
*/
public class TopKStateUpdater extends BaseStateUpdater<TopKState> {
@Override
public void updateState(TopKState topKState, List<TridentTuple> list, TridentCollector tridentCollector) {
for(TridentTuple tuple : list) {
// Gets all the space separated hashtags.
String hashTags = tuple.getString(0);<|fim▁hole|> // Creates the list to be added to the state
List<TopTweet> tweetList = new ArrayList<TopTweet>();
for(String t : tag) {
if(t != null && t.trim().length() != 0) {
TopTweet tt = new TopTweet(t, 1);
tweetList.add(tt);
}
}
// Adds the list to the state.
topKState.add(tweetList);
}
}
}<|fim▁end|>
|
String[] tag = hashTags.split(" ");
|
<|file_name|>ui-checkbox.js<|end_file_name|><|fim▁begin|>import Ember from 'ember';
import CheckboxMixin from '../mixins/checkbox-mixin';
export default Ember.Component.extend(CheckboxMixin, {
type: 'checkbox',
checked: false,
onChange: function() {
this.set('checked', this.$('input').prop('checked'));
this.sendAction("action", {
checked: this.get('checked'),<|fim▁hole|> value: this.get('value')
});
}
});<|fim▁end|>
| |
<|file_name|>monit.py<|end_file_name|><|fim▁begin|>#! /usr/bin/python
# vim: set fileencoding=utf-8 sw=4 ts=4 et:
import rrdtool, os
from def_rrds import rrds
import def_metrics
from def_metrics import metrics
<|fim▁hole|> for i,f in enumerate(metrics[m]['rrd']):
if not os.path.isfile(f):
rrdtool.create(f, rrds[m][i])
else:
if not os.path.isfile(metrics[m]['rrd']):
rrdtool.create(metrics[m]['rrd'], rrds[m])
values = getattr(def_metrics, '_get_'+m)()
#print(m, repr(values))
if isinstance(metrics[m]['rrd'], list):
# multiple rrds
for i,f in enumerate(metrics[m]['rrd']):
rrdtool.update(f, 'N:'+':'.join(values[i]))
else:
if isinstance(values, str):
rrdtool.update(metrics[m]['rrd'], 'N:%s' % values)
else: # tuple
rrdtool.update(metrics[m]['rrd'], 'N:'+':'.join(values))<|fim▁end|>
|
for m in metrics:
if isinstance(metrics[m]['rrd'], list):
# multiple rrds
|
<|file_name|>standalone_bot.py<|end_file_name|><|fim▁begin|>""" Run this file to run bots as a standalone application, detached from the webapp """
from snoohelper.utils.teams import SlackTeamsController
TESTING = False
def main():
if not TESTING:
SlackTeamsController("teams.ini", 'snoohelper_master.db')
else:<|fim▁hole|>
if __name__ == "__main__":
main()<|fim▁end|>
|
SlackTeamsController("teams_test.ini", 'snoohelper_test.db')
|
<|file_name|>pack_usb.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
#coding: utf8
import sys, struct
data_size = 2*1024*1024
if len(sys.argv) != 3:
print 'Using: ./pack_usb.py input.bin output.bin'
sys.exit(0)
fin = file(sys.argv[1], 'rb')
data = fin.read()
print 'Size: %d bytes' % len(data)
if len(data) > data_size-2:
print 'Error: too big'
sys.exit(0)
data += b'\xa5'*(data_size-2-len(data))
fout = file(sys.argv[-1], 'wb')
fout.write(data)
checksum = sum([struct.unpack('<H', data[i:i+2])[0] for i in range(0, len(data), 2)]) % 0x10000
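# The trailing 16-bit word written below is chosen so that the little-endian word sum of the whole image equals 0xAA55 (presumably the value the target's loader checks).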
#fout.write(b'\0'*(data_size-2-len(data)))<|fim▁hole|>fout.write(struct.pack('<H', (0x1aa55-checksum)%0x10000))
fout.close()<|fim▁end|>
|
print 'Checksum: 0x%04x' % checksum
|
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>import os
from setuptools import setup
LONG_DESCRIPTION = """
A modular framework for mobile surveys and field data collection via offline-capable mobile web apps.
"""
def readme():
try:
readme = open('README.md')
except IOError:
return LONG_DESCRIPTION
else:
return readme.read()
setup(
name='wq',
use_scm_version=True,
author='S. Andrew Sheppard',
author_email='[email protected]',
url='https://wq.io/',
license='MIT',
description=LONG_DESCRIPTION.strip(),
long_description=readme(),
long_description_content_type='text/markdown',
install_requires=[
'wq.app>=1.3.0b1,<1.4.0',
'wq.build>=1.3.0b1,<1.4.0',
'wq.create>=1.3.0b1,<1.4.0',
'wq.db>=1.3.0b1,<1.4.0',
],
python_requires='>=3',
packages=['wq'],
namespace_packages=['wq'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: JavaScript',
'Programming Language :: Python :: 3',<|fim▁hole|> 'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3 :: Only',
'Framework :: Django',
'Framework :: Django :: 1.11',
'Framework :: Django :: 2.0',
'Framework :: Django :: 2.1',
'Framework :: Django :: 2.2',
'Framework :: Django :: 3.1',
'Framework :: Django :: 3.0',
'Framework :: Django :: 3.2',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Text Processing :: Markup :: HTML',
'Topic :: Scientific/Engineering :: GIS',
'Topic :: Software Development :: Build Tools',
'Topic :: Software Development :: Pre-processors',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Text Processing :: Markup :: XML',
],
setup_requires=[
'setuptools_scm',
],
project_urls={
'Homepage': 'https://wq.io/',
'Documentation': 'https://wq.io/',
'Source': 'https://github.com/wq/wq',
'Release Notes': 'https://github.com/wq/wq/releases',
'Issues': 'https://github.com/wq/wq/issues',
},
)<|fim▁end|>
|
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
|
<|file_name|>twitter_eggmode.rs<|end_file_name|><|fim▁begin|>extern crate egg_mode;
extern crate tokio;
use super::notifier::Error;
use super::NotifierStrategy;
use super::Message;
use self::tokio::runtime::current_thread::block_on_all;
use std::string::ToString;
pub struct TwitterEggMode {
token: egg_mode::Token,
}
impl TwitterEggMode {
pub fn new(consumer_key: &str, consumer_secret: &str, access_key: &str, access_secret: &str) -> Self {
// create twitter client
let consumer = egg_mode::KeyPair::new(consumer_key.to_owned(), consumer_secret.to_owned());
let access = egg_mode::KeyPair::new(access_key.to_owned(), access_secret.to_owned());
let token = egg_mode::Token::Access { consumer, access };
Self { token }
}
}
impl NotifierStrategy for TwitterEggMode {
fn notify(&self, message: &Message) -> Result<(), Error> {
let truncated = message.truncate(140);
block_on_all(
egg_mode::tweet::DraftTweet::new(truncated.body())
.send(&self.token)
).map_err(|e| Error::FailedToPostMessage(e.to_string()))?;
Ok(())<|fim▁hole|><|fim▁end|>
|
}
}
|
<|file_name|>LED.java<|end_file_name|><|fim▁begin|>package org.usfirst.frc369.Robot2017Code.subsystems;
import org.usfirst.frc369.Robot2017Code.Robot;
import edu.wpi.first.wpilibj.Relay;
import edu.wpi.first.wpilibj.command.Subsystem;
/**
*
*/
public class LED extends Subsystem {
// Put methods for controlling this subsystem
// here. Call these from Commands.
public void initDefaultCommand() {
// Set the default command for a subsystem here.
//setDefaultCommand(new MySpecialCommand());
}<|fim▁hole|> }
public void LEDelse(){
Robot.LEDSys.equals(Relay.Value.kReverse);
}
public void LEDOff(){
Robot.LEDSys.equals(Relay.Value.kOff);
}
}<|fim▁end|>
|
public void LEDOn(){
Robot.LEDSys.equals(Relay.Value.kForward);
|
<|file_name|>PtConsequencesStructure.java<|end_file_name|><|fim▁begin|>//
// This file was generated by the JavaTM Architecture for XML Binding(JAXB) Reference Implementation, v2.2.8-b130911.1802
// See <a href="http://java.sun.com/xml/jaxb">http://java.sun.com/xml/jaxb</a>
// Any modifications to this file will be lost upon recompilation of the source schema.
// Generated on: 2017.11.30 at 08:24:17 PM JST
//
package uk.org.siri.siri;
import java.util.ArrayList;
import java.util.List;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlType;
/**
* Type for list of effects.
*
* <p>Java class for PtConsequencesStructure complex type.
*
* <p>The following schema fragment specifies the expected content contained within this class.
*
* <pre>
* <complexType name="PtConsequencesStructure">
* <complexContent>
* <restriction base="{http://www.w3.org/2001/XMLSchema}anyType">
* <sequence>
* <element name="Consequence" type="{http://www.siri.org.uk/siri}PtConsequenceStructure" maxOccurs="unbounded"/>
* </sequence>
* </restriction>
* </complexContent>
* </complexType>
* </pre>
*
*
*/
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "PtConsequencesStructure", propOrder = {
"consequence"
})
public class PtConsequencesStructure {
@XmlElement(name = "Consequence", required = true)
protected List<PtConsequenceStructure> consequence;
/**
* Gets the value of the consequence property.
*
* <p>
* This accessor method returns a reference to the live list,
* not a snapshot. Therefore any modification you make to the
* returned list will be present inside the JAXB object.
* This is why there is not a <CODE>set</CODE> method for the consequence property.
*
* <p>
* For example, to add a new item, do as follows:
* <pre>
* getConsequence().add(newItem);
* </pre>
*
*
* <p>
* Objects of the following type(s) are allowed in the list
* {@link PtConsequenceStructure }
*
*
*/
public List<PtConsequenceStructure> getConsequence() {<|fim▁hole|> if (consequence == null) {
consequence = new ArrayList<PtConsequenceStructure>();
}
return this.consequence;
}
}<|fim▁end|>
| |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>###############################################################################
##
## Copyright (C) 2014-2016, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
"""This package implements a very simple Qt GUI that can load a
pipeline, change its parameters based on aliases, and execute them on
the spreadsheet."""
from __future__ import division
<|fim▁hole|>name = 'Pipeline Editor'
version = '0.0.2'
old_identifiers = ['edu.utah.sci.vistrails.pipelineedit']<|fim▁end|>
|
identifier = 'org.vistrails.vistrails.pipelineedit'
|
<|file_name|>MysqlDriver.ts<|end_file_name|><|fim▁begin|>import {Driver} from "../Driver";
import {ConnectionIsNotSetError} from "../../error/ConnectionIsNotSetError";
import {DriverPackageNotInstalledError} from "../../error/DriverPackageNotInstalledError";
import {DriverUtils} from "../DriverUtils";
import {MysqlQueryRunner} from "./MysqlQueryRunner";
import {ObjectLiteral} from "../../common/ObjectLiteral";
import {ColumnMetadata} from "../../metadata/ColumnMetadata";
import {DateUtils} from "../../util/DateUtils";
import {PlatformTools} from "../../platform/PlatformTools";
import {Connection} from "../../connection/Connection";
import {RdbmsSchemaBuilder} from "../../schema-builder/RdbmsSchemaBuilder";
import {MysqlConnectionOptions} from "./MysqlConnectionOptions";
import {MappedColumnTypes} from "../types/MappedColumnTypes";
import {ColumnType} from "../types/ColumnTypes";
import {DataTypeDefaults} from "../types/DataTypeDefaults";
import {TableColumn} from "../../schema-builder/table/TableColumn";
import {MysqlConnectionCredentialsOptions} from "./MysqlConnectionCredentialsOptions";
import {EntityMetadata} from "../../metadata/EntityMetadata";
import {OrmUtils} from "../../util/OrmUtils";
import {ApplyValueTransformers} from "../../util/ApplyValueTransformers";
import {ReplicationMode} from "../types/ReplicationMode";
/**
* Organizes communication with MySQL DBMS.<|fim▁hole|>
// -------------------------------------------------------------------------
// Public Properties
// -------------------------------------------------------------------------
/**
* Connection used by driver.
*/
connection: Connection;
/**
* Mysql underlying library.
*/
mysql: any;
/**
* Connection pool.
* Used in non-replication mode.
*/
pool: any;
/**
* Pool cluster used in replication mode.
*/
poolCluster: any;
// -------------------------------------------------------------------------
// Public Implemented Properties
// -------------------------------------------------------------------------
/**
* Connection options.
*/
options: MysqlConnectionOptions;
/**
* Master database used to perform all write queries.
*/
database?: string;
/**
* Indicates if replication is enabled.
*/
isReplicated: boolean = false;
/**
* Indicates if tree tables are supported by this driver.
*/
treeSupport = true;
/**
* Gets list of supported column data types by a driver.
*
* @see https://www.tutorialspoint.com/mysql/mysql-data-types.htm
* @see https://dev.mysql.com/doc/refman/8.0/en/data-types.html
*/
supportedDataTypes: ColumnType[] = [
// numeric types
"bit",
"int",
"integer", // synonym for int
"tinyint",
"smallint",
"mediumint",
"bigint",
"float",
"double",
"double precision", // synonym for double
"real", // synonym for double
"decimal",
"dec", // synonym for decimal
"numeric", // synonym for decimal
"fixed", // synonym for decimal
"bool", // synonym for tinyint
"boolean", // synonym for tinyint
// date and time types
"date",
"datetime",
"timestamp",
"time",
"year",
// string types
"char",
"nchar", // synonym for national char
"national char",
"varchar",
"nvarchar", // synonym for national varchar
"national varchar",
"blob",
"text",
"tinyblob",
"tinytext",
"mediumblob",
"mediumtext",
"longblob",
"longtext",
"enum",
"set",
"binary",
"varbinary",
// json data type
"json",
// spatial data types
"geometry",
"point",
"linestring",
"polygon",
"multipoint",
"multilinestring",
"multipolygon",
"geometrycollection"
];
/**
* Gets list of spatial column data types.
*/
spatialTypes: ColumnType[] = [
"geometry",
"point",
"linestring",
"polygon",
"multipoint",
"multilinestring",
"multipolygon",
"geometrycollection"
];
/**
* Gets list of column data types that support length by a driver.
*/
withLengthColumnTypes: ColumnType[] = [
"char",
"varchar",
"nvarchar",
"binary",
"varbinary"
];
/**
* Gets list of column data types that support length by a driver.
*/
withWidthColumnTypes: ColumnType[] = [
"bit",
"tinyint",
"smallint",
"mediumint",
"int",
"integer",
"bigint"
];
/**
* Gets list of column data types that support precision by a driver.
*/
withPrecisionColumnTypes: ColumnType[] = [
"decimal",
"dec",
"numeric",
"fixed",
"float",
"double",
"double precision",
"real",
"time",
"datetime",
"timestamp"
];
/**
* Gets list of column data types that supports scale by a driver.
*/
withScaleColumnTypes: ColumnType[] = [
"decimal",
"dec",
"numeric",
"fixed",
"float",
"double",
"double precision",
"real"
];
/**
* Gets list of column data types that supports UNSIGNED and ZEROFILL attributes.
*/
unsignedAndZerofillTypes: ColumnType[] = [
"int",
"integer",
"smallint",
"tinyint",
"mediumint",
"bigint",
"decimal",
"dec",
"numeric",
"fixed",
"float",
"double",
"double precision",
"real"
];
/**
* ORM has special columns and we need to know what database column types should be for those columns.
* Column types are driver dependent.
*/
mappedDataTypes: MappedColumnTypes = {
createDate: "datetime",
createDatePrecision: 6,
createDateDefault: "CURRENT_TIMESTAMP(6)",
updateDate: "datetime",
updateDatePrecision: 6,
updateDateDefault: "CURRENT_TIMESTAMP(6)",
deleteDate: "datetime",
deleteDatePrecision: 6,
deleteDateNullable: true,
version: "int",
treeLevel: "int",
migrationId: "int",
migrationName: "varchar",
migrationTimestamp: "bigint",
cacheId: "int",
cacheIdentifier: "varchar",
cacheTime: "bigint",
cacheDuration: "int",
cacheQuery: "text",
cacheResult: "text",
metadataType: "varchar",
metadataDatabase: "varchar",
metadataSchema: "varchar",
metadataTable: "varchar",
metadataName: "varchar",
metadataValue: "text",
};
/**
* Default values of length, precision and scale depends on column data type.
* Used in the cases when length/precision/scale is not specified by user.
*/
dataTypeDefaults: DataTypeDefaults = {
"varchar": { length: 255 },
"nvarchar": { length: 255 },
"national varchar": { length: 255 },
"char": { length: 1 },
"binary": { length: 1 },
"varbinary": { length: 255 },
"decimal": { precision: 10, scale: 0 },
"dec": { precision: 10, scale: 0 },
"numeric": { precision: 10, scale: 0 },
"fixed": { precision: 10, scale: 0 },
"float": { precision: 12 },
"double": { precision: 22 },
"time": { precision: 0 },
"datetime": { precision: 0 },
"timestamp": { precision: 0 },
"bit": { width: 1 },
"int": { width: 11 },
"integer": { width: 11 },
"tinyint": { width: 4 },
"smallint": { width: 6 },
"mediumint": { width: 9 },
"bigint": { width: 20 }
};
/**
* Max length allowed by MySQL for aliases.
* @see https://dev.mysql.com/doc/refman/5.5/en/identifiers.html
*/
maxAliasLength = 63;
// -------------------------------------------------------------------------
// Constructor
// -------------------------------------------------------------------------
constructor(connection: Connection) {
this.connection = connection;
this.options = {
legacySpatialSupport: true,
...connection.options
} as MysqlConnectionOptions;
this.isReplicated = this.options.replication ? true : false;
// load mysql package
this.loadDependencies();
this.database = this.options.replication ? this.options.replication.master.database : this.options.database;
// validate options to make sure everything is set
// todo: revisit validation with replication in mind
// if (!(this.options.host || (this.options.extra && this.options.extra.socketPath)) && !this.options.socketPath)
// throw new DriverOptionNotSetError("socketPath and host");
// if (!this.options.username)
// throw new DriverOptionNotSetError("username");
// if (!this.options.database)
// throw new DriverOptionNotSetError("database");
// todo: check what is going on when connection is setup without database and how to connect to a database then?
// todo: provide options to auto-create a database if it does not exist yet
}
// -------------------------------------------------------------------------
// Public Methods
// -------------------------------------------------------------------------
/**
* Performs connection to the database.
*/
async connect(): Promise<void> {
if (this.options.replication) {
this.poolCluster = this.mysql.createPoolCluster(this.options.replication);
this.options.replication.slaves.forEach((slave, index) => {
this.poolCluster.add("SLAVE" + index, this.createConnectionOptions(this.options, slave));
});
this.poolCluster.add("MASTER", this.createConnectionOptions(this.options, this.options.replication.master));
} else {
this.pool = await this.createPool(this.createConnectionOptions(this.options, this.options));
}
}
/**
* Makes any action after connection (e.g. create extensions in Postgres driver).
*/
afterConnect(): Promise<void> {
return Promise.resolve();
}
/**
* Closes connection with the database.
*/
async disconnect(): Promise<void> {
if (!this.poolCluster && !this.pool)
return Promise.reject(new ConnectionIsNotSetError("mysql"));
if (this.poolCluster) {
return new Promise<void>((ok, fail) => {
this.poolCluster.end((err: any) => err ? fail(err) : ok());
this.poolCluster = undefined;
});
}
if (this.pool) {
return new Promise<void>((ok, fail) => {
this.pool.end((err: any) => {
if (err) return fail(err);
this.pool = undefined;
ok();
});
});
}
}
/**
* Creates a schema builder used to build and sync a schema.
*/
createSchemaBuilder() {
return new RdbmsSchemaBuilder(this.connection);
}
/**
* Creates a query runner used to execute database queries.
*/
createQueryRunner(mode: ReplicationMode) {
return new MysqlQueryRunner(this, mode);
}
/**
* Replaces parameters in the given sql with special escaping character
* and an array of parameter names to be passed to a query.
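*
* For example (illustrative): a query such as `... WHERE id = :id AND role IN (:...roles)` comes back
* as `... WHERE id = ? AND role IN (?)`, with the corresponding values appended to the returned array.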
*/
escapeQueryWithParameters(sql: string, parameters: ObjectLiteral, nativeParameters: ObjectLiteral): [string, any[]] {
const escapedParameters: any[] = Object.keys(nativeParameters).map(key => nativeParameters[key]);
if (!parameters || !Object.keys(parameters).length)
return [sql, escapedParameters];
const keys = Object.keys(parameters).map(parameter => "(:(\\.\\.\\.)?" + parameter + "\\b)").join("|");
sql = sql.replace(new RegExp(keys, "g"), (key: string) => {
let value: any;
if (key.substr(0, 4) === ":...") {
value = parameters[key.substr(4)];
} else {
value = parameters[key.substr(1)];
}
if (value instanceof Function) {
return value();
} else {
escapedParameters.push(value);
return "?";
}
}); // todo: make replace only in value statements, otherwise problems
return [sql, escapedParameters];
}
/**
* Escapes a column name.
*/
escape(columnName: string): string {
return "`" + columnName + "`";
}
/**
* Build full table name with database name, schema name and table name.
* E.g. "myDB"."mySchema"."myTable"
*/
buildTableName(tableName: string, schema?: string, database?: string): string {
return database ? `${database}.${tableName}` : tableName;
}
/**
* Prepares given value to a value to be persisted, based on its column type and metadata.
*/
preparePersistentValue(value: any, columnMetadata: ColumnMetadata): any {
if (columnMetadata.transformer)
value = ApplyValueTransformers.transformTo(columnMetadata.transformer, value);
if (value === null || value === undefined)
return value;
if (columnMetadata.type === Boolean) {
return value === true ? 1 : 0;
} else if (columnMetadata.type === "date") {
return DateUtils.mixedDateToDateString(value);
} else if (columnMetadata.type === "time") {
return DateUtils.mixedDateToTimeString(value);
} else if (columnMetadata.type === "json") {
return JSON.stringify(value);
} else if (columnMetadata.type === "timestamp" || columnMetadata.type === "datetime" || columnMetadata.type === Date) {
return DateUtils.mixedDateToDate(value);
} else if (columnMetadata.type === "simple-array") {
return DateUtils.simpleArrayToString(value);
} else if (columnMetadata.type === "simple-json") {
return DateUtils.simpleJsonToString(value);
} else if (columnMetadata.type === "enum" || columnMetadata.type === "simple-enum") {
return "" + value;
} else if (columnMetadata.type === "set") {
return DateUtils.simpleArrayToString(value);
}
return value;
}
/**
* Prepares given value to a value to be persisted, based on its column type or metadata.
*/
prepareHydratedValue(value: any, columnMetadata: ColumnMetadata): any {
if (value === null || value === undefined)
return columnMetadata.transformer ? ApplyValueTransformers.transformFrom(columnMetadata.transformer, value) : value;
if (columnMetadata.type === Boolean || columnMetadata.type === "bool" || columnMetadata.type === "boolean") {
value = value ? true : false;
} else if (columnMetadata.type === "datetime" || columnMetadata.type === Date) {
value = DateUtils.normalizeHydratedDate(value);
} else if (columnMetadata.type === "date") {
value = DateUtils.mixedDateToDateString(value);
} else if (columnMetadata.type === "json") {
value = typeof value === "string" ? JSON.parse(value) : value;
} else if (columnMetadata.type === "time") {
value = DateUtils.mixedTimeToString(value);
} else if (columnMetadata.type === "simple-array") {
value = DateUtils.stringToSimpleArray(value);
} else if (columnMetadata.type === "simple-json") {
value = DateUtils.stringToSimpleJson(value);
} else if ((columnMetadata.type === "enum" || columnMetadata.type === "simple-enum")
&& columnMetadata.enum
&& !isNaN(value)
&& columnMetadata.enum.indexOf(parseInt(value)) >= 0) {
// convert to number if that exists in possible enum options
value = parseInt(value);
} else if (columnMetadata.type === "set") {
value = DateUtils.stringToSimpleArray(value);
}
if (columnMetadata.transformer)
value = ApplyValueTransformers.transformFrom(columnMetadata.transformer, value);
return value;
}
/**
* Creates a database type from a given column metadata.
*/
normalizeType(column: { type: ColumnType, length?: number|string, precision?: number|null, scale?: number }): string {
if (column.type === Number || column.type === "integer") {
return "int";
} else if (column.type === String) {
return "varchar";
} else if (column.type === Date) {
return "datetime";
} else if ((column.type as any) === Buffer) {
return "blob";
} else if (column.type === Boolean) {
return "tinyint";
} else if (column.type === "uuid") {
return "varchar";
} else if (column.type === "json" && this.options.type === "mariadb") {
/*
* MariaDB implements this as LONGTEXT instead, as the JSON data type contradicts the SQL standard,
* and MariaDB's benchmarks indicate that performance is at least equivalent.
*
* @see https://mariadb.com/kb/en/json-data-type/
*/
return "longtext";
} else if (column.type === "simple-array" || column.type === "simple-json") {
return "text";
} else if (column.type === "simple-enum") {
return "enum";
} else if (column.type === "double precision" || column.type === "real") {
return "double";
} else if (column.type === "dec" || column.type === "numeric" || column.type === "fixed") {
return "decimal";
} else if (column.type === "bool" || column.type === "boolean") {
return "tinyint";
} else if (column.type === "nvarchar" || column.type === "national varchar") {
return "varchar";
} else if (column.type === "nchar" || column.type === "national char") {
return "char";
} else {
return column.type as string || "";
}
}
/**
* Normalizes "default" value of the column.
*/
normalizeDefault(columnMetadata: ColumnMetadata): string {
const defaultValue = columnMetadata.default;
if ((columnMetadata.type === "enum" || columnMetadata.type === "simple-enum") && defaultValue !== undefined) {
return `'${defaultValue}'`;
}
if ((columnMetadata.type === "set") && defaultValue !== undefined) {
return `'${DateUtils.simpleArrayToString(defaultValue)}'`;
}
if (typeof defaultValue === "number") {
return `'${defaultValue.toFixed(columnMetadata.scale)}'`;
} else if (typeof defaultValue === "boolean") {
return defaultValue === true ? "1" : "0";
} else if (typeof defaultValue === "function") {
return defaultValue();
} else if (typeof defaultValue === "string") {
return `'${defaultValue}'`;
} else if (defaultValue === null) {
return `NULL`;
} else {
return defaultValue;
}
}
/**
* Normalizes "isUnique" value of the column.
*/
normalizeIsUnique(column: ColumnMetadata): boolean {
return column.entityMetadata.indices.some(idx => idx.isUnique && idx.columns.length === 1 && idx.columns[0] === column);
}
/**
* Returns default column lengths, which is required on column creation.
*/
getColumnLength(column: ColumnMetadata|TableColumn): string {
if (column.length)
return column.length.toString();
/**
* fix https://github.com/typeorm/typeorm/issues/1139
*/
if (column.generationStrategy === "uuid")
return "36";
switch (column.type) {
case String:
case "varchar":
case "nvarchar":
case "national varchar":
return "255";
case "varbinary":
return "255";
default:
return "";
}
}
/**
* Creates column type definition including length, precision and scale
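* (illustrative, given the defaults above: e.g. `varchar(255)`, `decimal(10,0)`, `int(11)`)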
*/
createFullType(column: TableColumn): string {
let type = column.type;
// the 'getColumnLength()' method is used here because MySQL requires a column length for the `varchar`, `nvarchar` and `varbinary` data types
if (this.getColumnLength(column)) {
type += `(${this.getColumnLength(column)})`;
} else if (column.width) {
type += `(${column.width})`;
} else if (column.precision !== null && column.precision !== undefined && column.scale !== null && column.scale !== undefined) {
type += `(${column.precision},${column.scale})`;
} else if (column.precision !== null && column.precision !== undefined) {
type += `(${column.precision})`;
}
if (column.isArray)
type += " array";
return type;
}
/**
* Obtains a new database connection to a master server.
* Used for replication.
* If replication is not setup then returns default connection's database connection.
*/
obtainMasterConnection(): Promise<any> {
return new Promise<any>((ok, fail) => {
if (this.poolCluster) {
this.poolCluster.getConnection("MASTER", (err: any, dbConnection: any) => {
err ? fail(err) : ok(this.prepareDbConnection(dbConnection));
});
} else if (this.pool) {
this.pool.getConnection((err: any, dbConnection: any) => {
err ? fail(err) : ok(this.prepareDbConnection(dbConnection));
});
} else {
fail(new Error(`Connection is not established with mysql database`));
}
});
}
/**
* Obtains a new database connection to a slave server.
* Used for replication.
* If replication is not setup then returns master (default) connection's database connection.
*/
obtainSlaveConnection(): Promise<any> {
if (!this.poolCluster)
return this.obtainMasterConnection();
return new Promise<any>((ok, fail) => {
this.poolCluster.getConnection("SLAVE*", (err: any, dbConnection: any) => {
err ? fail(err) : ok(dbConnection);
});
});
}
/**
* Creates generated map of values generated or returned by database after INSERT query.
*/
createGeneratedMap(metadata: EntityMetadata, insertResult: any, entityIndex: number) {
const generatedMap = metadata.generatedColumns.reduce((map, generatedColumn) => {
let value: any;
if (generatedColumn.generationStrategy === "increment" && insertResult.insertId) {
// NOTE: When multiple rows are inserted by a single INSERT statement,
// `insertId` is the value generated for the first inserted row only.
value = insertResult.insertId + entityIndex;
// } else if (generatedColumn.generationStrategy === "uuid") {
// console.log("getting db value:", generatedColumn.databaseName);
// value = generatedColumn.getEntityValue(uuidMap);
}
return OrmUtils.mergeDeep(map, generatedColumn.createValueMap(value));
}, {} as ObjectLiteral);
return Object.keys(generatedMap).length > 0 ? generatedMap : undefined;
}
/**
* Differentiate columns of this table and columns from the given column metadatas columns
* and returns only changed.
*/
findChangedColumns(tableColumns: TableColumn[], columnMetadatas: ColumnMetadata[]): ColumnMetadata[] {
return columnMetadatas.filter(columnMetadata => {
const tableColumn = tableColumns.find(c => c.name === columnMetadata.databaseName);
if (!tableColumn)
return false; // we don't need new columns, we only need exist and changed
// console.log("table:", columnMetadata.entityMetadata.tableName);
// console.log("name:", tableColumn.name, columnMetadata.databaseName);
// console.log("type:", tableColumn.type, this.normalizeType(columnMetadata));
// console.log("length:", tableColumn.length, columnMetadata.length);
// console.log("width:", tableColumn.width, columnMetadata.width);
// console.log("precision:", tableColumn.precision, columnMetadata.precision);
// console.log("scale:", tableColumn.scale, columnMetadata.scale);
// console.log("zerofill:", tableColumn.zerofill, columnMetadata.zerofill);
// console.log("unsigned:", tableColumn.unsigned, columnMetadata.unsigned);
// console.log("asExpression:", tableColumn.asExpression, columnMetadata.asExpression);
// console.log("generatedType:", tableColumn.generatedType, columnMetadata.generatedType);
// console.log("comment:", tableColumn.comment, columnMetadata.comment);
// console.log("default:", tableColumn.default, columnMetadata.default);
// console.log("enum:", tableColumn.enum, columnMetadata.enum);
// console.log("default changed:", !this.compareDefaultValues(this.normalizeDefault(columnMetadata), tableColumn.default));
// console.log("onUpdate:", tableColumn.onUpdate, columnMetadata.onUpdate);
// console.log("isPrimary:", tableColumn.isPrimary, columnMetadata.isPrimary);
// console.log("isNullable:", tableColumn.isNullable, columnMetadata.isNullable);
// console.log("isUnique:", tableColumn.isUnique, this.normalizeIsUnique(columnMetadata));
// console.log("isGenerated:", tableColumn.isGenerated, columnMetadata.isGenerated);
// console.log((columnMetadata.generationStrategy !== "uuid" && tableColumn.isGenerated !== columnMetadata.isGenerated));
// console.log("==========================================");
let columnMetadataLength = columnMetadata.length;
if (!columnMetadataLength && columnMetadata.generationStrategy === "uuid") { // fixing #3374
columnMetadataLength = this.getColumnLength(columnMetadata);
}
return tableColumn.name !== columnMetadata.databaseName
|| tableColumn.type !== this.normalizeType(columnMetadata)
|| tableColumn.length !== columnMetadataLength
|| tableColumn.width !== columnMetadata.width
|| (columnMetadata.precision !== undefined && tableColumn.precision !== columnMetadata.precision)
|| (columnMetadata.scale !== undefined && tableColumn.scale !== columnMetadata.scale)
|| tableColumn.zerofill !== columnMetadata.zerofill
|| tableColumn.unsigned !== columnMetadata.unsigned
|| tableColumn.asExpression !== columnMetadata.asExpression
|| tableColumn.generatedType !== columnMetadata.generatedType
// || tableColumn.comment !== columnMetadata.comment // todo
|| !this.compareDefaultValues(this.normalizeDefault(columnMetadata), tableColumn.default)
|| (tableColumn.enum && columnMetadata.enum && !OrmUtils.isArraysEqual(tableColumn.enum, columnMetadata.enum.map(val => val + "")))
|| tableColumn.onUpdate !== columnMetadata.onUpdate
|| tableColumn.isPrimary !== columnMetadata.isPrimary
|| tableColumn.isNullable !== columnMetadata.isNullable
|| tableColumn.isUnique !== this.normalizeIsUnique(columnMetadata)
|| (columnMetadata.generationStrategy !== "uuid" && tableColumn.isGenerated !== columnMetadata.isGenerated);
});
}
/**
* Returns true if driver supports RETURNING / OUTPUT statement.
*/
isReturningSqlSupported(): boolean {
return false;
}
/**
* Returns true if driver supports uuid values generation on its own.
*/
isUUIDGenerationSupported(): boolean {
return false;
}
/**
* Returns true if driver supports fulltext indices.
*/
isFullTextColumnTypeSupported(): boolean {
return true;
}
/**
* Creates an escaped parameter.
*/
createParameter(parameterName: string, index: number): string {
return "?";
}
// -------------------------------------------------------------------------
// Protected Methods
// -------------------------------------------------------------------------
/**
* Loads all driver dependencies.
*/
protected loadDependencies(): void {
try {
this.mysql = PlatformTools.load("mysql"); // try to load first supported package
/*
* Some frameworks (such as Jest) may mess up Node's require cache and provide garbage for the 'mysql' module
* if it was not installed. We check that the object we got actually contains something otherwise we treat
* it as if the `require` call failed.
*
* @see https://github.com/typeorm/typeorm/issues/1373
*/
if (Object.keys(this.mysql).length === 0) {
throw new Error("'mysql' was found but it is empty. Falling back to 'mysql2'.");
}
} catch (e) {
try {
this.mysql = PlatformTools.load("mysql2"); // try to load second supported package
} catch (e) {
throw new DriverPackageNotInstalledError("Mysql", "mysql");
}
}
}
/**
* Creates a new connection pool for a given database credentials.
*/
protected createConnectionOptions(options: MysqlConnectionOptions, credentials: MysqlConnectionCredentialsOptions): Promise<any> {
credentials = Object.assign({}, credentials, DriverUtils.buildDriverOptions(credentials)); // todo: do it better way
// build connection options for the driver
return Object.assign({}, {
charset: options.charset,
timezone: options.timezone,
connectTimeout: options.connectTimeout,
insecureAuth: options.insecureAuth,
supportBigNumbers: options.supportBigNumbers !== undefined ? options.supportBigNumbers : true,
bigNumberStrings: options.bigNumberStrings !== undefined ? options.bigNumberStrings : true,
dateStrings: options.dateStrings,
debug: options.debug,
trace: options.trace,
multipleStatements: options.multipleStatements,
flags: options.flags
}, {
host: credentials.host,
user: credentials.username,
password: credentials.password,
database: credentials.database,
port: credentials.port,
ssl: options.ssl
},
options.acquireTimeout === undefined
? {}
: { acquireTimeout: options.acquireTimeout },
options.extra || {});
}
/**
* Creates a new connection pool for a given database credentials.
*/
protected createPool(connectionOptions: any): Promise<any> {
// create a connection pool
const pool = this.mysql.createPool(connectionOptions);
// make sure connection is working fine
return new Promise<void>((ok, fail) => {
// (issue #610) we make first connection to database to make sure if connection credentials are wrong
// we give error before calling any other method that creates actual query runner
pool.getConnection((err: any, connection: any) => {
if (err)
return pool.end(() => fail(err));
connection.release();
ok(pool);
});
});
}
/**
* Attaches all required base handlers to a database connection, such as the unhandled error handler.
*/
private prepareDbConnection(connection: any): any {
const { logger } = this.connection;
/*
Attaching an error handler to connection errors is essential, as, otherwise, errors raised will go unhandled and
cause the hosting app to crash.
*/
if (connection.listeners("error").length === 0) {
connection.on("error", (error: any) => logger.log("warn", `MySQL connection raised an error. ${error}`));
}
return connection;
}
/**
* Checks if "DEFAULT" values in the column metadata and in the database are equal.
*/
protected compareDefaultValues(columnMetadataValue: string, databaseValue: string): boolean {
if (typeof columnMetadataValue === "string" && typeof databaseValue === "string") {
// we need to strip the surrounding "'" quotes: in MySQL they only tell us whether the returned value is a string or a function,
// and leaving them in would make the comparison report a change even when the default has not really changed
columnMetadataValue = columnMetadataValue.replace(/^'+|'+$/g, "");
databaseValue = databaseValue.replace(/^'+|'+$/g, "");
}
return columnMetadataValue === databaseValue;
}
}<|fim▁end|>
|
*/
export class MysqlDriver implements Driver {
|
<|file_name|>bitcoin_ar.ts<|end_file_name|><|fim▁begin|><?xml version="1.0" ?><!DOCTYPE TS><TS language="ar" version="2.0">
<defaultcodec>UTF-8</defaultcodec>
<context>
<name>AboutDialog</name>
<message>
<location filename="../forms/aboutdialog.ui" line="+14"/>
<source>About Woodcoin</source>
<translation>عن Woodcoin</translation>
</message>
<message>
<location line="+39"/>
<source><b>Woodcoin</b> version</source>
<translation>نسخة <b>Woodcoin</b></translation>
</message>
<message>
<location line="+57"/>
<source>
This is experimental software.
Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php.
This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/) and cryptographic software written by Eric Young ([email protected]) and UPnP software written by Thomas Bernard.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../aboutdialog.cpp" line="+14"/>
<source>Copyright</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>The Woodcoin developers</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>AddressBookPage</name>
<message>
<location filename="../forms/addressbookpage.ui" line="+14"/>
<source>Address Book</source>
<translation>دفتر العناوين</translation>
</message>
<message>
<location line="+19"/>
<source>Double-click to edit address or label</source>
<translation>أنقر على الماوس مرتين لتعديل عنوان</translation>
</message>
<message>
<location line="+27"/>
<source>Create a new address</source>
<translation>قم بعمل عنوان جديد</translation>
</message>
<message>
<location line="+14"/>
<source>Copy the currently selected address to the system clipboard</source>
<translation>قم بنسخ القوانين المختارة لحافظة النظام</translation>
</message>
<message>
<location line="-11"/>
<source>&New Address</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../addressbookpage.cpp" line="+63"/>
<source>These are your Woodcoin addresses for receiving payments. You may want to give a different one to each sender so you can keep track of who is paying you.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../forms/addressbookpage.ui" line="+14"/>
<source>&Copy Address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>Show &QR Code</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>Sign a message to prove you own a Woodcoin address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Sign &Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+25"/>
<source>Delete the currently selected address from the list</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+27"/>
<source>Export the data in the current tab to a file</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Export</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-44"/>
<source>Verify a message to ensure it was signed with a specified Woodcoin address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Verify Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>&Delete</source>
<translation>&أمسح</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="-5"/>
<source>These are your Woodcoin addresses for sending payments. Always check the amount and the receiving address before sending coins.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>Copy &Label</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>&Edit</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Send &Coins</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+260"/>
<source>Export Address Book Data</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>Error exporting</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>AddressTableModel</name>
<message>
<location filename="../addresstablemodel.cpp" line="+144"/>
<source>Label</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+36"/>
<source>(no label)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>AskPassphraseDialog</name>
<message>
<location filename="../forms/askpassphrasedialog.ui" line="+26"/>
<source>Passphrase Dialog</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+21"/>
<source>Enter passphrase</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>New passphrase</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>Repeat new passphrase</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="+33"/>
<source>Enter the new passphrase to the wallet.<br/>Please use a passphrase of <b>10 or more random characters</b>, or <b>eight or more words</b>.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Encrypt wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>This operation needs your wallet passphrase to unlock the wallet.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Unlock wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>This operation needs your wallet passphrase to decrypt the wallet.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Decrypt wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Change passphrase</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Enter the old and new passphrase to the wallet.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+46"/>
<source>Confirm wallet encryption</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Warning: If you encrypt your wallet and lose your passphrase, you will <b>LOSE ALL OF YOUR WOODCOINS</b>!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Are you sure you wish to encrypt your wallet?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+100"/>
<location line="+24"/>
<source>Warning: The Caps Lock key is on!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-130"/>
<location line="+58"/>
<source>Wallet encrypted</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-56"/>
<source>Woodcoin will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your woodcoins from being stolen by malware infecting your computer.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<location line="+7"/>
<location line="+42"/>
<location line="+6"/>
<source>Wallet encryption failed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-54"/>
<source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<location line="+48"/>
<source>The supplied passphrases do not match.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-37"/>
<source>Wallet unlock failed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<location line="+11"/>
<location line="+19"/>
<source>The passphrase entered for the wallet decryption was incorrect.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-20"/>
<source>Wallet decryption failed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>Wallet passphrase was successfully changed.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>BitcoinGUI</name>
<message>
<location filename="../bitcoingui.cpp" line="+233"/>
<source>Sign &message...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+280"/>
<source>Synchronizing with network...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-349"/>
<source>&Overview</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Show general overview of wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+20"/>
<source>&Transactions</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Browse transaction history</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Edit the list of stored addresses and labels</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-14"/>
<source>Show the list of addresses for receiving payments</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+31"/>
<source>E&xit</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Quit application</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Show information about Woodcoin</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>About &Qt</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Show information about Qt</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>&Options...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>&Encrypt Wallet...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&Backup Wallet...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>&Change Passphrase...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+285"/>
<source>Importing blocks from disk...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Reindexing blocks on disk...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-347"/>
<source>Send coins to a Woodcoin address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+49"/>
<source>Modify configuration options for Woodcoin</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+9"/>
<source>Backup wallet to another location</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Change the passphrase used for wallet encryption</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>&Debug window</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Open debugging and diagnostic console</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-4"/>
<source>&Verify message...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-165"/>
<location line="+530"/>
<source>Woodcoin</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-530"/>
<source>Wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+101"/>
<source>&Send</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>&Receive</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>&Addresses</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+22"/>
<source>&About Woodcoin</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+9"/>
<source>&Show / Hide</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Show or hide the main Window</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Encrypt the private keys that belong to your wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Sign messages with your Woodcoin addresses to prove you own them</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Verify messages to ensure they were signed with specified Woodcoin addresses</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+28"/>
<source>&File</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>&Settings</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>&Help</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+9"/>
<source>Tabs toolbar</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+17"/>
<location line="+10"/>
<source>[testnet]</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+47"/>
<source>Woodcoin client</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+141"/>
<source>%n active connection(s) to Woodcoin network</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+22"/>
<source>No block source available...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+12"/>
<source>Processed %1 of %2 (estimated) blocks of transaction history.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Processed %1 blocks of transaction history.</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+20"/>
<source>%n hour(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n day(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n week(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+4"/>
<source>%1 behind</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>Last received block was generated %1 ago.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Transactions after this will not yet be visible.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+22"/>
<source>Error</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Warning</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+70"/>
<source>This transaction is over the size limit. You can still send it for a fee of %1, which goes to the nodes that process your transaction and helps to support the network. Do you want to pay the fee?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-140"/>
<source>Up to date</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+31"/>
<source>Catching up...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+113"/>
<source>Confirm transaction fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Sent transaction</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Incoming transaction</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Date: %1
Amount: %2
Type: %3
Address: %4
</source>
<translation type="unfinished"/>
</message>
<message><|fim▁hole|> <location line="+33"/>
<location line="+23"/>
<source>URI handling</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-23"/>
<location line="+23"/>
<source>URI can not be parsed! This can be caused by an invalid Woodcoin address or malformed URI parameters.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+17"/>
<source>Wallet is &lt;b&gt;encrypted&lt;/b&gt; and currently &lt;b&gt;unlocked&lt;/b&gt;</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Wallet is &lt;b&gt;encrypted&lt;/b&gt; and currently &lt;b&gt;locked&lt;/b&gt;</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../bitcoin.cpp" line="+111"/>
<source>A fatal error occurred. Woodcoin can no longer continue safely and will quit.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>ClientModel</name>
<message>
<location filename="../clientmodel.cpp" line="+104"/>
<source>Network Alert</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>EditAddressDialog</name>
<message>
<location filename="../forms/editaddressdialog.ui" line="+14"/>
<source>Edit Address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>&amp;Label</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>The label associated with this address book entry</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>&amp;Address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>The address associated with this address book entry. This can only be modified for sending addresses.</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../editaddressdialog.cpp" line="+21"/>
<source>New receiving address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>New sending address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Edit receiving address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Edit sending address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+76"/>
<source>The entered address "%1" is already in the address book.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-5"/>
<source>The entered address "%1" is not a valid Woodcoin address.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>Could not unlock wallet.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>New key generation failed.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>GUIUtil::HelpMessageBox</name>
<message>
<location filename="../guiutil.cpp" line="+424"/>
<location line="+12"/>
<source>Woodcoin-Qt</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-12"/>
<source>version</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Usage:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>command-line options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>UI options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Set language, for example "de_DE" (default: system locale)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Start minimized</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Show splash screen on startup (default: 1)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>OptionsDialog</name>
<message>
<location filename="../forms/optionsdialog.ui" line="+14"/>
<source>Options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>&amp;Main</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Optional transaction fee per kB that helps make sure your transactions are processed quickly. Most transactions are 1 kB.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>Pay transaction &amp;fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+31"/>
<source>Automatically start Woodcoin after logging in to the system.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&amp;Start Woodcoin on system login</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+35"/>
<source>Reset all client options to default.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&amp;Reset Options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>&amp;Network</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Automatically open the Woodcoin client port on the router. This only works when your router supports UPnP and it is enabled.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Map port using &amp;UPnP</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Connect to the Woodcoin network through a SOCKS proxy (e.g. when connecting through Tor).</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&amp;Connect through SOCKS proxy:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+9"/>
<source>Proxy &amp;IP:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>IP address of the proxy (e.g. 127.0.0.1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>&amp;Port:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>Port of the proxy (e.g. 9050)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>SOCKS &amp;Version:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>SOCKS version of the proxy (e.g. 5)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+36"/>
<source>&amp;Window</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Show only a tray icon after minimizing the window.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&amp;Minimize to the tray instead of the taskbar</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Quit in the menu.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>M&amp;inimize on close</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+21"/>
<source>&amp;Display</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>User Interface &amp;language:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>The user interface language can be set here. This setting will take effect after restarting Woodcoin.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>&amp;Unit to show amounts in:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>Choose the default subdivision unit to show in the interface and when sending coins.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+9"/>
<source>Whether to show Woodcoin addresses in the transaction list or not.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&amp;Display addresses in transaction list</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+71"/>
<source>&amp;OK</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>&amp;Cancel</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>&amp;Apply</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../optionsdialog.cpp" line="+53"/>
<source>default</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+130"/>
<source>Confirm options reset</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Some settings may require a client restart to take effect.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Do you want to proceed?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+42"/>
<location line="+9"/>
<source>Warning</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-9"/>
<location line="+9"/>
<source>This setting will take effect after restarting Woodcoin.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+29"/>
<source>The supplied proxy address is invalid.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>OverviewPage</name>
<message>
<location filename="../forms/overviewpage.ui" line="+14"/>
<source>Form</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+50"/>
<location line="+166"/>
<source>The displayed information may be out of date. Your wallet automatically synchronizes with the Woodcoin network after a connection is established, but this process has not completed yet.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-124"/>
<source>Balance:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+29"/>
<source>Unconfirmed:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-78"/>
<source>Wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+107"/>
<source>Immature:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>Mined balance that has not yet matured</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+46"/>
<source>&lt;b&gt;Recent transactions&lt;/b&gt;</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-101"/>
<source>Your current balance</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+29"/>
<source>Total of transactions that have yet to be confirmed, and do not yet count toward the current balance</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../overviewpage.cpp" line="+116"/>
<location line="+1"/>
<source>out of sync</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>PaymentServer</name>
<message>
<location filename="../paymentserver.cpp" line="+107"/>
<source>Cannot start woodcoin: click-to-pay handler</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>QRCodeDialog</name>
<message>
<location filename="../forms/qrcodedialog.ui" line="+14"/>
<source>QR Code Dialog</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+59"/>
<source>Request Payment</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+56"/>
<source>Amount:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-44"/>
<source>Label:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>Message:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+71"/>
<source>&amp;Save As...</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../qrcodedialog.cpp" line="+62"/>
<source>Error encoding URI into QR Code.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+40"/>
<source>The entered amount is invalid, please check.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Resulting URI too long, try to reduce the text for label / message.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+25"/>
<source>Save QR Code</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>PNG Images (*.png)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>RPCConsole</name>
<message>
<location filename="../forms/rpcconsole.ui" line="+46"/>
<source>Client name</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<location line="+23"/>
<location line="+26"/>
<location line="+23"/>
<location line="+23"/>
<location line="+36"/>
<location line="+53"/>
<location line="+23"/>
<location line="+23"/>
<location filename="../rpcconsole.cpp" line="+339"/>
<source>N/A</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-217"/>
<source>Client version</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-45"/>
<source>&amp;Information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+68"/>
<source>Using OpenSSL version</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+49"/>
<source>Startup time</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+29"/>
<source>Network</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Number of connections</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>On testnet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Block chain</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Current number of blocks</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Estimated total blocks</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Last block time</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+52"/>
<source>&amp;Open</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>Command-line options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Show the Woodcoin-Qt help message to get a list with possible Woodcoin command-line options.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>&amp;Show</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+24"/>
<source>&amp;Console</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-260"/>
<source>Build date</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-104"/>
<source>Woodcoin - Debug window</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+25"/>
<source>Woodcoin Core</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+279"/>
<source>Debug log file</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Open the Woodcoin debug log file from the current data directory. This can take a few seconds for large log files.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+102"/>
<source>Clear console</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../rpcconsole.cpp" line="-30"/>
<source>Welcome to the Woodcoin RPC console.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Use up and down arrows to navigate history, and &lt;b&gt;Ctrl-L&lt;/b&gt; to clear screen.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Type <b>help</b> for an overview of available commands.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>SendCoinsDialog</name>
<message>
<location filename="../forms/sendcoinsdialog.ui" line="+14"/>
<location filename="../sendcoinsdialog.cpp" line="+124"/>
<location line="+5"/>
<location line="+5"/>
<location line="+5"/>
<location line="+6"/>
<location line="+5"/>
<location line="+5"/>
<source>Send Coins</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+50"/>
<source>Send to multiple recipients at once</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Add &amp;Recipient</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+20"/>
<source>Remove all transaction fields</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Clear &amp;All</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+22"/>
<source>Balance:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>123.456 BTC</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+31"/>
<source>Confirm the send action</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>S&amp;end</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="-59"/>
<source>&lt;b&gt;%1&lt;/b&gt; to %2 (%3)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Confirm send coins</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Are you sure you want to send %1?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source> and </source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>The recipient address is not valid, please recheck.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>The amount to pay must be larger than 0.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>The amount exceeds your balance.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>The total exceeds your balance when the %1 transaction fee is included.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Duplicate address found, can only send to each address once per send operation.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Error: Transaction creation failed!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>SendCoinsEntry</name>
<message>
<location filename="../forms/sendcoinsentry.ui" line="+14"/>
<source>Form</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+15"/>
<source>A&amp;mount:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>Pay &amp;To:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+34"/>
<source>The address to send the payment to (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+60"/>
<location filename="../sendcoinsentry.cpp" line="+26"/>
<source>Enter a label for this address to add it to your address book</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-78"/>
<source>&amp;Label:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+28"/>
<source>Choose address from address book</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>Alt+A</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Paste address from clipboard</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Remove this recipient</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../sendcoinsentry.cpp" line="+1"/>
<source>Enter a Woodcoin address (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>SignVerifyMessageDialog</name>
<message>
<location filename="../forms/signverifymessagedialog.ui" line="+14"/>
<source>Signatures - Sign / Verify a Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+13"/>
<source>&amp;Sign Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>You can sign messages with your addresses to prove you own them. Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+18"/>
<source>The address to sign the message with (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<location line="+213"/>
<source>Choose an address from the address book</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-203"/>
<location line="+213"/>
<source>Alt+A</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-203"/>
<source>Paste address from clipboard</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+12"/>
<source>Enter the message you want to sign here</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Signature</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+27"/>
<source>Copy the current signature to the system clipboard</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+21"/>
<source>Sign the message to prove you own this Woodcoin address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Sign &amp;Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>Reset all sign message fields</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<location line="+146"/>
<source>Clear &amp;All</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-87"/>
<source>&amp;Verify Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Enter the signing address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+21"/>
<source>The address the message was signed with (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+40"/>
<source>Verify the message to ensure it was signed with the specified Woodcoin address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Verify &amp;Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>Reset all verify message fields</source>
<translation type="unfinished"/>
</message>
<message>
<location filename="../signverifymessagedialog.cpp" line="+27"/>
<location line="+3"/>
<source>Enter a Woodcoin address (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-2"/>
<source>Click "Sign Message" to generate signature</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Enter Woodcoin signature</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+82"/>
<location line="+81"/>
<source>The entered address is invalid.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-81"/>
<location line="+8"/>
<location line="+73"/>
<location line="+8"/>
<source>Please check the address and try again.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-81"/>
<location line="+81"/>
<source>The entered address does not refer to a key.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-73"/>
<source>Wallet unlock was cancelled.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Private key for the entered address is not available.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+12"/>
<source>Message signing failed.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Message signed.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+59"/>
<source>The signature could not be decoded.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<location line="+13"/>
<source>Please check the signature and try again.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>The signature did not match the message digest.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Message verification failed.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Message verified.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>SplashScreen</name>
<message>
<location filename="../splashscreen.cpp" line="+22"/>
<source>The Woodcoin developers</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>[testnet]</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>TransactionDesc</name>
<message>
<location filename="../transactiondesc.cpp" line="+20"/>
<source>Open until %1</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>%1/offline</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>%1/unconfirmed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>%1 confirmations</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+18"/>
<source>Status</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+7"/>
<source>, broadcast through %n node(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+4"/>
<source>Date</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Source</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Generated</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<location line="+17"/>
<source>From</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<location line="+22"/>
<location line="+58"/>
<source>To</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-77"/>
<location line="+2"/>
<source>own address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-2"/>
<source>label</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+37"/>
<location line="+12"/>
<location line="+45"/>
<location line="+17"/>
<location line="+30"/>
<source>Credit</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="-102"/>
<source>matures in %n more block(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+2"/>
<source>not accepted</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+44"/>
<location line="+8"/>
<location line="+15"/>
<location line="+30"/>
<source>Debit</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-39"/>
<source>Transaction fee</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>Net amount</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Comment</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Transaction ID</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Generated coins must mature 120 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to "not accepted" and it won't be spendable. This may occasionally happen if another node generates a block within a few seconds of yours.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Debug information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Transaction</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Inputs</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+23"/>
<source>Amount</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>true</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>false</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-209"/>
<source>, has not been successfully broadcast yet</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="-35"/>
<source>Open for %n more block(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+70"/>
<source>unknown</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>TransactionDescDialog</name>
<message>
<location filename="../forms/transactiondescdialog.ui" line="+14"/>
<source>Transaction details</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>This pane shows a detailed description of the transaction</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>TransactionTableModel</name>
<message>
<location filename="../transactiontablemodel.cpp" line="+225"/>
<source>Date</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Type</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Amount</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+57"/>
<source>Open for %n more block(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+3"/>
<source>Open until %1</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Offline (%1 confirmations)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Unconfirmed (%1 of %2 confirmations)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Confirmed (%1 confirmations)</source>
<translation type="unfinished"/>
</message>
<message numerus="yes">
<location line="+8"/>
<source>Mined balance will be available when it matures in %n more block(s)</source>
<translation type="unfinished"><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform></translation>
</message>
<message>
<location line="+5"/>
<source>This block was not received by any other nodes and will probably not be accepted!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Generated but not accepted</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+43"/>
<source>Received with</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Received from</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Sent to</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Payment to yourself</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Mined</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+38"/>
<source>(n/a)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+199"/>
<source>Transaction status. Hover over this field to show number of confirmations.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Date and time that the transaction was received.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Type of transaction.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Destination address of transaction.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Amount removed from or added to balance.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>TransactionView</name>
<message>
<location filename="../transactionview.cpp" line="+52"/>
<location line="+16"/>
<source>All</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-15"/>
<source>Today</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>This week</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>This month</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Last month</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>This year</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Range...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>Received with</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Sent to</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>To yourself</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Mined</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Other</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Enter address or label to search</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Min amount</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+34"/>
<source>Copy address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy label</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy amount</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Copy transaction ID</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Edit label</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Show transaction details</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+139"/>
<source>Export Transaction Data</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Confirmed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Date</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Type</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Label</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Amount</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>ID</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Error exporting</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+100"/>
<source>Range:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>to</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>WalletModel</name>
<message>
<location filename="../walletmodel.cpp" line="+193"/>
<source>Send Coins</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>WalletView</name>
<message>
<location filename="../walletview.cpp" line="+42"/>
<source>&amp;Export</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Export the data in the current tab to a file</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+193"/>
<source>Backup Wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>Wallet Data (*.dat)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Backup Failed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>There was an error trying to save the wallet data to the new location.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Backup Successful</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+0"/>
<source>The wallet data was successfully saved to the new location.</source>
<translation type="unfinished"/>
</message>
</context>
<context>
<name>bitcoin-core</name>
<message>
<location filename="../bitcoinstrings.cpp" line="+94"/>
<source>Woodcoin version</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+102"/>
<source>Usage:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-29"/>
<source>Send command to -server or woodcoind</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-23"/>
<source>List commands</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-12"/>
<source>Get help for a command</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+24"/>
<source>Options:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+24"/>
<source>Specify configuration file (default: woodcoin.conf)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Specify pid file (default: woodcoind.pid)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-1"/>
<source>Specify data directory</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-9"/>
<source>Set database cache size in megabytes (default: 25)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-28"/>
<source>Listen for connections on &lt;port&gt; (default: 33002 or testnet: 44002)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Maintain at most &lt;n&gt; connections to peers (default: 125)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-48"/>
<source>Connect to a node to retrieve peer addresses, and disconnect</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+82"/>
<source>Specify your own public address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Threshold for disconnecting misbehaving peers (default: 100)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-134"/>
<source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-29"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv4: %s</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+27"/>
<source>Listen for JSON-RPC connections on &lt;port&gt; (default: 33001 or testnet: 44001)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+37"/>
<source>Accept command line and JSON-RPC commands</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+76"/>
<source>Run in the background as a daemon and accept commands</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+37"/>
<source>Use the test network</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-112"/>
<source>Accept connections from outside (default: 1 if no -proxy or -connect)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-80"/>
<source>%s, you must set a rpcpassword in the configuration file:
%s
It is recommended you use the following random password:
rpcuser=woodcoinrpc
rpcpassword=%s
(you do not need to remember this password)
The username and password MUST NOT be the same.
If the file does not exist, create it with owner-readable-only file permissions.
It is also recommended to set alertnotify so you are notified of problems;
for example: alertnotify=echo %%s | mail -s "Woodcoin Alert" [email protected]
</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+17"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Bind to given address and always listen on it. Use [host]:port notation for IPv6</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Cannot obtain a lock on data directory %s. Woodcoin is probably already running.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Error: The transaction was rejected! This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Execute command when a relevant alert is received (%s in cmd is replaced by message)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+11"/>
<source>Set maximum size of high-priority/low-fee transactions in bytes (default: 27000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>This is a pre-release test build - use at your own risk - do not use for mining or merchant applications</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Warning: Displayed transactions may not be correct! You may need to upgrade, or other nodes may need to upgrade.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Warning: Please check that your computer's date and time are correct! If your clock is wrong Woodcoin will not work properly.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Warning: error reading wallet.dat! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Warning: wallet.dat corrupt, data salvaged! Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+14"/>
<source>Attempt to recover private keys from a corrupt wallet.dat</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Block creation options:</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Connect only to the specified node(s)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Corrupted block database detected</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Discover own IP address (default: 1 when listening and no -externalip)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Do you want to rebuild the block database now?</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Error initializing block database</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Error initializing wallet database environment %s!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Error loading block database</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Error opening block database</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Error: Disk space is low!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Error: Wallet locked, unable to create transaction!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Error: system error: </source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to listen on any port. Use -listen=0 if you want this.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to read block info</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to read block</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to sync block index</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write block index</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write block info</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write block</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write file info</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write to coin database</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write transaction index</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Failed to write undo data</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Find peers using DNS lookup (default: 1 unless -connect)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Generate coins (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>How many blocks to check at startup (default: 288, 0 = all)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>How thorough the block verification is (0-4, default: 3)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>Not enough file descriptors available.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Rebuild block chain index from current blk000??.dat files</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+16"/>
<source>Set the number of threads to service RPC calls (default: 4)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+26"/>
<source>Verifying blocks...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Verifying wallet...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-69"/>
<source>Imports blocks from external blk000??.dat file</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-76"/>
<source>Set the number of script verification threads (up to 16, 0 = auto, &lt;0 = leave that many cores free, default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+77"/>
<source>Information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Invalid -tor address: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Invalid amount for -minrelaytxfee=&lt;amount&gt;: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Invalid amount for -mintxfee=&lt;amount&gt;: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+8"/>
<source>Maintain a full transaction index (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Maximum per-connection receive buffer, &lt;n&gt;*1000 bytes (default: 5000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Maximum per-connection send buffer, &lt;n&gt;*1000 bytes (default: 1000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Only accept block chain matching built-in checkpoints (default: 1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Only connect to nodes in network &lt;net&gt; (IPv4, IPv6 or Tor)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Output extra debugging information. Implies all other -debug* options</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Output extra network debugging information</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Prepend debug output with timestamp</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>SSL options: (see the Woodcoin Wiki for SSL setup instructions)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Select the version of socks proxy to use (4-5, default: 5)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Send trace/debug info to console instead of debug.log file</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Send trace/debug info to debugger</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+5"/>
<source>Set maximum block size in bytes (default: 250000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Set minimum block size in bytes (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Shrink debug.log file on client startup (default: 1 when no -debug)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Signing transaction failed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Specify connection timeout in milliseconds (default: 5000)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>System error: </source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Transaction amount too small</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Transaction amounts must be positive</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Transaction too large</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+7"/>
<source>Use UPnP to map the listening port (default: 0)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Use UPnP to map the listening port (default: 1 when listening)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Use proxy to reach tor hidden services (default: same as -proxy)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+2"/>
<source>Username for JSON-RPC connections</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+4"/>
<source>Warning</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Warning: This version is obsolete, upgrade required!</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>You need to rebuild the databases using -reindex to change -txindex</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>wallet.dat corrupt, salvage failed</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-50"/>
<source>Password for JSON-RPC connections</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-67"/>
<source>Allow JSON-RPC connections from specified IP address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+76"/>
<source>Send commands to node running on <ip> (default: 127.0.0.1)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-120"/>
<source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+147"/>
<source>Upgrade wallet to latest format</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-21"/>
<source>Set key pool size to <n> (default: 100)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-12"/>
<source>Rescan the block chain for missing wallet transactions</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+35"/>
<source>Use OpenSSL (https) for JSON-RPC connections</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-26"/>
<source>Server certificate file (default: server.cert)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Server private key (default: server.pem)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-151"/>
<source>Acceptable ciphers (default: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+165"/>
<source>This help message</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+6"/>
<source>Unable to bind to %s on this computer (bind returned error %d, %s)</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-91"/>
<source>Connect through socks proxy</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-10"/>
<source>Allow DNS lookups for -addnode, -seednode and -connect</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+55"/>
<source>Loading addresses...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-35"/>
<source>Error loading wallet.dat: Wallet corrupted</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Error loading wallet.dat: Wallet requires newer version of Woodcoin</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+93"/>
<source>Wallet needed to be rewritten: restart Woodcoin to complete</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-95"/>
<source>Error loading wallet.dat</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+28"/>
<source>Invalid -proxy address: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+56"/>
<source>Unknown network specified in -onlynet: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-1"/>
<source>Unknown -socks proxy version requested: %i</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-96"/>
<source>Cannot resolve -bind address: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Cannot resolve -externalip address: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+44"/>
<source>Invalid amount for -paytxfee=<amount>: '%s'</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+1"/>
<source>Invalid amount</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-6"/>
<source>Insufficient funds</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+10"/>
<source>Loading block index...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-57"/>
<source>Add a node to connect to and attempt to keep the connection open</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-25"/>
<source>Unable to bind to %s on this computer. Woodcoin is probably already running.</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+64"/>
<source>Fee per KB to add to transactions you send</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+19"/>
<source>Loading wallet...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-52"/>
<source>Cannot downgrade wallet</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+3"/>
<source>Cannot write default address</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+64"/>
<source>Rescanning...</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-57"/>
<source>Done loading</source>
<translation type="unfinished"/>
</message>
<message>
<location line="+82"/>
<source>To use the %s option</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-74"/>
<source>Error</source>
<translation type="unfinished"/>
</message>
<message>
<location line="-31"/>
<source>You must set rpcpassword=<password> in the configuration file:
%s
If the file does not exist, create it with owner-readable-only file permissions.</source>
<translation type="unfinished"/>
</message>
</context>
</TS><|fim▁end|>